/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * THIS FILE IS AUTOGENERATED BY generate_tracepoints.py.
 * DO NOT EDIT.
 */

#if !defined(_KBASE_TRACEPOINTS_H)
#define _KBASE_TRACEPOINTS_H

/* Tracepoints are abstract callbacks notifying that some important
 * software or hardware event has happened.
 *
 * In this particular implementation, it results in a MIPE
 * timeline event and, in some cases, it also fires an ftrace event
 * (a.k.a. Gator events, see details below).
 */

#include "mali_kbase.h"
#include "mali_kbase_gator.h"

#include <linux/types.h>
#include <linux/atomic.h>

/* clang-format off */

struct kbase_tlstream;

extern const size_t __obj_stream_offset;
extern const size_t __aux_stream_offset;

/* This macro dispatches a kbase_tlstream from
 * a kbase_device instance. Only AUX or OBJ
 * streams can be dispatched. It is aware of
 * the kbase_timeline binary representation and
 * relies on the offset variables
 * __obj_stream_offset and __aux_stream_offset.
 */
#define __TL_DISPATCH_STREAM(kbdev, stype) \
	((struct kbase_tlstream *) \
	((u8 *)kbdev->timeline + __ ## stype ## _stream_offset))
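
/*
 * Illustrative sketch, not part of the generated header: for a given
 * struct kbase_device *kbdev, the macro above resolves a stream with
 * plain pointer arithmetic, e.g.
 *
 *   struct kbase_tlstream *obj_stream = __TL_DISPATCH_STREAM(kbdev, obj);
 *   struct kbase_tlstream *aux_stream = __TL_DISPATCH_STREAM(kbdev, aux);
 *
 * which expand to kbdev->timeline offset by __obj_stream_offset and
 * __aux_stream_offset respectively (obj_stream and aux_stream are
 * hypothetical local names).
 */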
struct tp_desc;

/* Descriptors of timeline messages transmitted in object events stream. */
extern const char *obj_desc_header;
extern const size_t obj_desc_header_size;
/* Descriptors of timeline messages transmitted in auxiliary events stream. */
extern const char *aux_desc_header;
extern const size_t aux_desc_header_size;

#define TL_ATOM_STATE_IDLE 0
#define TL_ATOM_STATE_READY 1
#define TL_ATOM_STATE_DONE 2
#define TL_ATOM_STATE_POSTED 3

#define TL_JS_EVENT_START GATOR_JOB_SLOT_START
#define TL_JS_EVENT_STOP GATOR_JOB_SLOT_STOP
#define TL_JS_EVENT_SOFT_STOP GATOR_JOB_SLOT_SOFT_STOPPED

#define TLSTREAM_ENABLED (1u << 31)

void __kbase_tlstream_tl_new_ctx(
	struct kbase_tlstream *stream,
	const void *ctx,
	u32 ctx_nr,
	u32 tgid
);

void __kbase_tlstream_tl_new_gpu(
	struct kbase_tlstream *stream,
	const void *gpu,
	u32 gpu_id,
	u32 core_count
);

void __kbase_tlstream_tl_new_lpu(
	struct kbase_tlstream *stream,
	const void *lpu,
	u32 lpu_nr,
	u32 lpu_fn
);

void __kbase_tlstream_tl_new_atom(
	struct kbase_tlstream *stream,
	const void *atom,
	u32 atom_nr
);

void __kbase_tlstream_tl_new_as(
	struct kbase_tlstream *stream,
	const void *address_space,
	u32 as_nr
);

void __kbase_tlstream_tl_del_ctx(
	struct kbase_tlstream *stream,
	const void *ctx
);

void __kbase_tlstream_tl_del_atom(
	struct kbase_tlstream *stream,
	const void *atom
);

void __kbase_tlstream_tl_lifelink_lpu_gpu(
	struct kbase_tlstream *stream,
	const void *lpu,
	const void *gpu
);

void __kbase_tlstream_tl_lifelink_as_gpu(
	struct kbase_tlstream *stream,
	const void *address_space,
	const void *gpu
);

void __kbase_tlstream_tl_ret_ctx_lpu(
	struct kbase_tlstream *stream,
	const void *ctx,
	const void *lpu
);

void __kbase_tlstream_tl_ret_atom_ctx(
	struct kbase_tlstream *stream,
	const void *atom,
	const void *ctx
);

void __kbase_tlstream_tl_ret_atom_lpu(
	struct kbase_tlstream *stream,
	const void *atom,
	const void *lpu,
	const char *attrib_match_list
);

void __kbase_tlstream_tl_nret_ctx_lpu(
	struct kbase_tlstream *stream,
	const void *ctx,
	const void *lpu
);

void __kbase_tlstream_tl_nret_atom_ctx(
	struct kbase_tlstream *stream,
	const void *atom,
	const void *ctx
);

void __kbase_tlstream_tl_nret_atom_lpu(
	struct kbase_tlstream *stream,
	const void *atom,
	const void *lpu
);

void __kbase_tlstream_tl_ret_as_ctx(
	struct kbase_tlstream *stream,
	const void *address_space,
	const void *ctx
);

void __kbase_tlstream_tl_nret_as_ctx(
	struct kbase_tlstream *stream,
	const void *address_space,
	const void *ctx
);

void __kbase_tlstream_tl_ret_atom_as(
	struct kbase_tlstream *stream,
	const void *atom,
	const void *address_space
);

void __kbase_tlstream_tl_nret_atom_as(
	struct kbase_tlstream *stream,
	const void *atom,
	const void *address_space
);

void __kbase_tlstream_tl_attrib_atom_config(
	struct kbase_tlstream *stream,
	const void *atom,
	u64 descriptor,
	u64 affinity,
	u32 config
);

void __kbase_tlstream_tl_jit_usedpages(
	struct kbase_tlstream *stream,
	u64 used_pages,
	u32 j_id
);

void __kbase_tlstream_tl_attrib_atom_jitallocinfo(
	struct kbase_tlstream *stream,
	const void *atom,
	u64 va_pgs,
	u64 com_pgs,
	u64 extent,
	u32 j_id,
	u32 bin_id,
	u32 max_allocs,
	u32 jit_flags,
	u32 usg_id
);

void __kbase_tlstream_tl_attrib_atom_jitfreeinfo(
	struct kbase_tlstream *stream,
	const void *atom,
	u32 j_id
);

void __kbase_tlstream_tl_attrib_as_config(
	struct kbase_tlstream *stream,
	const void *address_space,
	u64 transtab,
	u64 memattr,
	u64 transcfg
);

void __kbase_tlstream_tl_event_lpu_softstop(
	struct kbase_tlstream *stream,
	const void *lpu
);

void __kbase_tlstream_tl_event_atom_softstop_ex(
	struct kbase_tlstream *stream,
	const void *atom
);

void __kbase_tlstream_tl_event_atom_softstop_issue(
	struct kbase_tlstream *stream,
	const void *atom
);

void __kbase_tlstream_tl_event_atom_softjob_start(
	struct kbase_tlstream *stream,
	const void *atom
);

void __kbase_tlstream_tl_event_atom_softjob_end(
	struct kbase_tlstream *stream,
	const void *atom
);

void __kbase_tlstream_tl_arbiter_granted(
	struct kbase_tlstream *stream,
	const void *gpu
);

void __kbase_tlstream_tl_arbiter_started(
	struct kbase_tlstream *stream,
	const void *gpu
);

void __kbase_tlstream_tl_arbiter_stop_requested(
	struct kbase_tlstream *stream,
	const void *gpu
);

void __kbase_tlstream_tl_arbiter_stopped(
	struct kbase_tlstream *stream,
	const void *gpu
);

void __kbase_tlstream_tl_arbiter_requested(
	struct kbase_tlstream *stream,
	const void *gpu
);

void __kbase_tlstream_jd_gpu_soft_reset(
	struct kbase_tlstream *stream,
	const void *gpu
);

void __kbase_tlstream_jd_tiler_heap_chunk_alloc(
	struct kbase_tlstream *stream,
	u32 ctx_nr,
	u64 heap_id,
	u64 chunk_va
);

void __kbase_tlstream_tl_js_sched_start(
	struct kbase_tlstream *stream,
	u32 dummy
);

void __kbase_tlstream_tl_js_sched_end(
	struct kbase_tlstream *stream,
	u32 dummy
);

void __kbase_tlstream_tl_jd_submit_atom_start(
	struct kbase_tlstream *stream,
	const void *atom
);

void __kbase_tlstream_tl_jd_submit_atom_end(
	struct kbase_tlstream *stream,
	const void *atom
);

void __kbase_tlstream_tl_jd_done_no_lock_start(
	struct kbase_tlstream *stream,
	const void *atom
);

void __kbase_tlstream_tl_jd_done_no_lock_end(
	struct kbase_tlstream *stream,
	const void *atom
);

void __kbase_tlstream_tl_jd_done_start(
	struct kbase_tlstream *stream,
	const void *atom
);

void __kbase_tlstream_tl_jd_done_end(
	struct kbase_tlstream *stream,
	const void *atom
);

void __kbase_tlstream_tl_jd_atom_complete(
	struct kbase_tlstream *stream,
	const void *atom
);

void __kbase_tlstream_tl_run_atom_start(
	struct kbase_tlstream *stream,
	const void *atom,
	u32 atom_nr
);

void __kbase_tlstream_tl_run_atom_end(
	struct kbase_tlstream *stream,
	const void *atom,
	u32 atom_nr
);

void __kbase_tlstream_tl_attrib_atom_priority(
	struct kbase_tlstream *stream,
	const void *atom,
	u32 prio
);

void __kbase_tlstream_tl_attrib_atom_state(
	struct kbase_tlstream *stream,
	const void *atom,
	u32 state
);

void __kbase_tlstream_tl_attrib_atom_prioritized(
	struct kbase_tlstream *stream,
	const void *atom
);

void __kbase_tlstream_tl_attrib_atom_jit(
	struct kbase_tlstream *stream,
	const void *atom,
	u64 edit_addr,
	u64 new_addr,
	u32 jit_flags,
	u64 mem_flags,
	u32 j_id,
	u64 com_pgs,
	u64 extent,
	u64 va_pgs
);

void __kbase_tlstream_tl_kbase_new_device(
	struct kbase_tlstream *stream,
	u32 kbase_device_id,
	u32 kbase_device_gpu_core_count,
	u32 kbase_device_max_num_csgs,
	u32 kbase_device_as_count,
	u32 kbase_device_sb_entry_count,
	u32 kbase_device_has_cross_stream_sync,
	u32 kbase_device_supports_gpu_sleep
);

void __kbase_tlstream_tl_kbase_gpucmdqueue_kick(
	struct kbase_tlstream *stream,
	u32 kernel_ctx_id,
	u64 buffer_gpu_addr
);

void __kbase_tlstream_tl_kbase_device_program_csg(
	struct kbase_tlstream *stream,
	u32 kbase_device_id,
	u32 kernel_ctx_id,
	u32 gpu_cmdq_grp_handle,
	u32 kbase_device_csg_slot_index,
	u32 kbase_device_csg_slot_resuming
);

void __kbase_tlstream_tl_kbase_device_deprogram_csg(
	struct kbase_tlstream *stream,
	u32 kbase_device_id,
	u32 kbase_device_csg_slot_index
);

void __kbase_tlstream_tl_kbase_device_halting_csg(
	struct kbase_tlstream *stream,
	u32 kbase_device_id,
	u32 kbase_device_csg_slot_index,
	u32 kbase_device_csg_slot_suspending
);

void __kbase_tlstream_tl_kbase_device_suspend_csg(
	struct kbase_tlstream *stream,
	u32 kbase_device_id,
	u32 kbase_device_csg_slot_index
);

void __kbase_tlstream_tl_kbase_device_csg_idle(
	struct kbase_tlstream *stream,
	u32 kbase_device_id,
	u32 kbase_device_csg_slot_index
);

void __kbase_tlstream_tl_kbase_new_ctx(
	struct kbase_tlstream *stream,
	u32 kernel_ctx_id,
	u32 kbase_device_id
);

void __kbase_tlstream_tl_kbase_del_ctx(
	struct kbase_tlstream *stream,
	u32 kernel_ctx_id
);

void __kbase_tlstream_tl_kbase_ctx_assign_as(
	struct kbase_tlstream *stream,
	u32 kernel_ctx_id,
	u32 kbase_device_as_index
);

void __kbase_tlstream_tl_kbase_ctx_unassign_as(
	struct kbase_tlstream *stream,
	u32 kernel_ctx_id
);

void __kbase_tlstream_tl_kbase_new_kcpuqueue(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 kcpu_queue_id,
	u32 kernel_ctx_id,
	u32 kcpuq_num_pending_cmds
);

void __kbase_tlstream_tl_kbase_del_kcpuqueue(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_signal(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	const void *fence
);

void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_wait(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	const void *fence
);

void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_wait(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u64 cqs_obj_gpu_addr,
	u32 compare_value,
	u32 inherit_error
);

void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_set(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u64 cqs_obj_gpu_addr
);

void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_wait_operation(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u64 cqs_obj_gpu_addr,
	u64 compare_value,
	u32 condition,
	u32 data_type,
	u32 inherit_error
);

void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_set_operation(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u64 cqs_obj_gpu_addr,
	u64 value,
	u32 operation,
	u32 data_type
);

void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_map_import(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u64 map_import_buf_gpu_addr
);

void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u64 map_import_buf_gpu_addr
);

void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import_force(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u64 map_import_buf_gpu_addr
);

void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_alloc(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_alloc(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u64 jit_alloc_gpu_alloc_addr_dest,
	u64 jit_alloc_va_pages,
	u64 jit_alloc_commit_pages,
	u64 jit_alloc_extent,
	u32 jit_alloc_jit_id,
	u32 jit_alloc_bin_id,
	u32 jit_alloc_max_allocations,
	u32 jit_alloc_flags,
	u32 jit_alloc_usage_id
);

void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_alloc(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_free(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_free(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 jit_alloc_jit_id
);

void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_free(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_error_barrier(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_group_suspend(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	const void *group_suspend_buf,
	u32 gpu_cmdq_grp_handle
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_start(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_start(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_start(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_operation_start(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_operation_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set_operation(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error
);
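
/*
 * Illustrative sketch, not part of the generated header: the
 * array_begin/array_item/array_end declarations above describe a single
 * variable-length event, so a producer would bracket the per-item calls,
 * e.g. for a JIT allocation command carrying nr_allocs allocations:
 *
 *   __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_alloc(stream, queue);
 *   for (i = 0; i < nr_allocs; i++)
 *           __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_alloc(
 *                   stream, queue, addr_dest[i], va_pages[i], commit_pages[i],
 *                   extent[i], jit_id[i], bin_id[i], max_allocs, flags[i], usage_id[i]);
 *   __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_alloc(stream, queue);
 *
 * (queue, nr_allocs and the per-item arrays are hypothetical caller-side
 * names.)
 */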
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_start(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_start(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_start(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_alloc_start(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_alloc_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_alloc_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error,
	u64 jit_alloc_gpu_alloc_addr,
	u64 jit_alloc_mmu_flags
);

void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_alloc_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_free_start(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_free_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_free_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error,
	u64 jit_free_pages_used
);

void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_free_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_error_barrier(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_group_suspend_start(
	struct kbase_tlstream *stream,
	const void *kcpu_queue
);

void __kbase_tlstream_tl_kbase_kcpuqueue_execute_group_suspend_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error
);

void __kbase_tlstream_tl_kbase_csffw_fw_reloading(
	struct kbase_tlstream *stream,
	u64 csffw_cycle
);

void __kbase_tlstream_tl_kbase_csffw_fw_enabling(
	struct kbase_tlstream *stream,
	u64 csffw_cycle
);

void __kbase_tlstream_tl_kbase_csffw_fw_request_sleep(
	struct kbase_tlstream *stream,
	u64 csffw_cycle
);

void __kbase_tlstream_tl_kbase_csffw_fw_request_wakeup(
	struct kbase_tlstream *stream,
	u64 csffw_cycle
);

void __kbase_tlstream_tl_kbase_csffw_fw_request_halt(
	struct kbase_tlstream *stream,
	u64 csffw_cycle
);

void __kbase_tlstream_tl_kbase_csffw_fw_disabling(
	struct kbase_tlstream *stream,
	u64 csffw_cycle
);

void __kbase_tlstream_tl_kbase_csffw_fw_off(
	struct kbase_tlstream *stream,
	u64 csffw_cycle
);

void __kbase_tlstream_tl_kbase_csffw_tlstream_overflow(
	struct kbase_tlstream *stream,
	u64 csffw_timestamp,
	u64 csffw_cycle
);

void __kbase_tlstream_aux_pm_state(
	struct kbase_tlstream *stream,
	u32 core_type,
	u64 core_state_bitset
);

void __kbase_tlstream_aux_pagefault(
	struct kbase_tlstream *stream,
	u32 ctx_nr,
	u32 as_nr,
	u64 page_cnt_change
);

void __kbase_tlstream_aux_pagesalloc(
	struct kbase_tlstream *stream,
	u32 ctx_nr,
	u64 page_cnt
);

void __kbase_tlstream_aux_devfreq_target(
	struct kbase_tlstream *stream,
	u64 target_freq
);

void __kbase_tlstream_aux_jit_stats(
	struct kbase_tlstream *stream,
	u32 ctx_nr,
	u32 bid,
	u32 max_allocs,
	u32 allocs,
	u32 va_pages,
	u32 ph_pages
);

void __kbase_tlstream_aux_tiler_heap_stats(
	struct kbase_tlstream *stream,
	u32 ctx_nr,
	u64 heap_id,
	u32 va_pages,
	u32 ph_pages,
	u32 max_chunks,
	u32 chunk_size,
	u32 chunk_count,
	u32 target_in_flight,
	u32 nr_in_flight
);

void __kbase_tlstream_aux_event_job_slot(
	struct kbase_tlstream *stream,
	const void *ctx,
	u32 slot_nr,
	u32 atom_nr,
	u32 event
);

void __kbase_tlstream_aux_protected_enter_start(
	struct kbase_tlstream *stream,
	const void *gpu
);

void __kbase_tlstream_aux_protected_enter_end(
	struct kbase_tlstream *stream,
	const void *gpu
);

void __kbase_tlstream_aux_mmu_command(
	struct kbase_tlstream *stream,
	u32 kernel_ctx_id,
	u32 mmu_cmd_id,
	u32 mmu_synchronicity,
	u64 mmu_lock_addr,
	u32 mmu_lock_page_num
);

void __kbase_tlstream_aux_protected_leave_start(
	struct kbase_tlstream *stream,
	const void *gpu
);

void __kbase_tlstream_aux_protected_leave_end(
	struct kbase_tlstream *stream,
	const void *gpu
);

struct kbase_tlstream;

/**
 * KBASE_TLSTREAM_TL_NEW_CTX - object ctx is created
 *
 * @kbdev: Kbase device
 * @ctx: Name of the context object
 * @ctx_nr: Kernel context number
 * @tgid: Thread Group Id
 */
#define KBASE_TLSTREAM_TL_NEW_CTX( \
	kbdev, \
	ctx, \
	ctx_nr, \
	tgid \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_new_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				ctx, \
				ctx_nr, \
				tgid \
				); \
	} while (0)
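
/*
 * Illustrative sketch, not part of the generated header: the
 * KBASE_TLSTREAM_* macros in this file are the public entry points.
 * Callers pass the kbase_device; the enabled check and stream dispatch
 * happen inside the macro, e.g.
 *
 *   KBASE_TLSTREAM_TL_NEW_CTX(kbdev, kctx, kctx_id, tgid);
 *
 * (kctx, kctx_id and tgid are hypothetical caller-side names.) The
 * event is emitted only while the TLSTREAM_ENABLED bit is set in
 * kbdev->timeline_flags.
 */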

/**
 * KBASE_TLSTREAM_TL_NEW_GPU - object gpu is created
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 * @gpu_id: ID of the GPU
 * @core_count: Number of cores this GPU hosts
 */
#define KBASE_TLSTREAM_TL_NEW_GPU( \
	kbdev, \
	gpu, \
	gpu_id, \
	core_count \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_new_gpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				gpu, \
				gpu_id, \
				core_count \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_NEW_LPU - object lpu is created
 *
 * @kbdev: Kbase device
 * @lpu: Name of the Logical Processing Unit object
 * @lpu_nr: Sequential number assigned to the newly created LPU
 * @lpu_fn: Property describing functional abilities of this LPU
 */
#define KBASE_TLSTREAM_TL_NEW_LPU( \
	kbdev, \
	lpu, \
	lpu_nr, \
	lpu_fn \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_new_lpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				lpu, \
				lpu_nr, \
				lpu_fn \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_NEW_ATOM - object atom is created
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @atom_nr: Sequential number of an atom
 */
#define KBASE_TLSTREAM_TL_NEW_ATOM( \
	kbdev, \
	atom, \
	atom_nr \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_new_atom( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, \
				atom_nr \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_NEW_AS - address space object is created
 *
 * @kbdev: Kbase device
 * @address_space: Name of the address space object
 * @as_nr: Address space number
 */
#define KBASE_TLSTREAM_TL_NEW_AS( \
	kbdev, \
	address_space, \
	as_nr \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_new_as( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				address_space, \
				as_nr \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_DEL_CTX - context is destroyed
 *
 * @kbdev: Kbase device
 * @ctx: Name of the context object
 */
#define KBASE_TLSTREAM_TL_DEL_CTX( \
	kbdev, \
	ctx \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_del_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				ctx \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_DEL_ATOM - atom is destroyed
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_DEL_ATOM( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_del_atom( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_LIFELINK_LPU_GPU - lpu is deleted with gpu
 *
 * @kbdev: Kbase device
 * @lpu: Name of the Logical Processing Unit object
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_TL_LIFELINK_LPU_GPU( \
	kbdev, \
	lpu, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_lifelink_lpu_gpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				lpu, \
				gpu \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_LIFELINK_AS_GPU - address space is deleted with gpu
 *
 * @kbdev: Kbase device
 * @address_space: Name of the address space object
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_TL_LIFELINK_AS_GPU( \
	kbdev, \
	address_space, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_lifelink_as_gpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				address_space, \
				gpu \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_RET_CTX_LPU - context is retained by lpu
 *
 * @kbdev: Kbase device
 * @ctx: Name of the context object
 * @lpu: Name of the Logical Processing Unit object
 */
#define KBASE_TLSTREAM_TL_RET_CTX_LPU( \
	kbdev, \
	ctx, \
	lpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_ret_ctx_lpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				ctx, \
				lpu \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_RET_ATOM_CTX - atom is retained by context
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @ctx: Name of the context object
 */
#define KBASE_TLSTREAM_TL_RET_ATOM_CTX( \
	kbdev, \
	atom, \
	ctx \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_ret_atom_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, \
				ctx \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_RET_ATOM_LPU - atom is retained by lpu
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @lpu: Name of the Logical Processing Unit object
 * @attrib_match_list: List containing match operator attributes
 */
#define KBASE_TLSTREAM_TL_RET_ATOM_LPU( \
	kbdev, \
	atom, \
	lpu, \
	attrib_match_list \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_ret_atom_lpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, \
				lpu, \
				attrib_match_list \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_NRET_CTX_LPU - context is released by lpu
 *
 * @kbdev: Kbase device
 * @ctx: Name of the context object
 * @lpu: Name of the Logical Processing Unit object
 */
#define KBASE_TLSTREAM_TL_NRET_CTX_LPU( \
	kbdev, \
	ctx, \
	lpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_nret_ctx_lpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				ctx, \
				lpu \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_NRET_ATOM_CTX - atom is released by context
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @ctx: Name of the context object
 */
#define KBASE_TLSTREAM_TL_NRET_ATOM_CTX( \
	kbdev, \
	atom, \
	ctx \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_nret_atom_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, \
				ctx \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_NRET_ATOM_LPU - atom is released by lpu
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @lpu: Name of the Logical Processing Unit object
 */
#define KBASE_TLSTREAM_TL_NRET_ATOM_LPU( \
	kbdev, \
	atom, \
	lpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_nret_atom_lpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, \
				lpu \
				); \
	} while (0)
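
/*
 * Illustrative sketch, not part of the generated header: the RET/NRET
 * tracepoints are retain/release pairs, so an atom's attachment to a
 * context would typically be traced as
 *
 *   KBASE_TLSTREAM_TL_RET_ATOM_CTX(kbdev, katom, kctx);
 *   ... the atom is owned by the context ...
 *   KBASE_TLSTREAM_TL_NRET_ATOM_CTX(kbdev, katom, kctx);
 *
 * (katom and kctx are hypothetical caller-side names.)
 */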

/**
 * KBASE_TLSTREAM_TL_RET_AS_CTX - address space is retained by context
 *
 * @kbdev: Kbase device
 * @address_space: Name of the address space object
 * @ctx: Name of the context object
 */
#define KBASE_TLSTREAM_TL_RET_AS_CTX( \
	kbdev, \
	address_space, \
	ctx \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_ret_as_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				address_space, \
				ctx \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_NRET_AS_CTX - address space is released by context
 *
 * @kbdev: Kbase device
 * @address_space: Name of the address space object
 * @ctx: Name of the context object
 */
#define KBASE_TLSTREAM_TL_NRET_AS_CTX( \
	kbdev, \
	address_space, \
	ctx \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_nret_as_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				address_space, \
				ctx \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_RET_ATOM_AS - atom is retained by address space
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @address_space: Name of the address space object
 */
#define KBASE_TLSTREAM_TL_RET_ATOM_AS( \
	kbdev, \
	atom, \
	address_space \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_ret_atom_as( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, \
				address_space \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_NRET_ATOM_AS - atom is released by address space
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @address_space: Name of the address space object
 */
#define KBASE_TLSTREAM_TL_NRET_ATOM_AS( \
	kbdev, \
	atom, \
	address_space \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_nret_atom_as( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, \
				address_space \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG - atom job slot attributes
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @descriptor: Job descriptor address
 * @affinity: Job affinity
 * @config: Job config
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG( \
	kbdev, \
	atom, \
	descriptor, \
	affinity, \
	config \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_attrib_atom_config( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, \
				descriptor, \
				affinity, \
				config \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_JIT_USEDPAGES - used pages for jit
 *
 * @kbdev: Kbase device
 * @used_pages: Number of pages used for jit
 * @j_id: Unique ID provided by the caller; this is used to pair allocation and free requests.
 */
#define KBASE_TLSTREAM_TL_JIT_USEDPAGES( \
	kbdev, \
	used_pages, \
	j_id \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_jit_usedpages( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				used_pages, \
				j_id \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO - Information about JIT allocations
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @va_pgs: The minimum number of virtual pages required
 * @com_pgs: The minimum number of physical pages which should back the allocation.
 * @extent: Granularity of physical pages to grow the allocation by during a fault.
 * @j_id: Unique ID provided by the caller; this is used to pair allocation and free requests.
 * @bin_id: The JIT allocation bin, used in conjunction with max_allocations to limit the number of each type of JIT allocation.
 * @max_allocs: Maximum allocations allowed in this bin.
 * @jit_flags: Flags specifying the special requirements for the JIT allocation.
 * @usg_id: A hint about which allocation should be reused.
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO( \
	kbdev, \
	atom, \
	va_pgs, \
	com_pgs, \
	extent, \
	j_id, \
	bin_id, \
	max_allocs, \
	jit_flags, \
	usg_id \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_attrib_atom_jitallocinfo( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, \
				va_pgs, \
				com_pgs, \
				extent, \
				j_id, \
				bin_id, \
				max_allocs, \
				jit_flags, \
				usg_id \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO - Information about JIT frees
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @j_id: Unique ID provided by the caller; this is used to pair allocation and free requests.
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO( \
	kbdev, \
	atom, \
	j_id \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_attrib_atom_jitfreeinfo( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, \
				j_id \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG - address space attributes
 *
 * @kbdev: Kbase device
 * @address_space: Name of the address space object
 * @transtab: Configuration of the TRANSTAB register
 * @memattr: Configuration of the MEMATTR register
 * @transcfg: Configuration of the TRANSCFG register (or zero if not present)
 */
#define KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG( \
	kbdev, \
	address_space, \
	transtab, \
	memattr, \
	transcfg \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_attrib_as_config( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				address_space, \
				transtab, \
				memattr, \
				transcfg \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP - softstop event on given lpu
 *
 * @kbdev: Kbase device
 * @lpu: Name of the Logical Processing Unit object
 */
#define KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP( \
	kbdev, \
	lpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_event_lpu_softstop( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				lpu \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX - atom softstopped
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_event_atom_softstop_ex( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE - atom softstop issued
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_event_atom_softstop_issue( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START - atom soft job has started
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_event_atom_softjob_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END - atom soft job has completed
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_event_atom_softjob_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ARBITER_GRANTED - Arbiter has granted gpu access
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_TL_ARBITER_GRANTED( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_arbiter_granted( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				gpu \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ARBITER_STARTED - Driver is running again and able to process jobs
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_TL_ARBITER_STARTED( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_arbiter_started( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				gpu \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ARBITER_STOP_REQUESTED - Arbiter has requested driver to stop using gpu
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_TL_ARBITER_STOP_REQUESTED( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_arbiter_stop_requested( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				gpu \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ARBITER_STOPPED - Driver has stopped using gpu
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_TL_ARBITER_STOPPED( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_arbiter_stopped( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				gpu \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ARBITER_REQUESTED - Driver has requested the arbiter for gpu access
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_TL_ARBITER_REQUESTED( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_arbiter_requested( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				gpu \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_JD_GPU_SOFT_RESET - gpu soft reset
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_JD_GPU_SOFT_RESET( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_jd_gpu_soft_reset( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				gpu \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_JD_TILER_HEAP_CHUNK_ALLOC - Tiler Heap Chunk Allocation
 *
 * @kbdev: Kbase device
 * @ctx_nr: Kernel context number
 * @heap_id: Unique id used to represent a heap under a context
 * @chunk_va: Virtual start address of tiler heap chunk
 */
#define KBASE_TLSTREAM_JD_TILER_HEAP_CHUNK_ALLOC( \
	kbdev, \
	ctx_nr, \
	heap_id, \
	chunk_va \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_jd_tiler_heap_chunk_alloc( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				ctx_nr, \
				heap_id, \
				chunk_va \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_JS_SCHED_START - Scheduling starts
 *
 * @kbdev: Kbase device
 * @dummy: dummy argument
 */
#define KBASE_TLSTREAM_TL_JS_SCHED_START( \
	kbdev, \
	dummy \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_js_sched_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				dummy \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_JS_SCHED_END - Scheduling ends
 *
 * @kbdev: Kbase device
 * @dummy: dummy argument
 */
#define KBASE_TLSTREAM_TL_JS_SCHED_END( \
	kbdev, \
	dummy \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_js_sched_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				dummy \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_JD_SUBMIT_ATOM_START - Submitting an atom starts
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_JD_SUBMIT_ATOM_START( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_jd_submit_atom_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_JD_SUBMIT_ATOM_END - Submitting an atom ends
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_JD_SUBMIT_ATOM_END( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_jd_submit_atom_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_JD_DONE_NO_LOCK_START - Within function kbase_jd_done_nolock
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_JD_DONE_NO_LOCK_START( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_jd_done_no_lock_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_JD_DONE_NO_LOCK_END - Within function kbase_jd_done_nolock - end
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_JD_DONE_NO_LOCK_END( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_jd_done_no_lock_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_JD_DONE_START - Start of kbase_jd_done
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_JD_DONE_START( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_jd_done_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_JD_DONE_END - End of kbase_jd_done
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_JD_DONE_END( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_jd_done_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_JD_ATOM_COMPLETE - Atom marked complete
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_JD_ATOM_COMPLETE( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_jd_atom_complete( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_RUN_ATOM_START - Running of atom starts
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @atom_nr: Sequential number of an atom
 */
#define KBASE_TLSTREAM_TL_RUN_ATOM_START( \
	kbdev, \
	atom, \
	atom_nr \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_run_atom_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, \
				atom_nr \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_RUN_ATOM_END - Running of atom ends
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @atom_nr: Sequential number of an atom
 */
#define KBASE_TLSTREAM_TL_RUN_ATOM_END( \
	kbdev, \
	atom, \
	atom_nr \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_run_atom_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, \
				atom_nr \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY - atom priority
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @prio: Atom priority
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY( \
	kbdev, \
	atom, \
	prio \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
			__kbase_tlstream_tl_attrib_atom_priority( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, \
				prio \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE - atom state
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @state: Atom state
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE( \
	kbdev, \
	atom, \
	state \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
			__kbase_tlstream_tl_attrib_atom_state( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, \
				state \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED - atom caused priority change
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
			__kbase_tlstream_tl_attrib_atom_prioritized( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom \
				); \
	} while (0)
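
/*
 * Illustrative sketch, not part of the generated header: unlike most
 * tracepoints in this file, the three ATTRIB_ATOM_* macros above are
 * gated on BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS instead of
 * TLSTREAM_ENABLED, so they fire only when that bit is set in
 * kbdev->timeline_flags. An atom state change would be reported with
 * one of the TL_ATOM_STATE_* values defined earlier, e.g.
 *
 *   KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(kbdev, katom, TL_ATOM_STATE_READY);
 *
 * (katom is a hypothetical caller-side name.)
 */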

/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT - jit done for atom
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @edit_addr: Address edited by jit
 * @new_addr: Address placed into the edited location
 * @jit_flags: Flags specifying the special requirements for the JIT allocation.
 * @mem_flags: Flags defining the properties of a memory region
 * @j_id: Unique ID provided by the caller; this is used to pair allocation and free requests.
 * @com_pgs: The minimum number of physical pages which should back the allocation.
 * @extent: Granularity of physical pages to grow the allocation by during a fault.
 * @va_pgs: The minimum number of virtual pages required
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT( \
	kbdev, \
	atom, \
	edit_addr, \
	new_addr, \
	jit_flags, \
	mem_flags, \
	j_id, \
	com_pgs, \
	extent, \
	va_pgs \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_JOB_DUMPING_ENABLED) \
			__kbase_tlstream_tl_attrib_atom_jit( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, \
				edit_addr, \
				new_addr, \
				jit_flags, \
				mem_flags, \
				j_id, \
				com_pgs, \
				extent, \
				va_pgs \
				); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_KBASE_NEW_DEVICE - New KBase Device
 *
 * @kbdev: Kbase device
 * @kbase_device_id: The ID of the physical hardware
 * @kbase_device_gpu_core_count: The number of gpu cores in the physical hardware
 * @kbase_device_max_num_csgs: The max number of CSGs the physical hardware supports
 * @kbase_device_as_count: The number of address spaces the physical hardware has available
 * @kbase_device_sb_entry_count: The number of entries each scoreboard set in the physical hardware has available
 * @kbase_device_has_cross_stream_sync: Whether cross-stream synchronization is supported
 * @kbase_device_supports_gpu_sleep: Whether GPU sleep is supported
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_NEW_DEVICE( \
	kbdev, \
	kbase_device_id, \
	kbase_device_gpu_core_count, \
	kbase_device_max_num_csgs, \
	kbase_device_as_count, \
	kbase_device_sb_entry_count, \
	kbase_device_has_cross_stream_sync, \
	kbase_device_supports_gpu_sleep \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_new_device( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kbase_device_id, \
				kbase_device_gpu_core_count, \
				kbase_device_max_num_csgs, \
				kbase_device_as_count, \
				kbase_device_sb_entry_count, \
				kbase_device_has_cross_stream_sync, \
				kbase_device_supports_gpu_sleep \
				); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_NEW_DEVICE( \
	kbdev, \
	kbase_device_id, \
	kbase_device_gpu_core_count, \
	kbase_device_max_num_csgs, \
	kbase_device_as_count, \
	kbase_device_sb_entry_count, \
	kbase_device_has_cross_stream_sync, \
	kbase_device_supports_gpu_sleep \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_GPUCMDQUEUE_KICK - Kernel receives a request to process new GPU queue instructions
 *
 * @kbdev: Kbase device
 * @kernel_ctx_id: Unique ID for the KBase Context
 * @buffer_gpu_addr: Address of the GPU queue's command buffer
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_GPUCMDQUEUE_KICK( \
	kbdev, \
	kernel_ctx_id, \
	buffer_gpu_addr \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_gpucmdqueue_kick( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kernel_ctx_id, \
				buffer_gpu_addr \
				); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_GPUCMDQUEUE_KICK( \
	kbdev, \
	kernel_ctx_id, \
	buffer_gpu_addr \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */
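
/*
 * Illustrative sketch, not part of the generated header: every
 * KBASE_TLSTREAM_TL_KBASE_* macro is defined twice. On CSF builds
 * (MALI_USE_CSF) it emits the event when
 * BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS is set; otherwise it compiles
 * to an empty do { } while (0), so call sites need no conditional
 * compilation of their own:
 *
 *   KBASE_TLSTREAM_TL_KBASE_NEW_CTX(kbdev, kernel_ctx_id, kbase_device_id);
 *
 * is valid C in both build flavours.
 */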

/**
 * KBASE_TLSTREAM_TL_KBASE_DEVICE_PROGRAM_CSG - CSG is programmed to a slot
 *
 * @kbdev: Kbase device
 * @kbase_device_id: The ID of the physical hardware
 * @kernel_ctx_id: Unique ID for the KBase Context
 * @gpu_cmdq_grp_handle: GPU Command Queue Group handle which will match the userspace handle
 * @kbase_device_csg_slot_index: The index of the slot in the scheduler being programmed
 * @kbase_device_csg_slot_resuming: Whether the CSG is being resumed
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_PROGRAM_CSG( \
	kbdev, \
	kbase_device_id, \
	kernel_ctx_id, \
	gpu_cmdq_grp_handle, \
	kbase_device_csg_slot_index, \
	kbase_device_csg_slot_resuming \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_device_program_csg( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kbase_device_id, \
				kernel_ctx_id, \
				gpu_cmdq_grp_handle, \
				kbase_device_csg_slot_index, \
				kbase_device_csg_slot_resuming \
				); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_PROGRAM_CSG( \
	kbdev, \
	kbase_device_id, \
	kernel_ctx_id, \
	gpu_cmdq_grp_handle, \
	kbase_device_csg_slot_index, \
	kbase_device_csg_slot_resuming \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_DEVICE_DEPROGRAM_CSG - CSG is deprogrammed from a slot
 *
 * @kbdev: Kbase device
 * @kbase_device_id: The ID of the physical hardware
 * @kbase_device_csg_slot_index: The index of the slot in the scheduler whose CSG is being deprogrammed
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_DEPROGRAM_CSG( \
	kbdev, \
	kbase_device_id, \
	kbase_device_csg_slot_index \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_device_deprogram_csg( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kbase_device_id, \
				kbase_device_csg_slot_index \
				); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_DEPROGRAM_CSG( \
	kbdev, \
	kbase_device_id, \
	kbase_device_csg_slot_index \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_DEVICE_HALTING_CSG - CSG is halting
 *
 * @kbdev: Kbase device
 * @kbase_device_id: The ID of the physical hardware
 * @kbase_device_csg_slot_index: The index of the slot in the scheduler whose CSG is being halted
 * @kbase_device_csg_slot_suspending: Whether the CSG is being suspended
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_HALTING_CSG( \
	kbdev, \
	kbase_device_id, \
	kbase_device_csg_slot_index, \
	kbase_device_csg_slot_suspending \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_device_halting_csg( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kbase_device_id, \
				kbase_device_csg_slot_index, \
				kbase_device_csg_slot_suspending \
				); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_HALTING_CSG( \
	kbdev, \
	kbase_device_id, \
	kbase_device_csg_slot_index, \
	kbase_device_csg_slot_suspending \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */
/**
 * KBASE_TLSTREAM_TL_KBASE_DEVICE_SUSPEND_CSG - CSG is suspended
 *
 * @kbdev: Kbase device
 * @kbase_device_id: The ID of the physical hardware
 * @kbase_device_csg_slot_index: The index of the slot in the scheduler whose CSG is being suspended
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_SUSPEND_CSG( \
	kbdev, \
	kbase_device_id, \
	kbase_device_csg_slot_index \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_device_suspend_csg( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kbase_device_id, \
				kbase_device_csg_slot_index \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_SUSPEND_CSG( \
	kbdev, \
	kbase_device_id, \
	kbase_device_csg_slot_index \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_DEVICE_CSG_IDLE - KBase device is notified that CSG is idle.
 *
 * @kbdev: Kbase device
 * @kbase_device_id: The ID of the physical hardware
 * @kbase_device_csg_slot_index: The index of the slot in the scheduler for whose CSG an idle notification has been received
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_CSG_IDLE( \
	kbdev, \
	kbase_device_id, \
	kbase_device_csg_slot_index \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_device_csg_idle( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kbase_device_id, \
				kbase_device_csg_slot_index \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_CSG_IDLE( \
	kbdev, \
	kbase_device_id, \
	kbase_device_csg_slot_index \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_NEW_CTX - New KBase Context
 *
 * @kbdev: Kbase device
 * @kernel_ctx_id: Unique ID for the KBase Context
 * @kbase_device_id: The ID of the physical hardware
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_NEW_CTX( \
	kbdev, \
	kernel_ctx_id, \
	kbase_device_id \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_new_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kernel_ctx_id, \
				kbase_device_id \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_NEW_CTX( \
	kbdev, \
	kernel_ctx_id, \
	kbase_device_id \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_DEL_CTX - Delete KBase Context
 *
 * @kbdev: Kbase device
 * @kernel_ctx_id: Unique ID for the KBase Context
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_DEL_CTX( \
	kbdev, \
	kernel_ctx_id \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_del_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kernel_ctx_id \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_DEL_CTX( \
	kbdev, \
	kernel_ctx_id \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

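/* Sketch of the expected pairing (hypothetical call sites): a context's
 * timeline lifetime is bracketed by NEW_CTX and DEL_CTX, with the same
 * kernel_ctx_id used in both events so a viewer can match them:
 *
 *	KBASE_TLSTREAM_TL_KBASE_NEW_CTX(kbdev, kernel_ctx_id, dev_id);
 *	...
 *	KBASE_TLSTREAM_TL_KBASE_DEL_CTX(kbdev, kernel_ctx_id);
 */
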
/**
 * KBASE_TLSTREAM_TL_KBASE_CTX_ASSIGN_AS - Address Space is assigned to a KBase context
 *
 * @kbdev: Kbase device
 * @kernel_ctx_id: Unique ID for the KBase Context
 * @kbase_device_as_index: The index of the device address space being assigned
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CTX_ASSIGN_AS( \
	kbdev, \
	kernel_ctx_id, \
	kbase_device_as_index \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_ctx_assign_as( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kernel_ctx_id, \
				kbase_device_as_index \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CTX_ASSIGN_AS( \
	kbdev, \
	kernel_ctx_id, \
	kbase_device_as_index \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS - Address Space is unassigned from a KBase context
 *
 * @kbdev: Kbase device
 * @kernel_ctx_id: Unique ID for the KBase Context
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS( \
	kbdev, \
	kernel_ctx_id \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_ctx_unassign_as( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kernel_ctx_id \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS( \
	kbdev, \
	kernel_ctx_id \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_NEW_KCPUQUEUE - New KCPU Queue
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @kcpu_queue_id: KCPU queue ID
 * @kernel_ctx_id: Unique ID for the KBase Context
 * @kcpuq_num_pending_cmds: Number of commands already enqueued in the KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_NEW_KCPUQUEUE( \
	kbdev, \
	kcpu_queue, \
	kcpu_queue_id, \
	kernel_ctx_id, \
	kcpuq_num_pending_cmds \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_new_kcpuqueue( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				kcpu_queue_id, \
				kernel_ctx_id, \
				kcpuq_num_pending_cmds \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_NEW_KCPUQUEUE( \
	kbdev, \
	kcpu_queue, \
	kcpu_queue_id, \
	kernel_ctx_id, \
	kcpuq_num_pending_cmds \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_DEL_KCPUQUEUE - Delete KCPU Queue
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_DEL_KCPUQUEUE( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_del_kcpuqueue( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_DEL_KCPUQUEUE( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

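/* Hypothetical usage sketch: a KCPU queue is announced together with its
 * creation parameters and later deleted; the same queue pointer is the
 * key that pairs the two events:
 *
 *	KBASE_TLSTREAM_TL_KBASE_NEW_KCPUQUEUE(kbdev, queue, queue_id,
 *		kernel_ctx_id, num_pending_cmds);
 *	...
 *	KBASE_TLSTREAM_TL_KBASE_DEL_KCPUQUEUE(kbdev, queue);
 */
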
/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL - KCPU Queue enqueues Signal on Fence
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @fence: Fence object handle
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL( \
	kbdev, \
	kcpu_queue, \
	fence \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_signal( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				fence \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL( \
	kbdev, \
	kcpu_queue, \
	fence \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT - KCPU Queue enqueues Wait on Fence
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @fence: Fence object handle
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT( \
	kbdev, \
	kcpu_queue, \
	fence \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_wait( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				fence \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT( \
	kbdev, \
	kcpu_queue, \
	fence \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT - KCPU Queue enqueues Wait on Cross Queue Sync Object
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @cqs_obj_gpu_addr: CQS Object GPU pointer
 * @compare_value: Semaphore value that should be exceeded for the WAIT to pass
 * @inherit_error: Flag which indicates if the CQS object error state should be inherited by the queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT( \
	kbdev, \
	kcpu_queue, \
	cqs_obj_gpu_addr, \
	compare_value, \
	inherit_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_wait( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				cqs_obj_gpu_addr, \
				compare_value, \
				inherit_error \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT( \
	kbdev, \
	kcpu_queue, \
	cqs_obj_gpu_addr, \
	compare_value, \
	inherit_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET - KCPU Queue enqueues Set on Cross Queue Sync Object
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @cqs_obj_gpu_addr: CQS Object GPU pointer
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET( \
	kbdev, \
	kcpu_queue, \
	cqs_obj_gpu_addr \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_set( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				cqs_obj_gpu_addr \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET( \
	kbdev, \
	kcpu_queue, \
	cqs_obj_gpu_addr \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT_OPERATION - KCPU Queue enqueues Wait Operation on Cross Queue Sync Object
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @cqs_obj_gpu_addr: CQS Object GPU pointer
 * @compare_value: Value that should be compared to semaphore value for the WAIT to pass
 * @condition: Condition for unblocking WAITs on Timeline Cross Queue Sync Object (e.g. greater than, less than or equal)
 * @data_type: Data type of a CQS Object's value
 * @inherit_error: Flag which indicates if the CQS object error state should be inherited by the queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT_OPERATION( \
	kbdev, \
	kcpu_queue, \
	cqs_obj_gpu_addr, \
	compare_value, \
	condition, \
	data_type, \
	inherit_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_wait_operation( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				cqs_obj_gpu_addr, \
				compare_value, \
				condition, \
				data_type, \
				inherit_error \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT_OPERATION( \
	kbdev, \
	kcpu_queue, \
	cqs_obj_gpu_addr, \
	compare_value, \
	condition, \
	data_type, \
	inherit_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET_OPERATION - KCPU Queue enqueues Set Operation on Cross Queue Sync Object
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @cqs_obj_gpu_addr: CQS Object GPU pointer
 * @value: Value that will be set or added to the semaphore
 * @operation: Operation type performed on the semaphore value (SET or ADD)
 * @data_type: Data type of a CQS Object's value
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET_OPERATION( \
	kbdev, \
	kcpu_queue, \
	cqs_obj_gpu_addr, \
	value, \
	operation, \
	data_type \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_set_operation( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				cqs_obj_gpu_addr, \
				value, \
				operation, \
				data_type \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET_OPERATION( \
	kbdev, \
	kcpu_queue, \
	cqs_obj_gpu_addr, \
	value, \
	operation, \
	data_type \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

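/* Illustrative sketch of a CQS wait operation enqueue (all argument names
 * below are hypothetical; the actual encodings of the condition and data
 * type arguments are defined elsewhere in the driver):
 *
 *	KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT_OPERATION(kbdev,
 *		queue, obj_gpu_addr, compare_value, condition, data_type,
 *		inherit_error);
 */
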
/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT - KCPU Queue enqueues Map Import
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @map_import_buf_gpu_addr: Map import buffer GPU pointer
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT( \
	kbdev, \
	kcpu_queue, \
	map_import_buf_gpu_addr \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_map_import( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				map_import_buf_gpu_addr \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT( \
	kbdev, \
	kcpu_queue, \
	map_import_buf_gpu_addr \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT - KCPU Queue enqueues Unmap Import
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @map_import_buf_gpu_addr: Map import buffer GPU pointer
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT( \
	kbdev, \
	kcpu_queue, \
	map_import_buf_gpu_addr \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				map_import_buf_gpu_addr \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT( \
	kbdev, \
	kcpu_queue, \
	map_import_buf_gpu_addr \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE - KCPU Queue enqueues Unmap Import ignoring reference count
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @map_import_buf_gpu_addr: Map import buffer GPU pointer
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE( \
	kbdev, \
	kcpu_queue, \
	map_import_buf_gpu_addr \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import_force( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				map_import_buf_gpu_addr \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE( \
	kbdev, \
	kcpu_queue, \
	map_import_buf_gpu_addr \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC - Begin array of KCPU Queue enqueues JIT Alloc
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_alloc( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC - Array item of KCPU Queue enqueues JIT Alloc
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @jit_alloc_gpu_alloc_addr_dest: The GPU virtual address to write the JIT allocated GPU virtual address to
 * @jit_alloc_va_pages: The minimum number of virtual pages required
 * @jit_alloc_commit_pages: The minimum number of physical pages which should back the allocation
 * @jit_alloc_extent: Granularity of physical pages to grow the allocation by during a fault
 * @jit_alloc_jit_id: Unique ID provided by the caller; this is used to pair allocation and free requests. Zero is not a valid value
 * @jit_alloc_bin_id: The JIT allocation bin, used in conjunction with max_allocations to limit the number of each type of JIT allocation
 * @jit_alloc_max_allocations: The maximum number of allocations allowed within the bin specified by bin_id. Should be the same for all JIT allocations within the same bin
 * @jit_alloc_flags: Flags specifying the special requirements for the JIT allocation
 * @jit_alloc_usage_id: A hint about which allocation should be reused. The kernel should attempt to use a previous allocation with the same usage_id
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
	kbdev, \
	kcpu_queue, \
	jit_alloc_gpu_alloc_addr_dest, \
	jit_alloc_va_pages, \
	jit_alloc_commit_pages, \
	jit_alloc_extent, \
	jit_alloc_jit_id, \
	jit_alloc_bin_id, \
	jit_alloc_max_allocations, \
	jit_alloc_flags, \
	jit_alloc_usage_id \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_alloc( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				jit_alloc_gpu_alloc_addr_dest, \
				jit_alloc_va_pages, \
				jit_alloc_commit_pages, \
				jit_alloc_extent, \
				jit_alloc_jit_id, \
				jit_alloc_bin_id, \
				jit_alloc_max_allocations, \
				jit_alloc_flags, \
				jit_alloc_usage_id \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
	kbdev, \
	kcpu_queue, \
	jit_alloc_gpu_alloc_addr_dest, \
	jit_alloc_va_pages, \
	jit_alloc_commit_pages, \
	jit_alloc_extent, \
	jit_alloc_jit_id, \
	jit_alloc_bin_id, \
	jit_alloc_max_allocations, \
	jit_alloc_flags, \
	jit_alloc_usage_id \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC - End array of KCPU Queue enqueues JIT Alloc
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_alloc( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

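/* A JIT Alloc enqueue is traced as a bracketed array: one BEGIN event,
 * one ITEM event per allocation in the command, then one END event.
 * Sketch with hypothetical loop variables:
 *
 *	KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC(kbdev, queue);
 *	for (i = 0; i < nr_allocs; i++)
 *		KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC(
 *			kbdev, queue, ...);
 *	KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC(kbdev, queue);
 */
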
/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE - Begin array of KCPU Queue enqueues JIT Free
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_free( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE - Array item of KCPU Queue enqueues JIT Free
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @jit_alloc_jit_id: Unique ID provided by the caller; this is used to pair allocation and free requests. Zero is not a valid value
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE( \
	kbdev, \
	kcpu_queue, \
	jit_alloc_jit_id \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_free( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				jit_alloc_jit_id \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE( \
	kbdev, \
	kcpu_queue, \
	jit_alloc_jit_id \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE - End array of KCPU Queue enqueues JIT Free
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_free( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_ERROR_BARRIER - KCPU Queue enqueues Error Barrier
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_ERROR_BARRIER( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_error_barrier( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_ERROR_BARRIER( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND - KCPU Queue enqueues Group Suspend
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @group_suspend_buf: Pointer to the suspend buffer structure
 * @gpu_cmdq_grp_handle: GPU Command Queue Group handle which will match userspace
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND( \
	kbdev, \
	kcpu_queue, \
	group_suspend_buf, \
	gpu_cmdq_grp_handle \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_group_suspend( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				group_suspend_buf, \
				gpu_cmdq_grp_handle \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND( \
	kbdev, \
	kcpu_queue, \
	group_suspend_buf, \
	gpu_cmdq_grp_handle \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START - KCPU Queue starts a Signal on Fence
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END - KCPU Queue ends a Signal on Fence
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				execute_error \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START - KCPU Queue starts a Wait on Fence
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END - KCPU Queue ends a Wait on Fence
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				execute_error \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START - KCPU Queue starts a Wait on Cross Queue Sync Object
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END - KCPU Queue ends a Wait on Cross Queue Sync Object
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				execute_error \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET - KCPU Queue executes a Set on Cross Queue Sync Object
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				execute_error \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

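/* The EXECUTE_*_START/_END tracepoints bracket the host-side processing
 * of a queue item; the END variant carries execute_error so a timeline
 * viewer can flag failed items. Hypothetical sketch (wait_for_fence is an
 * illustrative helper, not a driver API):
 *
 *	KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START(kbdev, queue);
 *	err = wait_for_fence(...);
 *	KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END(kbdev, queue, err);
 */
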
/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_START - KCPU Queue starts a Wait Operation on Cross Queue Sync Object
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_operation_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_END - KCPU Queue ends a Wait Operation on Cross Queue Sync Object
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_operation_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				execute_error \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET_OPERATION - KCPU Queue executes a Set Operation on Cross Queue Sync Object
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET_OPERATION( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set_operation( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				execute_error \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET_OPERATION( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START - KCPU Queue starts a Map Import
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END - KCPU Queue ends a Map Import
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				execute_error \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START - KCPU Queue starts an Unmap Import
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END - KCPU Queue ends an Unmap Import
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				execute_error \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START - KCPU Queue starts an Unmap Import ignoring reference count
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END - KCPU Queue ends an Unmap Import ignoring reference count
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				execute_error \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START - KCPU Queue starts an array of JIT Allocs
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_alloc_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END - Begin array of KCPU Queue ends an array of JIT Allocs
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_alloc_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END - Array item of KCPU Queue ends an array of JIT Allocs
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 * @jit_alloc_gpu_alloc_addr: The JIT allocated GPU virtual address
 * @jit_alloc_mmu_flags: The MMU flags for the JIT allocation
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
	kbdev, \
	kcpu_queue, \
	execute_error, \
	jit_alloc_gpu_alloc_addr, \
	jit_alloc_mmu_flags \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_alloc_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				execute_error, \
				jit_alloc_gpu_alloc_addr, \
				jit_alloc_mmu_flags \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
	kbdev, \
	kcpu_queue, \
	execute_error, \
	jit_alloc_gpu_alloc_addr, \
	jit_alloc_mmu_flags \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END - End array of KCPU Queue ends an array of JIT Allocs
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_alloc_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START - KCPU Queue starts an array of JIT Frees
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_free_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END - Begin array of KCPU Queue ends an array of JIT Frees
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_free_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END - Array item of KCPU Queue ends an array of JIT Frees
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 * @jit_free_pages_used: The actual number of pages used by the JIT allocation
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
	kbdev, \
	kcpu_queue, \
	execute_error, \
	jit_free_pages_used \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_free_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				execute_error, \
				jit_free_pages_used \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
	kbdev, \
	kcpu_queue, \
	execute_error, \
	jit_free_pages_used \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END - End array of KCPU Queue ends an array of JIT Frees
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_free_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_ERROR_BARRIER - KCPU Queue executes an Error Barrier
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_ERROR_BARRIER( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_error_barrier( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_ERROR_BARRIER( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_START - KCPU Queue starts a group suspend
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_group_suspend_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_END - KCPU Queue ends a group suspend
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_group_suspend_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, \
				execute_error \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_RELOADING - CSF FW is being reloaded
 *
 * @kbdev: Kbase device
 * @csffw_cycle: Cycle number of a CSFFW event
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_RELOADING( \
	kbdev, \
	csffw_cycle \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_csffw_fw_reloading( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				csffw_cycle \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_RELOADING( \
	kbdev, \
	csffw_cycle \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_ENABLING - CSF FW is being enabled
 *
 * @kbdev: Kbase device
 * @csffw_cycle: Cycle number of a CSFFW event
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_ENABLING( \
	kbdev, \
	csffw_cycle \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_csffw_fw_enabling( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				csffw_cycle \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_ENABLING( \
	kbdev, \
	csffw_cycle \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_SLEEP - CSF FW sleep is requested
 *
 * @kbdev: Kbase device
 * @csffw_cycle: Cycle number of a CSFFW event
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_SLEEP( \
	kbdev, \
	csffw_cycle \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_csffw_fw_request_sleep( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				csffw_cycle \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_SLEEP( \
	kbdev, \
	csffw_cycle \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_WAKEUP - CSF FW wake up is requested
 *
 * @kbdev: Kbase device
 * @csffw_cycle: Cycle number of a CSFFW event
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_WAKEUP( \
	kbdev, \
	csffw_cycle \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_csffw_fw_request_wakeup( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				csffw_cycle \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_WAKEUP( \
	kbdev, \
	csffw_cycle \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

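/* Note that the CSFFW firmware-state tracepoints above and below are
 * gated on BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS rather than the CSF
 * tracepoint flag, so firmware tracing can be toggled independently.
 * Hypothetical sketch:
 *
 *	KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_SLEEP(kbdev, cycle);
 */
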
/**
 * KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_HALT - CSF FW halt is requested
 *
 * @kbdev: Kbase device
 * @csffw_cycle: Cycle number of a CSFFW event
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_HALT( \
	kbdev, \
	csffw_cycle \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_csffw_fw_request_halt( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				csffw_cycle \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_HALT( \
	kbdev, \
	csffw_cycle \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_DISABLING - CSF FW is being disabled
 *
 * @kbdev: Kbase device
 * @csffw_cycle: Cycle number of a CSFFW event
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_DISABLING( \
	kbdev, \
	csffw_cycle \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_csffw_fw_disabling( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				csffw_cycle \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_DISABLING( \
	kbdev, \
	csffw_cycle \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_OFF - CSF FW is off
 *
 * @kbdev: Kbase device
 * @csffw_cycle: Cycle number of a CSFFW event
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_OFF( \
	kbdev, \
	csffw_cycle \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_csffw_fw_off( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				csffw_cycle \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_OFF( \
	kbdev, \
	csffw_cycle \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_CSFFW_TLSTREAM_OVERFLOW - An overflow has happened with the CSFFW Timeline stream
 *
 * @kbdev: Kbase device
 * @csffw_timestamp: Timestamp of a CSFFW event
 * @csffw_cycle: Cycle number of a CSFFW event
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_TLSTREAM_OVERFLOW( \
	kbdev, \
	csffw_timestamp, \
	csffw_cycle \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_csffw_tlstream_overflow( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				csffw_timestamp, \
				csffw_cycle \
			); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_TLSTREAM_OVERFLOW( \
	kbdev, \
	csffw_timestamp, \
	csffw_cycle \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_AUX_PM_STATE - PM state
 *
 * @kbdev: Kbase device
 * @core_type: Core type (shader, tiler, l2 cache, l3 cache)
 * @core_state_bitset: 64-bit bitmask reporting the power state of the cores (1-ON, 0-OFF)
 */
#define KBASE_TLSTREAM_AUX_PM_STATE( \
	kbdev, \
	core_type, \
	core_state_bitset \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_pm_state( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				core_type, \
				core_state_bitset \
			); \
	} while (0)

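/* The AUX_* tracepoints dispatch to the auxiliary stream (note the "aux"
 * argument to __TL_DISPATCH_STREAM) and are gated on the global
 * TLSTREAM_ENABLED bit rather than a CSF-specific flag. Hypothetical
 * sketch reporting two cores powered on for some core type:
 *
 *	KBASE_TLSTREAM_AUX_PM_STATE(kbdev, core_type, 0x3ull);
 */
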
/**
 * KBASE_TLSTREAM_AUX_PAGEFAULT - Page fault
 *
 * @kbdev: Kbase device
 * @ctx_nr: Kernel context number
 * @as_nr: Address space number
 * @page_cnt_change: Number of pages to be added
 */
#define KBASE_TLSTREAM_AUX_PAGEFAULT( \
	kbdev, \
	ctx_nr, \
	as_nr, \
	page_cnt_change \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_pagefault( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				ctx_nr, \
				as_nr, \
				page_cnt_change \
			); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_PAGESALLOC - Total alloc pages change
 *
 * @kbdev: Kbase device
 * @ctx_nr: Kernel context number
 * @page_cnt: Number of pages used by the context
 */
#define KBASE_TLSTREAM_AUX_PAGESALLOC( \
	kbdev, \
	ctx_nr, \
	page_cnt \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_pagesalloc( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				ctx_nr, \
				page_cnt \
			); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_DEVFREQ_TARGET - New device frequency target
 *
 * @kbdev: Kbase device
 * @target_freq: New target frequency
 */
#define KBASE_TLSTREAM_AUX_DEVFREQ_TARGET( \
	kbdev, \
	target_freq \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_devfreq_target( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				target_freq \
			); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_JIT_STATS - per-bin JIT statistics
 *
 * @kbdev: Kbase device
 * @ctx_nr: Kernel context number
 * @bid: JIT bin id
 * @max_allocs: Maximum allocations allowed in this bin
 * @allocs: Number of active allocations in this bin
 * @va_pages: Number of virtual pages allocated in this bin
 * @ph_pages: Number of physical pages allocated in this bin
 */
#define KBASE_TLSTREAM_AUX_JIT_STATS( \
	kbdev, \
	ctx_nr, \
	bid, \
	max_allocs, \
	allocs, \
	va_pages, \
	ph_pages \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_jit_stats( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				ctx_nr, \
				bid, \
				max_allocs, \
				allocs, \
				va_pages, \
				ph_pages \
			); \
	} while (0)


/**
 * KBASE_TLSTREAM_AUX_TILER_HEAP_STATS - Tiler heap statistics
 *
 * @kbdev: Kbase device
 * @ctx_nr: Kernel context number
 * @heap_id: Unique id used to represent a heap under a context
 * @va_pages: Number of virtual pages allocated in this heap
 * @ph_pages: Number of physical pages allocated in this heap
 * @max_chunks: The maximum number of chunks that the heap should be allowed to use
 * @chunk_size: Size of each chunk in tiler heap, in bytes
 * @chunk_count: The number of chunks currently allocated in the tiler heap
 * @target_in_flight: Number of render-passes that the driver should attempt to
 *                    keep in flight for which allocation of new chunks is allowed
 * @nr_in_flight: Number of render-passes that are in flight
 */
#define KBASE_TLSTREAM_AUX_TILER_HEAP_STATS( \
	kbdev, \
	ctx_nr, \
	heap_id, \
	va_pages, \
	ph_pages, \
	max_chunks, \
	chunk_size, \
	chunk_count, \
	target_in_flight, \
	nr_in_flight \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_tiler_heap_stats( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				ctx_nr, \
				heap_id, \
				va_pages, \
				ph_pages, \
				max_chunks, \
				chunk_size, \
				chunk_count, \
				target_in_flight, \
				nr_in_flight \
			); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT - Event on a given job slot
 *
 * @kbdev: Kbase device
 * @ctx: Name of the context object
 * @slot_nr: Job slot number
 * @atom_nr: Sequential number of an atom
 * @event: Event type. One of the TL_JS_EVENT values
 */
#define KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT( \
	kbdev, \
	ctx, \
	slot_nr, \
	atom_nr, \
	event \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_event_job_slot( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				ctx, \
				slot_nr, \
				atom_nr, \
				event \
			); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START - Enter protected mode start
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_protected_enter_start( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				gpu \
			); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END - Enter protected mode end
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_protected_enter_end( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				gpu \
			); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_MMU_COMMAND - MMU commands with synchronicity info
 *
 * @kbdev: Kbase device
 * @kernel_ctx_id: Unique ID for the KBase Context
 * @mmu_cmd_id: MMU command ID (e.g. AS_COMMAND_UPDATE)
 * @mmu_synchronicity: Indicates whether the command relates to the currently
 *                     running job and must be resolved for that job to make
 *                     progress (synchronous, e.g. a grow on page fault or
 *                     JIT), or not (asynchronous, e.g. IOCTL calls from user
 *                     space). This parameter is 0 for an asynchronous
 *                     operation.
 * @mmu_lock_addr: Start address of the region to be locked/unlocked/invalidated
 * @mmu_lock_page_num: Number of pages to be locked/unlocked/invalidated
 */
#define KBASE_TLSTREAM_AUX_MMU_COMMAND( \
	kbdev, \
	kernel_ctx_id, \
	mmu_cmd_id, \
	mmu_synchronicity, \
	mmu_lock_addr, \
	mmu_lock_page_num \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_mmu_command( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				kernel_ctx_id, \
				mmu_cmd_id, \
				mmu_synchronicity, \
				mmu_lock_addr, \
				mmu_lock_page_num \
			); \
	} while (0)
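
/* Usage sketch (editor's illustration, not produced by the generator):
 * reporting a synchronous MMU lock issued while resolving a page fault.
 * AS_COMMAND_LOCK is assumed to be one of the AS_COMMAND_* identifiers
 * alluded to above; mmu_synchronicity is 1 because a grow on page fault
 * blocks the running job. fault_addr and fault_pages are hypothetical
 * local variables.
 *
 *   KBASE_TLSTREAM_AUX_MMU_COMMAND(kbdev, kernel_ctx_id,
 *                                  AS_COMMAND_LOCK, 1,
 *                                  fault_addr, fault_pages);
 */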

/**
 * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START - Leave protected mode start
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
			__kbase_tlstream_aux_protected_leave_start( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				gpu \
			); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END - Leave protected mode end
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
			__kbase_tlstream_aux_protected_leave_end( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				gpu \
			); \
	} while (0)
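
/* Usage sketch (editor's illustration, not produced by the generator):
 * the protected-mode tracepoints are intended to be emitted in
 * start/end pairs bracketing the actual mode switch, e.g. with a
 * hypothetical do_leave_protected_mode() helper:
 *
 *   KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START(kbdev, gpu);
 *   err = do_leave_protected_mode(kbdev);
 *   KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END(kbdev, gpu);
 *
 * Note that, unlike the enter tracepoints above, the leave tracepoints
 * are gated by BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS rather than
 * the plain TLSTREAM_ENABLED flag.
 */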
4320 */ 4321 #undef KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT 4322 #define KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT(kbdev, \ 4323 context, slot_nr, atom_nr, event) \ 4324 do { \ 4325 int enabled = atomic_read(&kbdev->timeline_flags); \ 4326 kbase_trace_mali_job_slots_event(kbdev->id, \ 4327 GATOR_MAKE_EVENT(event, slot_nr), \ 4328 context, (u8) atom_nr); \ 4329 if (enabled & TLSTREAM_ENABLED) \ 4330 __kbase_tlstream_aux_event_job_slot( \ 4331 __TL_DISPATCH_STREAM(kbdev, aux), \ 4332 context, slot_nr, atom_nr, event); \ 4333 } while (0) 4334 4335 #undef KBASE_TLSTREAM_AUX_PM_STATE 4336 #define KBASE_TLSTREAM_AUX_PM_STATE(kbdev, core_type, state) \ 4337 do { \ 4338 int enabled = atomic_read(&kbdev->timeline_flags); \ 4339 kbase_trace_mali_pm_status(kbdev->id, \ 4340 core_type, state); \ 4341 if (enabled & TLSTREAM_ENABLED) \ 4342 __kbase_tlstream_aux_pm_state( \ 4343 __TL_DISPATCH_STREAM(kbdev, aux), \ 4344 core_type, state); \ 4345 } while (0) 4346 4347 #undef KBASE_TLSTREAM_AUX_PAGEFAULT 4348 #define KBASE_TLSTREAM_AUX_PAGEFAULT(kbdev, \ 4349 ctx_nr, as_nr, page_cnt_change) \ 4350 do { \ 4351 int enabled = atomic_read(&kbdev->timeline_flags); \ 4352 kbase_trace_mali_page_fault_insert_pages(kbdev->id, \ 4353 as_nr, \ 4354 page_cnt_change); \ 4355 if (enabled & TLSTREAM_ENABLED) \ 4356 __kbase_tlstream_aux_pagefault( \ 4357 __TL_DISPATCH_STREAM(kbdev, aux), \ 4358 ctx_nr, as_nr, page_cnt_change); \ 4359 } while (0) 4360 4361 /* kbase_trace_mali_total_alloc_pages_change is handled differently here. 4362 * We stream the total amount of pages allocated for `kbdev` rather 4363 * than `page_count`, which is per-context. 4364 */ 4365 #undef KBASE_TLSTREAM_AUX_PAGESALLOC 4366 #define KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, ctx_nr, page_cnt) \ 4367 do { \ 4368 int enabled = atomic_read(&kbdev->timeline_flags); \ 4369 u32 global_pages_count = \ 4370 atomic_read(&kbdev->memdev.used_pages); \ 4371 \ 4372 kbase_trace_mali_total_alloc_pages_change(kbdev->id, \ 4373 global_pages_count); \ 4374 if (enabled & TLSTREAM_ENABLED) \ 4375 __kbase_tlstream_aux_pagesalloc( \ 4376 __TL_DISPATCH_STREAM(kbdev, aux), \ 4377 ctx_nr, page_cnt); \ 4378 } while (0) 4379 #endif /* CONFIG_MALI_BIFROST_GATOR_SUPPORT */ 4380 4381 /* clang-format on */ 4382 #endif 4383