1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 /* 3 * 4 * (C) COPYRIGHT 2018-2023 ARM Limited. All rights reserved. 5 * 6 * This program is free software and is provided to you under the terms of the 7 * GNU General Public License version 2 as published by the Free Software 8 * Foundation, and any use by you of this program is subject to the terms 9 * of such GNU license. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, you can access it online at 18 * http://www.gnu.org/licenses/gpl-2.0.html. 19 * 20 */ 21 22 /* Definitions (types, defines, etcs) common to the CSF. 23 * They are placed here to allow the hierarchy of header files to work. 24 */ 25 26 #ifndef _KBASE_CSF_DEFS_H_ 27 #define _KBASE_CSF_DEFS_H_ 28 29 #include <linux/types.h> 30 #include <linux/wait.h> 31 32 #include "mali_kbase_csf_firmware.h" 33 #include "mali_kbase_refcount_defs.h" 34 #include "mali_kbase_csf_event.h" 35 #include <uapi/gpu/arm/bifrost/csf/mali_kbase_csf_errors_dumpfault.h> 36 37 #if IS_ENABLED(CONFIG_MALI_CORESIGHT) 38 #include <debug/backend/mali_kbase_debug_coresight_internal_csf.h> 39 #endif /* IS_ENABLED(CONFIG_MALI_CORESIGHT) */ 40 41 /* Maximum number of KCPU command queues to be created per GPU address space. 42 */ 43 #define KBASEP_MAX_KCPU_QUEUES ((size_t)256) 44 45 /* Maximum number of GPU command queue groups to be created per GPU address 46 * space. 47 */ 48 #define MAX_QUEUE_GROUP_NUM (256) 49 50 /* Maximum number of GPU tiler heaps to allow to be created per GPU address 51 * space. 52 */ 53 #define MAX_TILER_HEAPS (128) 54 55 #define CSF_FIRMWARE_ENTRY_READ (1ul << 0) 56 #define CSF_FIRMWARE_ENTRY_WRITE (1ul << 1) 57 #define CSF_FIRMWARE_ENTRY_EXECUTE (1ul << 2) 58 #define CSF_FIRMWARE_ENTRY_CACHE_MODE (3ul << 3) 59 #define CSF_FIRMWARE_ENTRY_PROTECTED (1ul << 5) 60 #define CSF_FIRMWARE_ENTRY_SHARED (1ul << 30) 61 #define CSF_FIRMWARE_ENTRY_ZERO (1ul << 31) 62 63 /** 64 * enum kbase_csf_queue_bind_state - bind state of the queue 65 * 66 * @KBASE_CSF_QUEUE_UNBOUND: Set when the queue is registered or when the link 67 * between queue and the group to which it was bound or being bound is removed. 68 * @KBASE_CSF_QUEUE_BIND_IN_PROGRESS: Set when the first part of bind operation 69 * has completed i.e. CS_QUEUE_BIND ioctl. 70 * @KBASE_CSF_QUEUE_BOUND: Set when the bind operation has completed i.e. IO 71 * pages have been mapped in the process address space. 72 */ 73 enum kbase_csf_queue_bind_state { 74 KBASE_CSF_QUEUE_UNBOUND, 75 KBASE_CSF_QUEUE_BIND_IN_PROGRESS, 76 KBASE_CSF_QUEUE_BOUND, 77 }; 78 79 /** 80 * enum kbase_csf_reset_gpu_state - state of the gpu reset 81 * 82 * @KBASE_CSF_RESET_GPU_NOT_PENDING: Set when the GPU reset isn't pending 83 * 84 * @KBASE_CSF_RESET_GPU_PREPARED: Set when kbase_prepare_to_reset_gpu() has 85 * been called. This is just for debugging checks to encourage callers to call 86 * kbase_prepare_to_reset_gpu() before kbase_reset_gpu(). 87 * 88 * @KBASE_CSF_RESET_GPU_COMMITTED: Set when the GPU reset process has been 89 * committed and so will definitely happen, but the procedure to reset the GPU 90 * has not yet begun. Other threads must finish accessing the HW before we 91 * reach %KBASE_CSF_RESET_GPU_HAPPENING. 92 * 93 * @KBASE_CSF_RESET_GPU_HAPPENING: Set when the GPU reset process is occurring 94 * (silent or otherwise), and is actively accessing the HW. Any changes to the 95 * HW in other threads might get lost, overridden, or corrupted. 96 * 97 * @KBASE_CSF_RESET_GPU_COMMITTED_SILENT: Set when the GPU reset process has 98 * been committed but has not started happening. This is used when resetting 99 * the GPU as part of normal behavior (e.g. when exiting protected mode). 100 * Other threads must finish accessing the HW before we reach 101 * %KBASE_CSF_RESET_GPU_HAPPENING. 102 * 103 * @KBASE_CSF_RESET_GPU_FAILED: Set when an error is encountered during the 104 * GPU reset process. No more work could then be executed on GPU, unloading 105 * the Driver module is the only option. 106 */ 107 enum kbase_csf_reset_gpu_state { 108 KBASE_CSF_RESET_GPU_NOT_PENDING, 109 KBASE_CSF_RESET_GPU_PREPARED, 110 KBASE_CSF_RESET_GPU_COMMITTED, 111 KBASE_CSF_RESET_GPU_HAPPENING, 112 KBASE_CSF_RESET_GPU_COMMITTED_SILENT, 113 KBASE_CSF_RESET_GPU_FAILED, 114 }; 115 116 /** 117 * enum kbase_csf_group_state - state of the GPU command queue group 118 * 119 * @KBASE_CSF_GROUP_INACTIVE: Group is inactive and won't be 120 * considered by scheduler for running on 121 * CSG slot. 122 * @KBASE_CSF_GROUP_RUNNABLE: Group is in the list of runnable groups 123 * and is subjected to time-slice based 124 * scheduling. A start request would be 125 * sent (or already has been sent) if the 126 * group is assigned the CS 127 * group slot for the fist time. 128 * @KBASE_CSF_GROUP_IDLE: Group is currently on a CSG slot 129 * but all the CSs bound to the group have 130 * become either idle or waiting on sync 131 * object. 132 * Group could be evicted from the slot on 133 * the next tick if there are no spare 134 * slots left after scheduling non-idle 135 * queue groups. If the group is kept on 136 * slot then it would be moved to the 137 * RUNNABLE state, also if one of the 138 * queues bound to the group is kicked it 139 * would be moved to the RUNNABLE state. 140 * If the group is evicted from the slot it 141 * would be moved to either 142 * KBASE_CSF_GROUP_SUSPENDED_ON_IDLE or 143 * KBASE_CSF_GROUP_SUSPENDED_ON_WAIT_SYNC 144 * state. 145 * @KBASE_CSF_GROUP_SUSPENDED: Group was evicted from the CSG slot 146 * and is not running but is still in the 147 * list of runnable groups and subjected 148 * to time-slice based scheduling. A resume 149 * request would be sent when a CSG slot is 150 * re-assigned to the group and once the 151 * resume is complete group would be moved 152 * back to the RUNNABLE state. 153 * @KBASE_CSF_GROUP_SUSPENDED_ON_IDLE: Same as KBASE_CSF_GROUP_SUSPENDED except 154 * that queue group also became idle before 155 * the suspension. This state helps 156 * Scheduler avoid scheduling the idle 157 * groups over the non-idle groups in the 158 * subsequent ticks. If one of the queues 159 * bound to the group is kicked it would be 160 * moved to the SUSPENDED state. 161 * @KBASE_CSF_GROUP_SUSPENDED_ON_WAIT_SYNC: Same as GROUP_SUSPENDED_ON_IDLE 162 * except that at least one CS 163 * bound to this group was 164 * waiting for synchronization object 165 * before the suspension. 166 * @KBASE_CSF_GROUP_FAULT_EVICTED: Group is evicted from the scheduler due 167 * to a fault condition, pending to be 168 * terminated. 169 * @KBASE_CSF_GROUP_TERMINATED: Group is no longer schedulable and is 170 * pending to be deleted by Client, all the 171 * queues bound to it have been unbound. 172 */ 173 enum kbase_csf_group_state { 174 KBASE_CSF_GROUP_INACTIVE, 175 KBASE_CSF_GROUP_RUNNABLE, 176 KBASE_CSF_GROUP_IDLE, 177 KBASE_CSF_GROUP_SUSPENDED, 178 KBASE_CSF_GROUP_SUSPENDED_ON_IDLE, 179 KBASE_CSF_GROUP_SUSPENDED_ON_WAIT_SYNC, 180 KBASE_CSF_GROUP_FAULT_EVICTED, 181 KBASE_CSF_GROUP_TERMINATED, 182 }; 183 184 /** 185 * enum kbase_csf_csg_slot_state - state of the command queue group slots under 186 * the scheduler control. 187 * 188 * @CSG_SLOT_READY: The slot is clean and ready to be programmed with a 189 * queue group. 190 * @CSG_SLOT_READY2RUN: The slot has been programmed with a queue group, i.e. a 191 * start or resume request has been sent to the firmware. 192 * @CSG_SLOT_RUNNING: The queue group is running on the slot, acknowledgment 193 * of a start or resume request has been obtained from the 194 * firmware. 195 * @CSG_SLOT_DOWN2STOP: The suspend or terminate request for the queue group on 196 * the slot has been sent to the firmware. 197 * @CSG_SLOT_STOPPED: The queue group is removed from the slot, acknowledgment 198 * of suspend or terminate request has been obtained from 199 * the firmware. 200 * @CSG_SLOT_READY2RUN_TIMEDOUT: The start or resume request sent on the slot 201 * for the queue group timed out. 202 * @CSG_SLOT_DOWN2STOP_TIMEDOUT: The suspend or terminate request for queue 203 * group on the slot timed out. 204 */ 205 enum kbase_csf_csg_slot_state { 206 CSG_SLOT_READY, 207 CSG_SLOT_READY2RUN, 208 CSG_SLOT_RUNNING, 209 CSG_SLOT_DOWN2STOP, 210 CSG_SLOT_STOPPED, 211 CSG_SLOT_READY2RUN_TIMEDOUT, 212 CSG_SLOT_DOWN2STOP_TIMEDOUT, 213 }; 214 215 /** 216 * enum kbase_csf_scheduler_state - state of the scheduler operational phases. 217 * 218 * @SCHED_BUSY: The scheduler is busy performing on tick schedule 219 * operations, the state of CSG slots 220 * can't be changed. 221 * @SCHED_INACTIVE: The scheduler is inactive, it is allowed to modify the 222 * state of CSG slots by in-cycle 223 * priority scheduling. 224 * @SCHED_SUSPENDED: The scheduler is in low-power mode with scheduling 225 * operations suspended and is not holding the power 226 * management reference. This can happen if the GPU 227 * becomes idle for a duration exceeding a threshold, 228 * or due to a system triggered suspend action. 229 * @SCHED_SLEEPING: The scheduler is in low-power mode with scheduling 230 * operations suspended and is not holding the power 231 * management reference. This state is set, only for the 232 * GPUs that supports the sleep feature, when GPU idle 233 * notification is received. The state is changed to 234 * @SCHED_SUSPENDED from the runtime suspend callback 235 * function after the suspend of CSGs. 236 */ 237 enum kbase_csf_scheduler_state { 238 SCHED_BUSY, 239 SCHED_INACTIVE, 240 SCHED_SUSPENDED, 241 SCHED_SLEEPING, 242 }; 243 244 /** 245 * enum kbase_queue_group_priority - Kbase internal relative priority list. 246 * 247 * @KBASE_QUEUE_GROUP_PRIORITY_REALTIME: The realtime queue group priority. 248 * @KBASE_QUEUE_GROUP_PRIORITY_HIGH: The high queue group priority. 249 * @KBASE_QUEUE_GROUP_PRIORITY_MEDIUM: The medium queue group priority. 250 * @KBASE_QUEUE_GROUP_PRIORITY_LOW: The low queue group priority. 251 * @KBASE_QUEUE_GROUP_PRIORITY_COUNT: The number of priority levels. 252 */ 253 enum kbase_queue_group_priority { 254 KBASE_QUEUE_GROUP_PRIORITY_REALTIME = 0, 255 KBASE_QUEUE_GROUP_PRIORITY_HIGH, 256 KBASE_QUEUE_GROUP_PRIORITY_MEDIUM, 257 KBASE_QUEUE_GROUP_PRIORITY_LOW, 258 KBASE_QUEUE_GROUP_PRIORITY_COUNT 259 }; 260 261 /** 262 * enum kbase_timeout_selector - The choice of which timeout to get scaled 263 * using the lowest GPU frequency. 264 * @CSF_FIRMWARE_TIMEOUT: Response timeout from CSF firmware. 265 * @CSF_PM_TIMEOUT: Timeout for GPU Power Management to reach the desired 266 * Shader, L2 and MCU state. 267 * @CSF_GPU_RESET_TIMEOUT: Waiting timeout for GPU reset to complete. 268 * @CSF_CSG_SUSPEND_TIMEOUT: Timeout given for all active CSGs to be suspended. 269 * @CSF_FIRMWARE_BOOT_TIMEOUT: Maximum time to wait for firmware to boot. 270 * @CSF_FIRMWARE_PING_TIMEOUT: Maximum time to wait for firmware to respond 271 * to a ping from KBase. 272 * @CSF_SCHED_PROTM_PROGRESS_TIMEOUT: Timeout used to prevent protected mode execution hang. 273 * @MMU_AS_INACTIVE_WAIT_TIMEOUT: Maximum waiting time in ms for the completion 274 * of a MMU operation 275 * @KBASE_TIMEOUT_SELECTOR_COUNT: Number of timeout selectors. Must be last in 276 * the enum. 277 */ 278 enum kbase_timeout_selector { 279 CSF_FIRMWARE_TIMEOUT, 280 CSF_PM_TIMEOUT, 281 CSF_GPU_RESET_TIMEOUT, 282 CSF_CSG_SUSPEND_TIMEOUT, 283 CSF_FIRMWARE_BOOT_TIMEOUT, 284 CSF_FIRMWARE_PING_TIMEOUT, 285 CSF_SCHED_PROTM_PROGRESS_TIMEOUT, 286 MMU_AS_INACTIVE_WAIT_TIMEOUT, 287 288 /* Must be the last in the enum */ 289 KBASE_TIMEOUT_SELECTOR_COUNT 290 }; 291 292 /** 293 * struct kbase_csf_notification - Event or error generated as part of command 294 * queue execution 295 * 296 * @data: Event or error data returned to userspace 297 * @link: Link to the linked list, &struct_kbase_csf_context.error_list. 298 */ 299 struct kbase_csf_notification { 300 struct base_csf_notification data; 301 struct list_head link; 302 }; 303 304 /** 305 * struct kbase_queue - Object representing a GPU command queue. 306 * 307 * @kctx: Pointer to the base context with which this GPU command queue 308 * is associated. 309 * @user_io_gpu_va: The start GPU VA address of this queue's userio pages. Only 310 * valid (i.e. not 0 ) when the queue is enabled and its owner 311 * group has a runtime bound csg_reg (group region). 312 * @phys: Pointer to the physical pages allocated for the 313 * pair or User mode input/output page 314 * @user_io_addr: Pointer to the permanent kernel mapping of User mode 315 * input/output pages. The pages can be accessed through 316 * the mapping without any cache maintenance. 317 * @handle: Handle returned with bind ioctl for creating a 318 * contiguous User mode mapping of input/output pages & 319 * the hardware doorbell page. 320 * @doorbell_nr: Index of the hardware doorbell page assigned to the 321 * queue. 322 * @db_file_offset: File offset value that is assigned to userspace mapping 323 * created on bind to access the doorbell page. 324 * It is in page units. 325 * @link: Link to the linked list of GPU command queues created per 326 * GPU address space. 327 * @refcount: Reference count, stands for the number of times the queue 328 * has been referenced. The reference is taken when it is 329 * created, when it is bound to the group and also when the 330 * @oom_event_work work item is queued 331 * for it. 332 * @group: Pointer to the group to which this queue is bound. 333 * @queue_reg: Pointer to the VA region allocated for CS buffer. 334 * @oom_event_work: Work item corresponding to the out of memory event for 335 * chunked tiler heap being used for this queue. 336 * @base_addr: Base address of the CS buffer. 337 * @size: Size of the CS buffer. 338 * @priority: Priority of this queue within the group. 339 * @bind_state: Bind state of the queue as enum @kbase_csf_queue_bind_state 340 * @csi_index: The ID of the assigned CS hardware interface. 341 * @enabled: Indicating whether the CS is running, or not. 342 * @status_wait: Value of CS_STATUS_WAIT register of the CS will 343 * be kept when the CS gets blocked by sync wait. 344 * CS_STATUS_WAIT provides information on conditions queue is 345 * blocking on. This is set when the group, to which queue is 346 * bound, is suspended after getting blocked, i.e. in 347 * KBASE_CSF_GROUP_SUSPENDED_ON_WAIT_SYNC state. 348 * @sync_ptr: Value of CS_STATUS_WAIT_SYNC_POINTER register of the CS 349 * will be kept when the CS gets blocked by 350 * sync wait. CS_STATUS_WAIT_SYNC_POINTER contains the address 351 * of synchronization object being waited on. 352 * Valid only when @status_wait is set. 353 * @sync_value: Value of CS_STATUS_WAIT_SYNC_VALUE register of the CS 354 * will be kept when the CS gets blocked by 355 * sync wait. CS_STATUS_WAIT_SYNC_VALUE contains the value 356 * tested against the synchronization object. 357 * Valid only when @status_wait is set. 358 * @sb_status: Value indicates which of the scoreboard entries in the queue 359 * are non-zero 360 * @blocked_reason: Value shows if the queue is blocked, and if so, 361 * the reason why it is blocked 362 * @trace_buffer_base: CS trace buffer base address. 363 * @trace_offset_ptr: Pointer to the CS trace buffer offset variable. 364 * @trace_buffer_size: CS trace buffer size for the queue. 365 * @trace_cfg: CS trace configuration parameters. 366 * @error: GPU command queue fatal information to pass to user space. 367 * @cs_error_work: Work item to handle the CS fatal event reported for this 368 * queue or the CS fault event if dump on fault is enabled 369 * and acknowledgment for CS fault event needs to be done 370 * after dumping is complete. 371 * @cs_error_info: Records additional information about the CS fatal event or 372 * about CS fault event if dump on fault is enabled. 373 * @cs_error: Records information about the CS fatal event or 374 * about CS fault event if dump on fault is enabled. 375 * @cs_error_fatal: Flag to track if the CS fault or CS fatal event occurred. 376 * @pending: Indicating whether the queue has new submitted work. 377 * @extract_ofs: The current EXTRACT offset, this is only updated when handling 378 * the GLB IDLE IRQ if the idle timeout value is non-0 in order 379 * to help detect a queue's true idle status. 380 * @saved_cmd_ptr: The command pointer value for the GPU queue, saved when the 381 * group to which queue is bound is suspended. 382 * This can be useful in certain cases to know that till which 383 * point the execution reached in the Linear command buffer. 384 */ 385 struct kbase_queue { 386 struct kbase_context *kctx; 387 u64 user_io_gpu_va; 388 struct tagged_addr phys[2]; 389 char *user_io_addr; 390 u64 handle; 391 int doorbell_nr; 392 unsigned long db_file_offset; 393 struct list_head link; 394 kbase_refcount_t refcount; 395 struct kbase_queue_group *group; 396 struct kbase_va_region *queue_reg; 397 struct work_struct oom_event_work; 398 u64 base_addr; 399 u32 size; 400 u8 priority; 401 s8 csi_index; 402 enum kbase_csf_queue_bind_state bind_state; 403 bool enabled; 404 u32 status_wait; 405 u64 sync_ptr; 406 u32 sync_value; 407 u32 sb_status; 408 u32 blocked_reason; 409 u64 trace_buffer_base; 410 u64 trace_offset_ptr; 411 u32 trace_buffer_size; 412 u32 trace_cfg; 413 struct kbase_csf_notification error; 414 struct work_struct cs_error_work; 415 u64 cs_error_info; 416 u32 cs_error; 417 bool cs_error_fatal; 418 atomic_t pending; 419 u64 extract_ofs; 420 #if IS_ENABLED(CONFIG_DEBUG_FS) 421 u64 saved_cmd_ptr; 422 #endif /* CONFIG_DEBUG_FS */ 423 }; 424 425 /** 426 * struct kbase_normal_suspend_buffer - Object representing a normal 427 * suspend buffer for queue group. 428 * @gpu_va: The start GPU VA address of the bound suspend buffer. Note, this 429 * field is only valid when the owner group has a region bound at 430 * runtime. 431 * @phy: Array of physical memory pages allocated for the normal- 432 * mode suspend buffer. 433 */ 434 struct kbase_normal_suspend_buffer { 435 u64 gpu_va; 436 struct tagged_addr *phy; 437 }; 438 439 /** 440 * struct kbase_protected_suspend_buffer - Object representing a protected 441 * suspend buffer for queue group. 442 * @gpu_va: The start GPU VA address of the bound protected mode suspend buffer. 443 * Note, this field is only valid when the owner group has a region 444 * bound at runtime. 445 * @pma: Array of pointer to protected mode allocations containing 446 * information about memory pages allocated for protected mode 447 * suspend buffer. 448 * @alloc_retries: Number of times we retried allocing physical pages 449 * for protected suspend buffers. 450 */ 451 struct kbase_protected_suspend_buffer { 452 u64 gpu_va; 453 struct protected_memory_allocation **pma; 454 u8 alloc_retries; 455 }; 456 457 /** 458 * struct kbase_queue_group - Object representing a GPU command queue group. 459 * 460 * @kctx: Pointer to the kbase context with which this queue group 461 * is associated. 462 * @normal_suspend_buf: Object representing the normal suspend buffer. 463 * Normal-mode suspend buffer that is used for 464 * group context switch. 465 * @protected_suspend_buf: Object representing the protected suspend 466 * buffer. Protected-mode suspend buffer that is 467 * used for group context switch. 468 * @handle: Handle which identifies this queue group. 469 * @csg_nr: Number/index of the CSG to which this queue group is 470 * mapped; KBASEP_CSG_NR_INVALID indicates that the queue 471 * group is not scheduled. 472 * @priority: Priority of the queue group, 0 being the highest, 473 * BASE_QUEUE_GROUP_PRIORITY_COUNT - 1 being the lowest. 474 * @tiler_max: Maximum number of tiler endpoints the group is allowed 475 * to use. 476 * @fragment_max: Maximum number of fragment endpoints the group is 477 * allowed to use. 478 * @compute_max: Maximum number of compute endpoints the group is 479 * allowed to use. 480 * @csi_handlers: Requested CSI exception handler flags for the group. 481 * @tiler_mask: Mask of tiler endpoints the group is allowed to use. 482 * @fragment_mask: Mask of fragment endpoints the group is allowed to use. 483 * @compute_mask: Mask of compute endpoints the group is allowed to use. 484 * @group_uid: 32-bit wide unsigned identifier for the group, unique 485 * across all kbase devices and contexts. 486 * @link: Link to this queue group in the 'runnable_groups' list of 487 * the corresponding kctx. 488 * @link_to_schedule: Link to this queue group in the list of prepared groups 489 * to be scheduled, if the group is runnable/suspended. 490 * If the group is idle or waiting for CQS, it would be a 491 * link to the list of idle/blocked groups list. 492 * @run_state: Current state of the queue group. 493 * @prepared_seq_num: Indicates the position of queue group in the list of 494 * prepared groups to be scheduled. 495 * @scan_seq_num: Scan out sequence number before adjusting for dynamic 496 * idle conditions. It is used for setting a group's 497 * onslot priority. It could differ from prepared_seq_number 498 * when there are idle groups. 499 * @faulted: Indicates that a GPU fault occurred for the queue group. 500 * This flag persists until the fault has been queued to be 501 * reported to userspace. 502 * @cs_unrecoverable: Flag to unblock the thread waiting for CSG termination in 503 * case of CS_FATAL_EXCEPTION_TYPE_CS_UNRECOVERABLE 504 * @reevaluate_idle_status : Flag set when work is submitted for the normal group 505 * or it becomes unblocked during protected mode. The 506 * flag helps Scheduler confirm if the group actually 507 * became non idle or not. 508 * @bound_queues: Array of registered queues bound to this queue group. 509 * @doorbell_nr: Index of the hardware doorbell page assigned to the 510 * group. 511 * @protm_event_work: Work item corresponding to the protected mode entry 512 * event for this queue. 513 * @protm_pending_bitmap: Bit array to keep a track of CSs that 514 * have pending protected mode entry requests. 515 * @error_fatal: An error of type BASE_GPU_QUEUE_GROUP_ERROR_FATAL to be 516 * returned to userspace if such an error has occurred. 517 * @error_timeout: An error of type BASE_GPU_QUEUE_GROUP_ERROR_TIMEOUT 518 * to be returned to userspace if such an error has occurred. 519 * @error_tiler_oom: An error of type BASE_GPU_QUEUE_GROUP_ERROR_TILER_HEAP_OOM 520 * to be returned to userspace if such an error has occurred. 521 * @timer_event_work: Work item to handle the progress timeout fatal event 522 * for the group. 523 * @deschedule_deferred_cnt: Counter keeping a track of the number of threads 524 * that tried to deschedule the group and had to defer 525 * the descheduling due to the dump on fault. 526 * @csg_reg: An opaque pointer to the runtime bound shared regions. It is 527 * dynamically managed by the scheduler and can be NULL if the 528 * group is off-slot. 529 * @csg_reg_bind_retries: Runtime MCU shared region map operation attempted counts. 530 * It is accumulated on consecutive mapping attempt failures. On 531 * reaching a preset limit, the group is regarded as suffered 532 * a fatal error and triggers a fatal error notification. 533 */ 534 struct kbase_queue_group { 535 struct kbase_context *kctx; 536 struct kbase_normal_suspend_buffer normal_suspend_buf; 537 struct kbase_protected_suspend_buffer protected_suspend_buf; 538 u8 handle; 539 s8 csg_nr; 540 u8 priority; 541 542 u8 tiler_max; 543 u8 fragment_max; 544 u8 compute_max; 545 u8 csi_handlers; 546 547 u64 tiler_mask; 548 u64 fragment_mask; 549 u64 compute_mask; 550 551 u32 group_uid; 552 553 struct list_head link; 554 struct list_head link_to_schedule; 555 enum kbase_csf_group_state run_state; 556 u32 prepared_seq_num; 557 u32 scan_seq_num; 558 bool faulted; 559 bool cs_unrecoverable; 560 bool reevaluate_idle_status; 561 562 struct kbase_queue *bound_queues[MAX_SUPPORTED_STREAMS_PER_GROUP]; 563 564 int doorbell_nr; 565 struct work_struct protm_event_work; 566 DECLARE_BITMAP(protm_pending_bitmap, MAX_SUPPORTED_STREAMS_PER_GROUP); 567 568 struct kbase_csf_notification error_fatal; 569 struct kbase_csf_notification error_timeout; 570 struct kbase_csf_notification error_tiler_oom; 571 572 struct work_struct timer_event_work; 573 574 /** 575 * @dvs_buf: Address and size of scratch memory. 576 * 577 * Used to store intermediate DVS data by the GPU. 578 */ 579 u64 dvs_buf; 580 #if IS_ENABLED(CONFIG_DEBUG_FS) 581 u32 deschedule_deferred_cnt; 582 #endif 583 void *csg_reg; 584 u8 csg_reg_bind_retries; 585 }; 586 587 /** 588 * struct kbase_csf_kcpu_queue_context - Object representing the kernel CPU 589 * queues for a GPU address space. 590 * 591 * @lock: Lock preventing concurrent access to @array and the @in_use bitmap. 592 * @array: Array of pointers to kernel CPU command queues. 593 * @in_use: Bitmap which indicates which kernel CPU command queues are in use. 594 * @cmd_seq_num: The sequence number assigned to an enqueued command, 595 * in incrementing order (older commands shall have a 596 * smaller number). 597 * @jit_lock: Lock to serialise JIT operations. 598 * @jit_cmds_head: A list of the just-in-time memory commands, both 599 * allocate & free, in submission order, protected 600 * by kbase_csf_kcpu_queue_context.lock. 601 * @jit_blocked_queues: A list of KCPU command queues blocked by a pending 602 * just-in-time memory allocation command which will be 603 * reattempted after the impending free of other active 604 * allocations. 605 */ 606 struct kbase_csf_kcpu_queue_context { 607 struct mutex lock; 608 struct kbase_kcpu_command_queue *array[KBASEP_MAX_KCPU_QUEUES]; 609 DECLARE_BITMAP(in_use, KBASEP_MAX_KCPU_QUEUES); 610 atomic64_t cmd_seq_num; 611 612 struct mutex jit_lock; 613 struct list_head jit_cmds_head; 614 struct list_head jit_blocked_queues; 615 }; 616 617 /** 618 * struct kbase_csf_cpu_queue_context - Object representing the cpu queue 619 * information. 620 * 621 * @buffer: Buffer containing CPU queue information provided by Userspace. 622 * @buffer_size: The size of @buffer. 623 * @dump_req_status: Indicates the current status for CPU queues dump request. 624 * @dump_cmp: Dumping cpu queue completion event. 625 */ 626 struct kbase_csf_cpu_queue_context { 627 char *buffer; 628 size_t buffer_size; 629 atomic_t dump_req_status; 630 struct completion dump_cmp; 631 }; 632 633 /** 634 * struct kbase_csf_heap_context_allocator - Allocator of heap contexts 635 * 636 * @kctx: Pointer to the kbase context with which this allocator is 637 * associated. 638 * @region: Pointer to a GPU memory region from which heap context structures 639 * are allocated. NULL if no heap contexts have been allocated. 640 * @gpu_va: GPU virtual address of the start of the region from which heap 641 * context structures are allocated. 0 if no heap contexts have been 642 * allocated. 643 * @lock: Lock preventing concurrent access to the @in_use bitmap. 644 * @in_use: Bitmap that indicates which heap context structures are currently 645 * allocated (in @region). 646 * @heap_context_size_aligned: Size of a heap context structure, in bytes, 647 * aligned to GPU cacheline size. 648 * 649 * Heap context structures are allocated by the kernel for use by the firmware. 650 * The current implementation subdivides a single GPU memory region for use as 651 * a sparse array. 652 */ 653 struct kbase_csf_heap_context_allocator { 654 struct kbase_context *kctx; 655 struct kbase_va_region *region; 656 u64 gpu_va; 657 struct mutex lock; 658 DECLARE_BITMAP(in_use, MAX_TILER_HEAPS); 659 u32 heap_context_size_aligned; 660 }; 661 662 /** 663 * struct kbase_csf_tiler_heap_context - Object representing the tiler heaps 664 * context for a GPU address space. 665 * 666 * @lock: Lock to prevent the concurrent access to tiler heaps (after the 667 * initialization), a tiler heap can be terminated whilst an OoM 668 * event is being handled for it. 669 * @list: List of tiler heaps. 670 * @ctx_alloc: Allocator for heap context structures. 671 * @nr_of_heaps: Total number of tiler heaps that were added during the 672 * life time of the context. 673 * 674 * This contains all of the CSF state relating to chunked tiler heaps for one 675 * @kbase_context. It is not the same as a heap context structure allocated by 676 * the kernel for use by the firmware. 677 */ 678 struct kbase_csf_tiler_heap_context { 679 struct mutex lock; 680 struct list_head list; 681 struct kbase_csf_heap_context_allocator ctx_alloc; 682 u64 nr_of_heaps; 683 }; 684 685 /** 686 * struct kbase_csf_ctx_heap_reclaim_info - Object representing the data section of 687 * a kctx for tiler heap reclaim manger 688 * @mgr_link: Link for hooking up to the heap reclaim manger's kctx lists 689 * @nr_freed_pages: Number of freed pages from the the kctx, after its attachment 690 * to the reclaim manager. This is used for tracking reclaim's 691 * free operation progress. 692 * @nr_est_unused_pages: Estimated number of pages that could be freed for the kctx 693 * when all its CSGs are off-slot, on attaching to the reclaim 694 * manager. 695 * @on_slot_grps: Number of on-slot groups from this kctx. In principle, if a 696 * kctx has groups on-slot, the scheduler will detach it from 697 * the tiler heap reclaim manager, i.e. no tiler heap memory 698 * reclaiming operations on the kctx. 699 */ 700 struct kbase_csf_ctx_heap_reclaim_info { 701 struct list_head mgr_link; 702 u32 nr_freed_pages; 703 u32 nr_est_unused_pages; 704 u8 on_slot_grps; 705 }; 706 707 /** 708 * struct kbase_csf_scheduler_context - Object representing the scheduler's 709 * context for a GPU address space. 710 * 711 * @runnable_groups: Lists of runnable GPU command queue groups in the kctx, 712 * one per queue group relative-priority level. 713 * @num_runnable_grps: Total number of runnable groups across all priority 714 * levels in @runnable_groups. 715 * @idle_wait_groups: A list of GPU command queue groups in which all enabled 716 * GPU command queues are idle and at least one of them 717 * is blocked on a sync wait operation. 718 * @num_idle_wait_grps: Length of the @idle_wait_groups list. 719 * @sync_update_wq: Dedicated workqueue to process work items corresponding 720 * to the sync_update events by sync_set/sync_add 721 * instruction execution on CSs bound to groups 722 * of @idle_wait_groups list. 723 * @sync_update_work: work item to process the sync_update events by 724 * sync_set / sync_add instruction execution on command 725 * streams bound to groups of @idle_wait_groups list. 726 * @ngrp_to_schedule: Number of groups added for the context to the 727 * 'groups_to_schedule' list of scheduler instance. 728 * @heap_info: Heap reclaim information data of the kctx. As the 729 * reclaim action needs to be coordinated with the scheduler 730 * operations, any manipulations on the data needs holding 731 * the scheduler's mutex lock. 732 */ 733 struct kbase_csf_scheduler_context { 734 struct list_head runnable_groups[KBASE_QUEUE_GROUP_PRIORITY_COUNT]; 735 u32 num_runnable_grps; 736 struct list_head idle_wait_groups; 737 u32 num_idle_wait_grps; 738 struct workqueue_struct *sync_update_wq; 739 struct work_struct sync_update_work; 740 u32 ngrp_to_schedule; 741 struct kbase_csf_ctx_heap_reclaim_info heap_info; 742 }; 743 744 /** 745 * enum kbase_csf_event_callback_action - return type for CSF event callbacks. 746 * 747 * @KBASE_CSF_EVENT_CALLBACK_FIRST: Never set explicitly. 748 * It doesn't correspond to any action or type of event callback. 749 * 750 * @KBASE_CSF_EVENT_CALLBACK_KEEP: The callback will remain registered. 751 * 752 * @KBASE_CSF_EVENT_CALLBACK_REMOVE: The callback will be removed 753 * immediately upon return. 754 * 755 * @KBASE_CSF_EVENT_CALLBACK_LAST: Never set explicitly. 756 * It doesn't correspond to any action or type of event callback. 757 */ 758 enum kbase_csf_event_callback_action { 759 KBASE_CSF_EVENT_CALLBACK_FIRST = 0, 760 KBASE_CSF_EVENT_CALLBACK_KEEP, 761 KBASE_CSF_EVENT_CALLBACK_REMOVE, 762 KBASE_CSF_EVENT_CALLBACK_LAST, 763 }; 764 765 /** 766 * struct kbase_csf_event - Object representing CSF event and error 767 * 768 * @callback_list: List of callbacks which are registered to serve CSF 769 * events. 770 * @error_list: List for CS fatal errors in CSF context. 771 * Link of fatal error is &struct_kbase_csf_notification.link. 772 * @lock: Lock protecting access to @callback_list and 773 * @error_list. 774 */ 775 struct kbase_csf_event { 776 struct list_head callback_list; 777 struct list_head error_list; 778 spinlock_t lock; 779 }; 780 781 /** 782 * struct kbase_csf_user_reg_context - Object containing members to manage the mapping 783 * of USER Register page for a context. 784 * 785 * @vma: Pointer to the VMA corresponding to the virtual mapping 786 * of the USER register page. 787 * @file_offset: File offset value that is assigned to userspace mapping 788 * of the USER Register page. It is in page units. 789 * @link: Links the context to the device list when mapping is pointing to 790 * either the dummy or the real Register page. 791 */ 792 struct kbase_csf_user_reg_context { 793 struct vm_area_struct *vma; 794 u32 file_offset; 795 struct list_head link; 796 }; 797 798 /** 799 * struct kbase_csf_context - Object representing CSF for a GPU address space. 800 * 801 * @event_pages_head: A list of pages allocated for the event memory used by 802 * the synchronization objects. A separate list would help 803 * in the fast lookup, since the list is expected to be short 804 * as one page would provide the memory for up to 1K 805 * synchronization objects. 806 * KBASE_PERMANENTLY_MAPPED_MEM_LIMIT_PAGES is the upper 807 * bound on the size of event memory. 808 * @cookies: Bitmask containing of KBASE_CSF_NUM_USER_IO_PAGES_HANDLE 809 * bits, used for creating the User mode CPU mapping in a 810 * deferred manner of a pair of User mode input/output pages 811 * & a hardware doorbell page. 812 * The pages are allocated when a GPU command queue is 813 * bound to a CSG in kbase_csf_queue_bind. 814 * This helps returning unique handles to Userspace from 815 * kbase_csf_queue_bind and later retrieving the pointer to 816 * queue in the mmap handler. 817 * @user_pages_info: Array containing pointers to queue 818 * structures, used in conjunction with cookies bitmask for 819 * providing a mechansim to create a CPU mapping of 820 * input/output pages & hardware doorbell page. 821 * @lock: Serializes accesses to all members, except for ones that 822 * have their own locks. 823 * @queue_groups: Array of registered GPU command queue groups. 824 * @queue_list: Linked list of GPU command queues not yet deregistered. 825 * Note that queues can persist after deregistration if the 826 * userspace mapping created for them on bind operation 827 * hasn't been removed. 828 * @kcpu_queues: Kernel CPU command queues. 829 * @event: CSF event object. 830 * @tiler_heaps: Chunked tiler memory heaps. 831 * @wq: Dedicated workqueue to process work items corresponding 832 * to the OoM events raised for chunked tiler heaps being 833 * used by GPU command queues, and progress timeout events. 834 * @link: Link to this csf context in the 'runnable_kctxs' list of 835 * the scheduler instance 836 * @sched: Object representing the scheduler's context 837 * @pending_submission_work: Work item to process pending kicked GPU command queues. 838 * @cpu_queue: CPU queue information. Only be available when DEBUG_FS 839 * is enabled. 840 * @user_reg: Collective information to support mapping to USER Register page. 841 */ 842 struct kbase_csf_context { 843 struct list_head event_pages_head; 844 DECLARE_BITMAP(cookies, KBASE_CSF_NUM_USER_IO_PAGES_HANDLE); 845 struct kbase_queue *user_pages_info[ 846 KBASE_CSF_NUM_USER_IO_PAGES_HANDLE]; 847 struct mutex lock; 848 struct kbase_queue_group *queue_groups[MAX_QUEUE_GROUP_NUM]; 849 struct list_head queue_list; 850 struct kbase_csf_kcpu_queue_context kcpu_queues; 851 struct kbase_csf_event event; 852 struct kbase_csf_tiler_heap_context tiler_heaps; 853 struct workqueue_struct *wq; 854 struct list_head link; 855 struct kbase_csf_scheduler_context sched; 856 struct work_struct pending_submission_work; 857 #if IS_ENABLED(CONFIG_DEBUG_FS) 858 struct kbase_csf_cpu_queue_context cpu_queue; 859 #endif 860 struct kbase_csf_user_reg_context user_reg; 861 }; 862 863 /** 864 * struct kbase_csf_reset_gpu - Object containing the members required for 865 * GPU reset handling. 866 * @workq: Workqueue to execute the GPU reset work item @work. 867 * @work: Work item for performing the GPU reset. 868 * @wait: Wait queue used to wait for the GPU reset completion. 869 * @sem: RW Semaphore to ensure no other thread attempts to use the 870 * GPU whilst a reset is in process. Unlike traditional 871 * semaphores and wait queues, this allows Linux's lockdep 872 * mechanism to check for deadlocks involving reset waits. 873 * @state: Tracks if the GPU reset is in progress or not. 874 * The state is represented by enum @kbase_csf_reset_gpu_state. 875 */ 876 struct kbase_csf_reset_gpu { 877 struct workqueue_struct *workq; 878 struct work_struct work; 879 wait_queue_head_t wait; 880 struct rw_semaphore sem; 881 atomic_t state; 882 }; 883 884 /** 885 * struct kbase_csf_csg_slot - Object containing members for tracking the state 886 * of CSG slots. 887 * @resident_group: pointer to the queue group that is resident on the CSG slot. 888 * @state: state of the slot as per enum @kbase_csf_csg_slot_state. 889 * @trigger_jiffies: value of jiffies when change in slot state is recorded. 890 * @priority: dynamic priority assigned to CSG slot. 891 */ 892 struct kbase_csf_csg_slot { 893 struct kbase_queue_group *resident_group; 894 atomic_t state; 895 unsigned long trigger_jiffies; 896 u8 priority; 897 }; 898 899 /** 900 * struct kbase_csf_sched_heap_reclaim_mgr - Object for managing tiler heap reclaim 901 * kctx lists inside the CSF device's scheduler. 902 * 903 * @heap_reclaim: Tiler heap reclaim shrinker object. 904 * @ctx_lists: Array of kctx lists, size matching CSG defined priorities. The 905 * lists track the kctxs attached to the reclaim manager. 906 * @unused_pages: Estimated number of unused pages from the @ctxlist array. The 907 * number is indicative for use with reclaim shrinker's count method. 908 */ 909 struct kbase_csf_sched_heap_reclaim_mgr { 910 struct shrinker heap_reclaim; 911 struct list_head ctx_lists[KBASE_QUEUE_GROUP_PRIORITY_COUNT]; 912 atomic_t unused_pages; 913 }; 914 915 /** 916 * struct kbase_csf_mcu_shared_regions - Control data for managing the MCU shared 917 * interface segment regions for scheduler 918 * operations 919 * 920 * @array_csg_regs: Base pointer of an internally created array_csg_regs[]. 921 * @unused_csg_regs: List contains unused csg_regs items. When an item is bound to a 922 * group that is placed onto on-slot by the scheduler, it is dropped 923 * from the list (i.e busy active). The Scheduler will put an active 924 * item back when it's becoming off-slot (not in use). 925 * @dummy_phys: An array of dummy phys[nr_susp_pages] pages for use with normal 926 * and pmode suspend buffers, as a default replacement of a CSG's pages 927 * for the MMU mapping when the csg_reg is not bound to a group. 928 * @pma_phys: Pre-allocated array phy[nr_susp_pages] for transitional use with 929 * protected suspend buffer MMU map operations. 930 * @userio_mem_rd_flags: Userio input page's read access mapping configuration flags. 931 * @dummy_phys_allocated: Indicating the @p dummy_phy page is allocated when true. 932 */ 933 struct kbase_csf_mcu_shared_regions { 934 void *array_csg_regs; 935 struct list_head unused_csg_regs; 936 struct tagged_addr *dummy_phys; 937 struct tagged_addr *pma_phys; 938 unsigned long userio_mem_rd_flags; 939 bool dummy_phys_allocated; 940 }; 941 942 /** 943 * struct kbase_csf_scheduler - Object representing the scheduler used for 944 * CSF for an instance of GPU platform device. 945 * @lock: Lock to serialize the scheduler operations and 946 * access to the data members. 947 * @interrupt_lock: Lock to protect members accessed by interrupt 948 * handler. 949 * @state: The operational phase the scheduler is in. Primarily 950 * used for indicating what in-cycle schedule actions 951 * are allowed. 952 * @doorbell_inuse_bitmap: Bitmap of hardware doorbell pages keeping track of 953 * which pages are currently available for assignment 954 * to clients. 955 * @csg_inuse_bitmap: Bitmap to keep a track of CSG slots 956 * that are currently in use. 957 * @csg_slots: The array for tracking the state of CS 958 * group slots. 959 * @runnable_kctxs: List of Kbase contexts that have runnable command 960 * queue groups. 961 * @groups_to_schedule: List of runnable queue groups prepared on every 962 * scheduler tick. The dynamic priority of the CSG 963 * slot assigned to a group will depend upon the 964 * position of group in the list. 965 * @ngrp_to_schedule: Number of groups in the @groups_to_schedule list, 966 * incremented when a group is added to the list, used 967 * to record the position of group in the list. 968 * @num_active_address_spaces: Number of GPU address space slots that would get 969 * used to program the groups in @groups_to_schedule 970 * list on all the available CSG 971 * slots. 972 * @num_csg_slots_for_tick: Number of CSG slots that can be 973 * active in the given tick/tock. This depends on the 974 * value of @num_active_address_spaces. 975 * @remaining_tick_slots: Tracking the number of remaining available slots 976 * for @num_csg_slots_for_tick during the scheduling 977 * operation in a tick/tock. 978 * @idle_groups_to_schedule: List of runnable queue groups, in which all GPU 979 * command queues became idle or are waiting for 980 * synchronization object, prepared on every 981 * scheduler tick. The groups in this list are 982 * appended to the tail of @groups_to_schedule list 983 * after the scan out so that the idle groups aren't 984 * preferred for scheduling over the non-idle ones. 985 * @csg_scan_count_for_tick: CSG scanout count for assign the scan_seq_num for 986 * each scanned out group during scheduling operation 987 * in a tick/tock. 988 * @total_runnable_grps: Total number of runnable groups across all KCTXs. 989 * @csgs_events_enable_mask: Use for temporary masking off asynchronous events 990 * from firmware (such as OoM events) before a group 991 * is suspended. 992 * @csg_slots_idle_mask: Bit array for storing the mask of CS 993 * group slots for which idle notification was 994 * received. 995 * @csg_slots_prio_update: Bit array for tracking slots that have an on-slot 996 * priority update operation. 997 * @last_schedule: Time in jiffies recorded when the last "tick" or 998 * "tock" schedule operation concluded. Used for 999 * evaluating the exclusion window for in-cycle 1000 * schedule operation. 1001 * @timer_enabled: Whether the CSF scheduler wakes itself up for 1002 * periodic scheduling tasks. If this value is 0 1003 * then it will only perform scheduling under the 1004 * influence of external factors e.g., IRQs, IOCTLs. 1005 * @wq: Dedicated workqueue to execute the @tick_work. 1006 * @tick_timer: High-resolution timer employed to schedule tick 1007 * workqueue items (kernel-provided delayed_work 1008 * items do not use hrtimer and for some reason do 1009 * not provide sufficiently reliable periodicity). 1010 * @tick_work: Work item that performs the "schedule on tick" 1011 * operation to implement timeslice-based scheduling. 1012 * @tock_work: Work item that would perform the schedule on tock 1013 * operation to implement the asynchronous scheduling. 1014 * @pending_tock_work: Indicates that the tock work item should re-execute 1015 * once it's finished instead of going back to sleep. 1016 * @ping_work: Work item that would ping the firmware at regular 1017 * intervals, only if there is a single active CSG 1018 * slot, to check if firmware is alive and would 1019 * initiate a reset if the ping request isn't 1020 * acknowledged. 1021 * @top_ctx: Pointer to the Kbase context corresponding to the 1022 * @top_grp. 1023 * @top_grp: Pointer to queue group inside @groups_to_schedule 1024 * list that was assigned the highest slot priority. 1025 * @active_protm_grp: Indicates if firmware has been permitted to let GPU 1026 * enter protected mode with the given group. On exit 1027 * from protected mode the pointer is reset to NULL. 1028 * This pointer is set and PROTM_ENTER request is sent 1029 * atomically with @interrupt_lock held. 1030 * This pointer being set doesn't necessarily indicates 1031 * that GPU is in protected mode, kbdev->protected_mode 1032 * needs to be checked for that. 1033 * @idle_wq: Workqueue for executing GPU idle notification 1034 * handler. 1035 * @gpu_idle_work: Work item for facilitating the scheduler to bring 1036 * the GPU to a low-power mode on becoming idle. 1037 * @fast_gpu_idle_handling: Indicates whether to relax many of the checks 1038 * normally done in the GPU idle worker. This is 1039 * set to true when handling the GLB IDLE IRQ if the 1040 * idle hysteresis timeout is 0, since it makes it 1041 * possible to receive this IRQ before the extract 1042 * offset is published (which would cause more 1043 * extensive GPU idle checks to fail). 1044 * @gpu_no_longer_idle: Effective only when the GPU idle worker has been 1045 * queued for execution, this indicates whether the 1046 * GPU has become non-idle since the last time the 1047 * idle notification was received. 1048 * @non_idle_offslot_grps: Count of off-slot non-idle groups. Reset during 1049 * the scheduler active phase in a tick. It then 1050 * tracks the count of non-idle groups across all the 1051 * other phases. 1052 * @non_idle_scanout_grps: Count on the non-idle groups in the scan-out 1053 * list at the scheduling prepare stage. 1054 * @pm_active_count: Count indicating if the scheduler is owning a power 1055 * management reference count. Reference is taken when 1056 * the count becomes 1 and is dropped when the count 1057 * becomes 0. It is used to enable the power up of MCU 1058 * after GPU and L2 cache have been powered up. So when 1059 * this count is zero, MCU will not be powered up. 1060 * @csg_scheduling_period_ms: Duration of Scheduling tick in milliseconds. 1061 * @tick_timer_active: Indicates whether the @tick_timer is effectively 1062 * active or not, as the callback function of 1063 * @tick_timer will enqueue @tick_work only if this 1064 * flag is true. This is mainly useful for the case 1065 * when scheduling tick needs to be advanced from 1066 * interrupt context, without actually deactivating 1067 * the @tick_timer first and then enqueing @tick_work. 1068 * @tick_protm_pending_seq: Scan out sequence number of the group that has 1069 * protected mode execution pending for the queue(s) 1070 * bound to it and will be considered first for the 1071 * protected mode execution compared to other such 1072 * groups. It is updated on every tick/tock. 1073 * @interrupt_lock is used to serialize the access. 1074 * @protm_enter_time: GPU protected mode enter time. 1075 * @reclaim_mgr: CSGs tiler heap manager object. 1076 * @mcu_regs_data: Scheduler MCU shared regions data for managing the 1077 * shared interface mappings for on-slot queues and 1078 * CSG suspend buffers. 1079 */ 1080 struct kbase_csf_scheduler { 1081 struct mutex lock; 1082 spinlock_t interrupt_lock; 1083 enum kbase_csf_scheduler_state state; 1084 DECLARE_BITMAP(doorbell_inuse_bitmap, CSF_NUM_DOORBELL); 1085 DECLARE_BITMAP(csg_inuse_bitmap, MAX_SUPPORTED_CSGS); 1086 struct kbase_csf_csg_slot *csg_slots; 1087 struct list_head runnable_kctxs; 1088 struct list_head groups_to_schedule; 1089 u32 ngrp_to_schedule; 1090 u32 num_active_address_spaces; 1091 u32 num_csg_slots_for_tick; 1092 u32 remaining_tick_slots; 1093 struct list_head idle_groups_to_schedule; 1094 u32 csg_scan_count_for_tick; 1095 u32 total_runnable_grps; 1096 DECLARE_BITMAP(csgs_events_enable_mask, MAX_SUPPORTED_CSGS); 1097 DECLARE_BITMAP(csg_slots_idle_mask, MAX_SUPPORTED_CSGS); 1098 DECLARE_BITMAP(csg_slots_prio_update, MAX_SUPPORTED_CSGS); 1099 unsigned long last_schedule; 1100 bool timer_enabled; 1101 struct workqueue_struct *wq; 1102 struct hrtimer tick_timer; 1103 struct work_struct tick_work; 1104 struct delayed_work tock_work; 1105 atomic_t pending_tock_work; 1106 struct delayed_work ping_work; 1107 struct kbase_context *top_ctx; 1108 struct kbase_queue_group *top_grp; 1109 struct kbase_queue_group *active_protm_grp; 1110 struct workqueue_struct *idle_wq; 1111 struct work_struct gpu_idle_work; 1112 bool fast_gpu_idle_handling; 1113 atomic_t gpu_no_longer_idle; 1114 atomic_t non_idle_offslot_grps; 1115 u32 non_idle_scanout_grps; 1116 u32 pm_active_count; 1117 unsigned int csg_scheduling_period_ms; 1118 bool tick_timer_active; 1119 u32 tick_protm_pending_seq; 1120 ktime_t protm_enter_time; 1121 struct kbase_csf_sched_heap_reclaim_mgr reclaim_mgr; 1122 struct kbase_csf_mcu_shared_regions mcu_regs_data; 1123 }; 1124 1125 /* 1126 * Number of GPU cycles per unit of the global progress timeout. 1127 */ 1128 #define GLB_PROGRESS_TIMER_TIMEOUT_SCALE ((u64)1024) 1129 1130 /* 1131 * Maximum value of the global progress timeout. 1132 */ 1133 #define GLB_PROGRESS_TIMER_TIMEOUT_MAX \ 1134 ((GLB_PROGRESS_TIMER_TIMEOUT_MASK >> \ 1135 GLB_PROGRESS_TIMER_TIMEOUT_SHIFT) * \ 1136 GLB_PROGRESS_TIMER_TIMEOUT_SCALE) 1137 1138 /* 1139 * Default GLB_PWROFF_TIMER_TIMEOUT value in unit of micro-seconds. 1140 */ 1141 #define DEFAULT_GLB_PWROFF_TIMEOUT_US (800) 1142 1143 /* 1144 * In typical operations, the management of the shader core power transitions 1145 * is delegated to the MCU/firmware. However, if the host driver is configured 1146 * to take direct control, one needs to disable the MCU firmware GLB_PWROFF 1147 * timer. 1148 */ 1149 #define DISABLE_GLB_PWROFF_TIMER (0) 1150 1151 /* Index of the GPU_ACTIVE counter within the CSHW counter block */ 1152 #define GPU_ACTIVE_CNT_IDX (4) 1153 1154 /* 1155 * Maximum number of sessions that can be managed by the IPA Control component. 1156 */ 1157 #if MALI_UNIT_TEST 1158 #define KBASE_IPA_CONTROL_MAX_SESSIONS ((size_t)8) 1159 #else 1160 #define KBASE_IPA_CONTROL_MAX_SESSIONS ((size_t)2) 1161 #endif 1162 1163 /** 1164 * enum kbase_ipa_core_type - Type of counter block for performance counters 1165 * 1166 * @KBASE_IPA_CORE_TYPE_CSHW: CS Hardware counters. 1167 * @KBASE_IPA_CORE_TYPE_MEMSYS: Memory System counters. 1168 * @KBASE_IPA_CORE_TYPE_TILER: Tiler counters. 1169 * @KBASE_IPA_CORE_TYPE_SHADER: Shader Core counters. 1170 * @KBASE_IPA_CORE_TYPE_NUM: Number of core types. 1171 */ 1172 enum kbase_ipa_core_type { 1173 KBASE_IPA_CORE_TYPE_CSHW = 0, 1174 KBASE_IPA_CORE_TYPE_MEMSYS, 1175 KBASE_IPA_CORE_TYPE_TILER, 1176 KBASE_IPA_CORE_TYPE_SHADER, 1177 KBASE_IPA_CORE_TYPE_NUM 1178 }; 1179 1180 /* 1181 * Number of configurable counters per type of block on the IPA Control 1182 * interface. 1183 */ 1184 #define KBASE_IPA_CONTROL_NUM_BLOCK_COUNTERS ((size_t)8) 1185 1186 /* 1187 * Total number of configurable counters existing on the IPA Control interface. 1188 */ 1189 #define KBASE_IPA_CONTROL_MAX_COUNTERS \ 1190 ((size_t)KBASE_IPA_CORE_TYPE_NUM * KBASE_IPA_CONTROL_NUM_BLOCK_COUNTERS) 1191 1192 /** 1193 * struct kbase_ipa_control_prfcnt - Session for a single performance counter 1194 * 1195 * @latest_raw_value: Latest raw value read from the counter. 1196 * @scaling_factor: Factor raw value shall be multiplied by. 1197 * @accumulated_diff: Partial sum of scaled and normalized values from 1198 * previous samples. This represent all the values 1199 * that were read before the latest raw value. 1200 * @type: Type of counter block for performance counter. 1201 * @select_idx: Index of the performance counter as configured on 1202 * the IPA Control interface. 1203 * @gpu_norm: Indicating whether values shall be normalized by 1204 * GPU frequency. If true, returned values represent 1205 * an interval of time expressed in seconds (when the 1206 * scaling factor is set to 1). 1207 */ 1208 struct kbase_ipa_control_prfcnt { 1209 u64 latest_raw_value; 1210 u64 scaling_factor; 1211 u64 accumulated_diff; 1212 enum kbase_ipa_core_type type; 1213 u8 select_idx; 1214 bool gpu_norm; 1215 }; 1216 1217 /** 1218 * struct kbase_ipa_control_session - Session for an IPA Control client 1219 * 1220 * @prfcnts: Sessions for individual performance counters. 1221 * @num_prfcnts: Number of performance counters. 1222 * @active: Indicates whether this slot is in use or not 1223 * @last_query_time: Time of last query, in ns 1224 * @protm_time: Amount of time (in ns) that GPU has been in protected 1225 */ 1226 struct kbase_ipa_control_session { 1227 struct kbase_ipa_control_prfcnt prfcnts[KBASE_IPA_CONTROL_MAX_COUNTERS]; 1228 size_t num_prfcnts; 1229 bool active; 1230 u64 last_query_time; 1231 u64 protm_time; 1232 }; 1233 1234 /** 1235 * struct kbase_ipa_control_prfcnt_config - Performance counter configuration 1236 * 1237 * @idx: Index of the performance counter inside the block, as specified 1238 * in the GPU architecture. 1239 * @refcount: Number of client sessions bound to this counter. 1240 * 1241 * This structure represents one configurable performance counter of 1242 * the IPA Control interface. The entry may be mapped to a specific counter 1243 * by one or more client sessions. The counter is considered to be unused 1244 * if it isn't part of any client session. 1245 */ 1246 struct kbase_ipa_control_prfcnt_config { 1247 u8 idx; 1248 u8 refcount; 1249 }; 1250 1251 /** 1252 * struct kbase_ipa_control_prfcnt_block - Block of performance counters 1253 * 1254 * @select: Current performance counter configuration. 1255 * @num_available_counters: Number of counters that are not already configured. 1256 * 1257 */ 1258 struct kbase_ipa_control_prfcnt_block { 1259 struct kbase_ipa_control_prfcnt_config select[KBASE_IPA_CONTROL_NUM_BLOCK_COUNTERS]; 1260 size_t num_available_counters; 1261 }; 1262 1263 /** 1264 * struct kbase_ipa_control - Manager of the IPA Control interface. 1265 * 1266 * @blocks: Current configuration of performance counters 1267 * for the IPA Control interface. 1268 * @sessions: State of client sessions, storing information 1269 * like performance counters the client subscribed to 1270 * and latest value read from each counter. 1271 * @lock: Spinlock to serialize access by concurrent clients. 1272 * @rtm_listener_data: Private data for allocating a GPU frequency change 1273 * listener. 1274 * @num_active_sessions: Number of sessions opened by clients. 1275 * @cur_gpu_rate: Current GPU top-level operating frequency, in Hz. 1276 * @rtm_listener_data: Private data for allocating a GPU frequency change 1277 * listener. 1278 * @protm_start: Time (in ns) at which the GPU entered protected mode 1279 */ 1280 struct kbase_ipa_control { 1281 struct kbase_ipa_control_prfcnt_block blocks[KBASE_IPA_CORE_TYPE_NUM]; 1282 struct kbase_ipa_control_session sessions[KBASE_IPA_CONTROL_MAX_SESSIONS]; 1283 spinlock_t lock; 1284 void *rtm_listener_data; 1285 size_t num_active_sessions; 1286 u32 cur_gpu_rate; 1287 u64 protm_start; 1288 }; 1289 1290 /** 1291 * struct kbase_csf_firmware_interface - Interface in the MCU firmware 1292 * 1293 * @node: Interface objects are on the kbase_device:csf.firmware_interfaces 1294 * list using this list_head to link them 1295 * @phys: Array of the physical (tagged) addresses making up this interface 1296 * @reuse_pages: Flag used to identify if the FW interface entry reuses 1297 * physical pages allocated for another FW interface entry. 1298 * @is_small_page: Flag used to identify if small pages are used for 1299 * the FW interface entry. 1300 * @name: NULL-terminated string naming the interface 1301 * @num_pages: Number of entries in @phys and @pma (and length of the interface) 1302 * @num_pages_aligned: Same as @num_pages except for the case when @is_small_page 1303 * is false and @reuse_pages is false and therefore will be 1304 * aligned to NUM_4K_PAGES_IN_2MB_PAGE. 1305 * @virtual: Starting GPU virtual address this interface is mapped at 1306 * @flags: bitmask of CSF_FIRMWARE_ENTRY_* conveying the interface attributes 1307 * @data_start: Offset into firmware image at which the interface data starts 1308 * @data_end: Offset into firmware image at which the interface data ends 1309 * @virtual_exe_start: Starting GPU execution virtual address of this interface 1310 * @kernel_map: A kernel mapping of the memory or NULL if not required to be 1311 * mapped in the kernel 1312 * @pma: Array of pointers to protected memory allocations. 1313 */ 1314 struct kbase_csf_firmware_interface { 1315 struct list_head node; 1316 struct tagged_addr *phys; 1317 bool reuse_pages; 1318 bool is_small_page; 1319 char *name; 1320 u32 num_pages; 1321 u32 num_pages_aligned; 1322 u32 virtual; 1323 u32 flags; 1324 u32 data_start; 1325 u32 data_end; 1326 u32 virtual_exe_start; 1327 void *kernel_map; 1328 struct protected_memory_allocation **pma; 1329 }; 1330 1331 /* 1332 * struct kbase_csf_hwcnt - Object containing members for handling the dump of 1333 * HW counters. 1334 * 1335 * @request_pending: Flag set when HWC requested and used for HWC sample 1336 * done interrupt. 1337 * @enable_pending: Flag set when HWC enable status change and used for 1338 * enable done interrupt. 1339 */ 1340 struct kbase_csf_hwcnt { 1341 bool request_pending; 1342 bool enable_pending; 1343 }; 1344 1345 /* 1346 * struct kbase_csf_mcu_fw - Object containing device loaded MCU firmware data. 1347 * 1348 * @size: Loaded firmware data size. Meaningful only when the 1349 * other field @p data is not NULL. 1350 * @data: Pointer to the device retained firmware data. If NULL 1351 * means not loaded yet or error in loading stage. 1352 */ 1353 struct kbase_csf_mcu_fw { 1354 size_t size; 1355 u8 *data; 1356 }; 1357 1358 /* 1359 * Firmware log polling period. 1360 */ 1361 #define KBASE_CSF_FIRMWARE_LOG_POLL_PERIOD_MS 25 1362 1363 /** 1364 * enum kbase_csf_firmware_log_mode - Firmware log operating mode 1365 * 1366 * @KBASE_CSF_FIRMWARE_LOG_MODE_MANUAL: Manual mode, firmware log can be read 1367 * manually by the userspace (and it will also be dumped automatically into 1368 * dmesg on GPU reset). 1369 * 1370 * @KBASE_CSF_FIRMWARE_LOG_MODE_AUTO_PRINT: Automatic printing mode, firmware log 1371 * will be periodically emptied into dmesg, manual reading through debugfs is 1372 * disabled. 1373 */ 1374 enum kbase_csf_firmware_log_mode { 1375 KBASE_CSF_FIRMWARE_LOG_MODE_MANUAL, 1376 KBASE_CSF_FIRMWARE_LOG_MODE_AUTO_PRINT 1377 }; 1378 1379 /** 1380 * struct kbase_csf_firmware_log - Object containing members for handling firmware log. 1381 * 1382 * @mode: Firmware log operating mode. 1383 * @busy: Indicating whether a firmware log operation is in progress. 1384 * @poll_work: Work item that would poll firmware log buffer 1385 * at regular intervals to perform any periodic 1386 * activities required by current log mode. 1387 * @dump_buf: Buffer used for dumping the log. 1388 * @func_call_list_va_start: Virtual address of the start of the call list of FW log functions. 1389 * @func_call_list_va_end: Virtual address of the end of the call list of FW log functions. 1390 */ 1391 struct kbase_csf_firmware_log { 1392 enum kbase_csf_firmware_log_mode mode; 1393 atomic_t busy; 1394 struct delayed_work poll_work; 1395 u8 *dump_buf; 1396 u32 func_call_list_va_start; 1397 u32 func_call_list_va_end; 1398 }; 1399 1400 /** 1401 * struct kbase_csf_firmware_core_dump - Object containing members for handling 1402 * firmware core dump. 1403 * 1404 * @mcu_regs_addr: GPU virtual address of the start of the MCU registers buffer 1405 * in Firmware. 1406 * @version: Version of the FW image header core dump data format. Bits 1407 * 7:0 specify version minor and 15:8 specify version major. 1408 * @available: Flag to identify if the FW core dump buffer is available. 1409 * True if entry is available in the FW image header and version 1410 * is supported, False otherwise. 1411 */ 1412 struct kbase_csf_firmware_core_dump { 1413 u32 mcu_regs_addr; 1414 u16 version; 1415 bool available; 1416 }; 1417 1418 #if IS_ENABLED(CONFIG_DEBUG_FS) 1419 /** 1420 * struct kbase_csf_dump_on_fault - Faulty information to deliver to the daemon 1421 * 1422 * @error_code: Error code. 1423 * @kctx_tgid: tgid value of the Kbase context for which the fault happened. 1424 * @kctx_id: id of the Kbase context for which the fault happened. 1425 * @enabled: Flag to indicate that 'csf_fault' debugfs has been opened 1426 * so dump on fault is enabled. 1427 * @fault_wait_wq: Waitqueue on which user space client is blocked till kbase 1428 * reports a fault. 1429 * @dump_wait_wq: Waitqueue on which kbase threads are blocked till user space client 1430 * completes the dump on fault. 1431 * @lock: Lock to protect this struct members from concurrent access. 1432 */ 1433 struct kbase_csf_dump_on_fault { 1434 enum dumpfault_error_type error_code; 1435 u32 kctx_tgid; 1436 u32 kctx_id; 1437 atomic_t enabled; 1438 wait_queue_head_t fault_wait_wq; 1439 wait_queue_head_t dump_wait_wq; 1440 spinlock_t lock; 1441 }; 1442 #endif /* CONFIG_DEBUG_FS*/ 1443 1444 /** 1445 * struct kbase_csf_user_reg - Object containing members to manage the mapping 1446 * of USER Register page for all contexts 1447 * 1448 * @dummy_page: Address of a dummy page that is mapped in place 1449 * of the real USER Register page just before the GPU 1450 * is powered down. The USER Register page is mapped 1451 * in the address space of every process, that created 1452 * a Base context, to enable the access to LATEST_FLUSH 1453 * register from userspace. 1454 * @filp: Pointer to a dummy file, that along with @file_offset, 1455 * facilitates the use of unique file offset for the userspace mapping 1456 * created for USER Register page. 1457 * The userspace mapping is made to point to this file 1458 * inside the mmap handler. 1459 * @file_offset: Counter that is incremented every time Userspace creates a mapping of 1460 * USER Register page, to provide a unique file offset range for 1461 * @filp file, so that the CPU PTE of the Userspace mapping can be zapped 1462 * through the kernel function unmap_mapping_range(). 1463 * It is incremented in page units. 1464 * @list: Linked list to maintain user processes(contexts) 1465 * having the mapping to USER Register page. 1466 * It's protected by &kbase_csf_device.reg_lock. 1467 */ 1468 struct kbase_csf_user_reg { 1469 struct tagged_addr dummy_page; 1470 struct file *filp; 1471 u32 file_offset; 1472 struct list_head list; 1473 }; 1474 1475 /** 1476 * struct kbase_csf_device - Object representing CSF for an instance of GPU 1477 * platform device. 1478 * 1479 * @mcu_mmu: MMU page tables for the MCU firmware 1480 * @firmware_interfaces: List of interfaces defined in the firmware image 1481 * @firmware_config: List of configuration options within the firmware 1482 * image 1483 * @firmware_timeline_metadata: List of timeline meta-data within the firmware 1484 * image 1485 * @fw_cfg_kobj: Pointer to the kobject corresponding to the sysf 1486 * directory that contains a sub-directory for each 1487 * of the configuration option present in the 1488 * firmware image. 1489 * @firmware_trace_buffers: List of trace buffers described in the firmware 1490 * image. 1491 * @shared_interface: Pointer to the interface object containing info for 1492 * the memory area shared between firmware & host. 1493 * @shared_reg_rbtree: RB tree of the memory regions allocated from the 1494 * shared interface segment in MCU firmware address 1495 * space. 1496 * @db_filp: Pointer to a dummy file, that alongwith 1497 * @db_file_offsets, facilitates the use of unqiue 1498 * file offset for the userspace mapping created 1499 * for Hw Doorbell pages. The userspace mapping 1500 * is made to point to this file inside the mmap 1501 * handler. 1502 * @db_file_offsets: Counter that is incremented every time a GPU 1503 * command queue is bound to provide a unique file 1504 * offset range for @db_filp file, so that pte of 1505 * Doorbell page can be zapped through the kernel 1506 * function unmap_mapping_range(). It is incremented 1507 * in page units. 1508 * @dummy_db_page: Address of the dummy page that is mapped in place 1509 * of the real Hw doorbell page for the active GPU 1510 * command queues after they are stopped or after the 1511 * GPU is powered down. 1512 * @reg_lock: Lock to serialize the MCU firmware related actions 1513 * that affect all contexts such as allocation of 1514 * regions from shared interface area, assignment of 1515 * hardware doorbell pages, assignment of CSGs, 1516 * sending global requests. 1517 * @event_wait: Wait queue to wait for receiving csf events, i.e. 1518 * the interrupt from CSF firmware, or scheduler state 1519 * changes. 1520 * @interrupt_received: Flag set when the interrupt is received from CSF fw 1521 * @global_iface: The result of parsing the global interface 1522 * structure set up by the firmware, including the 1523 * CSGs, CSs, and their properties 1524 * @scheduler: The CS scheduler instance. 1525 * @reset: Contain members required for GPU reset handling. 1526 * @progress_timeout: Maximum number of GPU clock cycles without forward 1527 * progress to allow, for all tasks running on 1528 * hardware endpoints (e.g. shader cores), before 1529 * terminating a GPU command queue group. 1530 * Must not exceed @GLB_PROGRESS_TIMER_TIMEOUT_MAX. 1531 * @pma_dev: Pointer to protected memory allocator device. 1532 * @firmware_inited: Flag for indicating that the cold-boot stage of 1533 * the MCU has completed. 1534 * @firmware_reloaded: Flag for indicating a firmware reload operation 1535 * in GPU reset has completed. 1536 * @firmware_reload_needed: Flag for indicating that the firmware needs to be 1537 * reloaded as part of the GPU reset action. 1538 * @firmware_full_reload_needed: Flag for indicating that the firmware needs to 1539 * be fully re-loaded. This may be set when the 1540 * boot or re-init of MCU fails after a successful 1541 * soft reset. 1542 * @firmware_hctl_core_pwr: Flag for indicating that the host diver is in 1543 * charge of the shader core's power transitions, and 1544 * the mcu_core_pwroff timeout feature is disabled 1545 * (i.e. configured 0 in the register field). If 1546 * false, the control is delegated to the MCU. 1547 * @firmware_reload_work: Work item for facilitating the procedural actions 1548 * on reloading the firmware. 1549 * @glb_init_request_pending: Flag to indicate that Global requests have been 1550 * sent to the FW after MCU was re-enabled and their 1551 * acknowledgement is pending. 1552 * @fw_error_work: Work item for handling the firmware internal error 1553 * fatal event. 1554 * @ipa_control: IPA Control component manager. 1555 * @mcu_core_pwroff_dur_us: Sysfs attribute for the glb_pwroff timeout input 1556 * in unit of micro-seconds. The firmware does not use 1557 * it directly. 1558 * @mcu_core_pwroff_dur_count: The counterpart of the glb_pwroff timeout input 1559 * in interface required format, ready to be used 1560 * directly in the firmware. 1561 * @mcu_core_pwroff_reg_shadow: The actual value that has been programed into 1562 * the glb_pwoff register. This is separated from 1563 * the @p mcu_core_pwroff_dur_count as an update 1564 * to the latter is asynchronous. 1565 * @gpu_idle_hysteresis_us: Sysfs attribute for the idle hysteresis time 1566 * window in unit of microseconds. The firmware does not 1567 * use it directly. 1568 * @gpu_idle_dur_count: The counterpart of the hysteresis time window in 1569 * interface required format, ready to be used 1570 * directly in the firmware. 1571 * @fw_timeout_ms: Timeout value (in milliseconds) used when waiting 1572 * for any request sent to the firmware. 1573 * @hwcnt: Contain members required for handling the dump of 1574 * HW counters. 1575 * @fw: Copy of the loaded MCU firmware image. 1576 * @fw_log: Contain members required for handling firmware log. 1577 * @fw_core_dump: Contain members required for handling the firmware 1578 * core dump. 1579 * @dof: Structure for dump on fault. 1580 * @user_reg: Collective information to support the mapping to 1581 * USER Register page for user processes. 1582 */ 1583 struct kbase_csf_device { 1584 struct kbase_mmu_table mcu_mmu; 1585 struct list_head firmware_interfaces; 1586 struct list_head firmware_config; 1587 struct list_head firmware_timeline_metadata; 1588 struct kobject *fw_cfg_kobj; 1589 struct kbase_csf_trace_buffers firmware_trace_buffers; 1590 void *shared_interface; 1591 struct rb_root shared_reg_rbtree; 1592 struct file *db_filp; 1593 u32 db_file_offsets; 1594 struct tagged_addr dummy_db_page; 1595 struct mutex reg_lock; 1596 wait_queue_head_t event_wait; 1597 bool interrupt_received; 1598 struct kbase_csf_global_iface global_iface; 1599 struct kbase_csf_scheduler scheduler; 1600 struct kbase_csf_reset_gpu reset; 1601 atomic64_t progress_timeout; 1602 struct protected_memory_allocator_device *pma_dev; 1603 bool firmware_inited; 1604 bool firmware_reloaded; 1605 bool firmware_reload_needed; 1606 bool firmware_full_reload_needed; 1607 bool firmware_hctl_core_pwr; 1608 struct work_struct firmware_reload_work; 1609 bool glb_init_request_pending; 1610 struct work_struct fw_error_work; 1611 struct kbase_ipa_control ipa_control; 1612 u32 mcu_core_pwroff_dur_us; 1613 u32 mcu_core_pwroff_dur_count; 1614 u32 mcu_core_pwroff_reg_shadow; 1615 u32 gpu_idle_hysteresis_us; 1616 u32 gpu_idle_dur_count; 1617 unsigned int fw_timeout_ms; 1618 struct kbase_csf_hwcnt hwcnt; 1619 struct kbase_csf_mcu_fw fw; 1620 struct kbase_csf_firmware_log fw_log; 1621 struct kbase_csf_firmware_core_dump fw_core_dump; 1622 #if IS_ENABLED(CONFIG_DEBUG_FS) 1623 struct kbase_csf_dump_on_fault dof; 1624 #endif /* CONFIG_DEBUG_FS */ 1625 #if IS_ENABLED(CONFIG_MALI_CORESIGHT) 1626 /** 1627 * @coresight: Coresight device structure. 1628 */ 1629 struct kbase_debug_coresight_device coresight; 1630 #endif /* IS_ENABLED(CONFIG_MALI_CORESIGHT) */ 1631 struct kbase_csf_user_reg user_reg; 1632 }; 1633 1634 /** 1635 * struct kbase_as - Object representing an address space of GPU. 1636 * @number: Index at which this address space structure is present 1637 * in an array of address space structures embedded inside 1638 * the &struct kbase_device. 1639 * @pf_wq: Workqueue for processing work items related to 1640 * Page fault, Bus fault and GPU fault handling. 1641 * @work_pagefault: Work item for the Page fault handling. 1642 * @work_busfault: Work item for the Bus fault handling. 1643 * @work_gpufault: Work item for the GPU fault handling. 1644 * @pf_data: Data relating to Page fault. 1645 * @bf_data: Data relating to Bus fault. 1646 * @gf_data: Data relating to GPU fault. 1647 * @current_setup: Stores the MMU configuration for this address space. 1648 * @is_unresponsive: Flag to indicate MMU is not responding. 1649 * Set if a MMU command isn't completed within 1650 * &kbase_device:mmu_as_inactive_wait_time_ms. 1651 * Clear by kbase_ctx_sched_restore_all_as() after GPU reset completes. 1652 */ 1653 struct kbase_as { 1654 int number; 1655 struct workqueue_struct *pf_wq; 1656 struct work_struct work_pagefault; 1657 struct work_struct work_busfault; 1658 struct work_struct work_gpufault; 1659 struct kbase_fault pf_data; 1660 struct kbase_fault bf_data; 1661 struct kbase_fault gf_data; 1662 struct kbase_mmu_setup current_setup; 1663 bool is_unresponsive; 1664 }; 1665 1666 #endif /* _KBASE_CSF_DEFS_H_ */ 1667