| /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/csf/ |
| H A D | mali_kbase_debug_csf_fault.c |
    38   spin_lock_irqsave(&kbdev->csf.dof.lock, flags);    in kbasep_fault_occurred()
    39   ret = (kbdev->csf.dof.error_code != DF_NO_ERROR);    in kbasep_fault_occurred()
    40   spin_unlock_irqrestore(&kbdev->csf.dof.lock, flags);    in kbasep_fault_occurred()
    52   wait_event(kbdev->csf.dof.dump_wait_wq, kbase_debug_csf_fault_dump_complete(kbdev));    in kbase_debug_csf_fault_wait_completion()
    63   wake_up_interruptible(&kbdev->csf.dof.fault_wait_wq);    in kbase_debug_csf_fault_wakeup()
    83   spin_lock_irqsave(&kbdev->csf.dof.lock, flags);    in kbase_debug_csf_fault_notify()
    86   if (kbdev->csf.dof.error_code) {    in kbase_debug_csf_fault_notify()
    91   kbdev->csf.dof.kctx_tgid = kctx ? kctx->tgid : 0;    in kbase_debug_csf_fault_notify()
    92   kbdev->csf.dof.kctx_id = kctx ? kctx->id : 0;    in kbase_debug_csf_fault_notify()
    93   kbdev->csf.dof.error_code = error;    in kbase_debug_csf_fault_notify()
    [all …]
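
    The hits above outline a fault-on-dump handshake: an error code guarded by a spinlock, a waiter blocked on a wait queue until the code is cleared, and a notify path that records the fault and wakes listeners. Below is a minimal sketch of that idiom with stand-in names (struct fault_state and its fields are hypothetical, not the driver's kbase_device layout); it illustrates the pattern, not the driver's actual implementation.

        #include <linux/spinlock.h>
        #include <linux/wait.h>
        #include <linux/types.h>

        enum { DF_NO_ERROR = 0 };

        struct fault_state {
                spinlock_t lock;                 /* protects error_code */
                unsigned int error_code;         /* DF_NO_ERROR when nothing is pending */
                wait_queue_head_t dump_wait_wq;  /* woken when the dump is complete */
                wait_queue_head_t fault_wait_wq; /* woken when a fault is recorded */
        };

        /* Locked check, as in kbasep_fault_occurred() */
        static bool fault_occurred(struct fault_state *st)
        {
                unsigned long flags;
                bool ret;

                spin_lock_irqsave(&st->lock, flags);
                ret = (st->error_code != DF_NO_ERROR);
                spin_unlock_irqrestore(&st->lock, flags);
                return ret;
        }

        /* Block until the pending fault has been consumed (error_code cleared) */
        static void fault_wait_dump_complete(struct fault_state *st)
        {
                wait_event(st->dump_wait_wq, !fault_occurred(st));
        }

        /* Record a fault and wake any waiter; only the first error is kept,
         * which is what the hits at lines 86-93 suggest. */
        static void fault_notify(struct fault_state *st, unsigned int error)
        {
                unsigned long flags;

                spin_lock_irqsave(&st->lock, flags);
                if (st->error_code == DF_NO_ERROR)
                        st->error_code = error;
                spin_unlock_irqrestore(&st->lock, flags);

                wake_up_interruptible(&st->fault_wait_wq);
        }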
|
| H A D | mali_kbase_csf_cpu_queue_debugfs.c |
    31   if (atomic_cmpxchg(&kctx->csf.cpu_queue.dump_req_status,    in kbase_csf_cpu_queue_read_dump_req()
    54   mutex_lock(&kctx->csf.lock);    in kbasep_csf_cpu_queue_debugfs_show()
    55   if (atomic_read(&kctx->csf.cpu_queue.dump_req_status) !=    in kbasep_csf_cpu_queue_debugfs_show()
    58   mutex_unlock(&kctx->csf.lock);    in kbasep_csf_cpu_queue_debugfs_show()
    62   atomic_set(&kctx->csf.cpu_queue.dump_req_status, BASE_CSF_CPU_QUEUE_DUMP_ISSUED);    in kbasep_csf_cpu_queue_debugfs_show()
    63   init_completion(&kctx->csf.cpu_queue.dump_cmp);    in kbasep_csf_cpu_queue_debugfs_show()
    65   mutex_unlock(&kctx->csf.lock);    in kbasep_csf_cpu_queue_debugfs_show()
    70   wait_for_completion_timeout(&kctx->csf.cpu_queue.dump_cmp,    in kbasep_csf_cpu_queue_debugfs_show()
    73   mutex_lock(&kctx->csf.lock);    in kbasep_csf_cpu_queue_debugfs_show()
    74   if (kctx->csf.cpu_queue.buffer) {    in kbasep_csf_cpu_queue_debugfs_show()
    [all …]
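
    These hits show a debugfs dump request gated by an atomic status word and completed via wait_for_completion_timeout(). The sketch below reproduces that shape only; the type, state names, and timeout value are stand-ins, not the driver's definitions.

        #include <linux/atomic.h>
        #include <linux/completion.h>
        #include <linux/errno.h>
        #include <linux/jiffies.h>
        #include <linux/mutex.h>

        #define DUMP_TIMEOUT_MS 3000            /* hypothetical timeout */

        enum dump_status { DUMP_COMPLETE = 0, DUMP_ISSUED = 1 };

        struct cpu_queue_dump {
                struct mutex lock;              /* serialises dump requests */
                atomic_t dump_req_status;       /* DUMP_COMPLETE or DUMP_ISSUED */
                struct completion dump_cmp;     /* signalled when the buffer is ready */
                char *buffer;                   /* filled in by the producer side */
        };

        /* Reader side: issue a dump request and wait for the producer. */
        static int request_dump(struct cpu_queue_dump *d)
        {
                mutex_lock(&d->lock);
                if (atomic_read(&d->dump_req_status) != DUMP_COMPLETE) {
                        mutex_unlock(&d->lock);
                        return -EBUSY;          /* a dump is already in flight */
                }
                atomic_set(&d->dump_req_status, DUMP_ISSUED);
                init_completion(&d->dump_cmp);
                mutex_unlock(&d->lock);

                /* The producer completes dump_cmp once d->buffer is populated. */
                if (!wait_for_completion_timeout(&d->dump_cmp,
                                                 msecs_to_jiffies(DUMP_TIMEOUT_MS)))
                        return -ETIMEDOUT;

                return 0;
        }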
|
| H A D | Kbuild |
    22   csf/mali_kbase_csf_firmware_cfg.o \
    23   csf/mali_kbase_csf_trace_buffer.o \
    24   csf/mali_kbase_csf.o \
    25   csf/mali_kbase_csf_scheduler.o \
    26   csf/mali_kbase_csf_kcpu.o \
    27   csf/mali_kbase_csf_tiler_heap.o \
    28   csf/mali_kbase_csf_timeout.o \
    29   csf/mali_kbase_csf_tl_reader.o \
    30   csf/mali_kbase_csf_heap_context_alloc.o \
    31   csf/mali_kbase_csf_reset_gpu.o \
    [all …]
|
| H A D | mali_kbase_csf_reset_gpu.c |
    101   down_read(&kbdev->csf.reset.sem);    in kbase_reset_gpu_prevent_and_wait()
    103   if (atomic_read(&kbdev->csf.reset.state) ==    in kbase_reset_gpu_prevent_and_wait()
    105   up_read(&kbdev->csf.reset.sem);    in kbase_reset_gpu_prevent_and_wait()
    110   up_read(&kbdev->csf.reset.sem);    in kbase_reset_gpu_prevent_and_wait()
    120   if (!down_read_trylock(&kbdev->csf.reset.sem))    in kbase_reset_gpu_try_prevent()
    123   if (atomic_read(&kbdev->csf.reset.state) ==    in kbase_reset_gpu_try_prevent()
    125   up_read(&kbdev->csf.reset.sem);    in kbase_reset_gpu_try_prevent()
    130   up_read(&kbdev->csf.reset.sem);    in kbase_reset_gpu_try_prevent()
    139   up_read(&kbdev->csf.reset.sem);    in kbase_reset_gpu_allow()
    146   lockdep_assert_held_read(&kbdev->csf.reset.sem);    in kbase_reset_gpu_assert_prevented()
    [all …]
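
    The hits above suggest a reset-prevention idiom built on a read-write semaphore: code that must not race a GPU reset takes the semaphore for read and backs out if a reset has already failed or is in progress. A minimal sketch follows; the type and state values are stand-ins chosen for illustration.

        #include <linux/atomic.h>
        #include <linux/errno.h>
        #include <linux/rwsem.h>

        enum reset_state { RESET_NONE = 0, RESET_FAILED = 1 };

        struct reset_ctl {
                struct rw_semaphore sem;        /* held for read while reset must not run */
                atomic_t state;                 /* current reset state */
        };

        /* Non-blocking variant, roughly the shape of kbase_reset_gpu_try_prevent() */
        static int reset_try_prevent(struct reset_ctl *rc)
        {
                if (!down_read_trylock(&rc->sem))
                        return -EAGAIN;         /* a reset (writer) holds the semaphore */

                if (atomic_read(&rc->state) == RESET_FAILED) {
                        up_read(&rc->sem);
                        return -ENODEV;         /* GPU unusable; do not proceed */
                }
                return 0;                       /* caller may proceed; reset is held off */
        }

        /* Drop the shared hold, roughly kbase_reset_gpu_allow() */
        static void reset_allow(struct reset_ctl *rc)
        {
                up_read(&rc->sem);
        }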
|
| H A D | mali_kbase_csf_scheduler.c |
    115   struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;    in wait_for_dump_complete_on_group_deschedule()
    117   lockdep_assert_held(&kctx->csf.lock);    in wait_for_dump_complete_on_group_deschedule()
    128   mutex_unlock(&kctx->csf.lock);    in wait_for_dump_complete_on_group_deschedule()
    130   mutex_lock(&kctx->csf.lock);    in wait_for_dump_complete_on_group_deschedule()
    161   struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;    in schedule_actions_trigger_df()
    195   struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;    in wait_for_scheduler_to_exit_sleep()
    225   kbdev->csf.event_wait,    in wait_for_scheduler_to_exit_sleep()
    249   struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;    in force_scheduler_to_exit_sleep()
    315   csf.scheduler.tick_timer);    in tick_timer_callback()
    332   struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;    in start_tick_timer()
    [all …]
|
| H A D | mali_kbase_csf_event.c |
    57   spin_lock_irqsave(&kctx->csf.event.lock, flags);    in kbase_csf_event_wait_add()
    58   list_add_tail(&event_cb->link, &kctx->csf.event.callback_list);    in kbase_csf_event_wait_add()
    62   spin_unlock_irqrestore(&kctx->csf.event.lock, flags);    in kbase_csf_event_wait_add()
    76   spin_lock_irqsave(&kctx->csf.event.lock, flags);    in kbase_csf_event_wait_remove()
    78   list_for_each_entry(event_cb, &kctx->csf.event.callback_list, link) {    in kbase_csf_event_wait_remove()
    88   spin_unlock_irqrestore(&kctx->csf.event.lock, flags);    in kbase_csf_event_wait_remove()
    137   spin_lock_irqsave(&kctx->csf.event.lock, flags);    in kbase_csf_event_signal()
    140   event_cb, next_event_cb, &kctx->csf.event.callback_list, link) {    in kbase_csf_event_signal()
    153   spin_unlock_irqrestore(&kctx->csf.event.lock, flags);    in kbase_csf_event_signal()
    161   spin_lock_irqsave(&kctx->csf.event.lock, flags);    in kbase_csf_event_term()
    [all …]
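
    These hits describe a spinlock-protected callback list: registration appends under the lock, and signalling walks the list with the _safe iterator so entries can be dropped during the walk. The sketch below shows the pattern with hypothetical types (event_cb/event_ctx are illustrative names, not the driver's).

        #include <linux/list.h>
        #include <linux/spinlock.h>

        struct event_cb {
                struct list_head link;
                void (*fn)(void *param);        /* callback invoked on signal */
                void *param;
        };

        struct event_ctx {
                spinlock_t lock;                /* protects callback_list */
                struct list_head callback_list;
        };

        static void event_wait_add(struct event_ctx *ev, struct event_cb *cb)
        {
                unsigned long flags;

                spin_lock_irqsave(&ev->lock, flags);
                list_add_tail(&cb->link, &ev->callback_list);
                spin_unlock_irqrestore(&ev->lock, flags);
        }

        static void event_signal(struct event_ctx *ev)
        {
                struct event_cb *cb, *next;
                unsigned long flags;

                spin_lock_irqsave(&ev->lock, flags);
                /* _safe iteration: a callback may remove its own entry */
                list_for_each_entry_safe(cb, next, &ev->callback_list, link)
                        cb->fn(cb->param);
                spin_unlock_irqrestore(&ev->lock, flags);
        }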
|
| H A D | mali_kbase_csf_tiler_heap_reclaim.c |
    42   if (!list_empty(&kctx->csf.sched.runnable_groups[prio]))    in get_kctx_highest_csg_priority()
    45   if (prio != KBASE_QUEUE_GROUP_PRIORITY_REALTIME && kctx->csf.sched.num_idle_wait_grps) {    in get_kctx_highest_csg_priority()
    48   list_for_each_entry(group, &kctx->csf.sched.idle_wait_groups, link) {    in get_kctx_highest_csg_priority()
    59   struct kbase_csf_scheduler *const scheduler = &kctx->kbdev->csf.scheduler;    in detach_ctx_from_heap_reclaim_mgr()
    60   struct kbase_csf_ctx_heap_reclaim_info *info = &kctx->csf.sched.heap_info;    in detach_ctx_from_heap_reclaim_mgr()
    82   struct kbase_csf_ctx_heap_reclaim_info *const info = &kctx->csf.sched.heap_info;    in attach_ctx_to_heap_reclaim_mgr()
    83   struct kbase_csf_scheduler *const scheduler = &kctx->kbdev->csf.scheduler;    in attach_ctx_to_heap_reclaim_mgr()
    107   struct kbase_csf_ctx_heap_reclaim_info *info = &kctx->csf.sched.heap_info;    in kbase_csf_tiler_heap_reclaim_sched_notify_grp_active()
    109   lockdep_assert_held(&kctx->kbdev->csf.scheduler.lock);    in kbase_csf_tiler_heap_reclaim_sched_notify_grp_active()
    124   struct kbase_csf_ctx_heap_reclaim_info *const info = &kctx->csf.sched.heap_info;    in kbase_csf_tiler_heap_reclaim_sched_notify_grp_evict()
    [all …]
|
| H A D | mali_kbase_csf_firmware_no_mali.c |
    159   kbdev->csf.shared_interface = interface;    in invent_memory_setup_entry()
    160   list_add(&interface->node, &kbdev->csf.firmware_interfaces);    in invent_memory_setup_entry()
    168   struct kbase_csf_global_iface *iface = &kbdev->csf.global_iface;    in free_global_iface()
    222   struct dummy_firmware_interface *interface = kbdev->csf.shared_interface;    in invent_capabilities()
    223   struct kbase_csf_global_iface *iface = &kbdev->csf.global_iface;    in invent_capabilities()
    464   iface = &kbdev->csf.global_iface;    in csf_doorbell_prfcnt()
    600   container_of(data, struct kbase_device, csf.fw_error_work);    in firmware_error_worker()
    609   &kbdev->csf.global_iface;    in global_request_complete()
    630   kbase_csf_timeout_in_jiffies(kbdev->csf.fw_timeout_ms);    in wait_for_global_request()
    634   remaining = wait_event_timeout(kbdev->csf.event_wait,    in wait_for_global_request()
    [all …]
|
| H A D | mali_kbase_csf.c |
    87   if (unlikely(kctx->csf.user_reg.vma))    in kbasep_ctx_user_reg_page_mapping_term()
    90   if (WARN_ON_ONCE(!list_empty(&kctx->csf.user_reg.link)))    in kbasep_ctx_user_reg_page_mapping_term()
    91   list_del_init(&kctx->csf.user_reg.link);    in kbasep_ctx_user_reg_page_mapping_term()
    103   INIT_LIST_HEAD(&kctx->csf.user_reg.link);    in kbasep_ctx_user_reg_page_mapping_init()
    104   kctx->csf.user_reg.vma = NULL;    in kbasep_ctx_user_reg_page_mapping_init()
    105   kctx->csf.user_reg.file_offset = 0;    in kbasep_ctx_user_reg_page_mapping_init()
    115   lockdep_assert_held(&kctx->csf.lock);    in put_user_pages_mmap_handle()
    123   if (!WARN_ON(kctx->csf.user_pages_info[cookie_nr] != queue)) {    in put_user_pages_mmap_handle()
    125   kctx->csf.user_pages_info[cookie_nr] = NULL;    in put_user_pages_mmap_handle()
    126   bitmap_set(kctx->csf.cookies, cookie_nr, 1);    in put_user_pages_mmap_handle()
    [all …]
|
| H A D | mali_kbase_csf_mcu_shared_reg.c |
    105   struct kbase_csf_mcu_shared_regions *shared_regs = &kbdev->csf.scheduler.mcu_regs_data;    in update_mapping_with_dummy_pages()
    114   struct kbase_csf_mcu_shared_regions *shared_regs = &kbdev->csf.scheduler.mcu_regs_data;    in insert_dummy_pages()
    118   return kbase_mmu_insert_pages(kbdev, &kbdev->csf.mcu_mmu, vpfn, shared_regs->dummy_phys,    in insert_dummy_pages()
    126   lockdep_assert_held(&group->kctx->kbdev->csf.scheduler.lock);    in notify_group_csg_reg_map_done()
    137   lockdep_assert_held(&kbdev->csf.scheduler.lock);    in notify_group_csg_reg_map_error()
    170   struct kbase_csf_mcu_shared_regions *shared_regs = &kbdev->csf.scheduler.mcu_regs_data;    in userio_pages_replace_phys()
    173   lockdep_assert_held(&kbdev->csf.scheduler.lock);    in userio_pages_replace_phys()
    202   const u32 nr_susp_pages = PFN_UP(kbdev->csf.global_iface.groups[0].suspend_size);    in csg_reg_update_on_csis()
    203   const u32 nr_csis = kbdev->csf.global_iface.groups[0].stream_num;    in csg_reg_update_on_csis()
    208   lockdep_assert_held(&kbdev->csf.scheduler.lock);    in csg_reg_update_on_csis()
    [all …]
|
| H A D | mali_kbase_csf_firmware.c |
    197   kbdev->csf.shared_interface;    in setup_shared_iface_static_region()
    204   reg = kbase_alloc_free_region(kbdev, &kbdev->csf.shared_reg_rbtree, 0,    in setup_shared_iface_static_region()
    207   mutex_lock(&kbdev->csf.reg_lock);    in setup_shared_iface_static_region()
    210   mutex_unlock(&kbdev->csf.reg_lock);    in setup_shared_iface_static_region()
    276   remaining = wait_event_timeout(kbdev->csf.event_wait,    in wait_for_firmware_boot()
    277   kbdev->csf.interrupt_received == true, wait_timeout);    in wait_for_firmware_boot()
    282   kbdev->csf.interrupt_received = false;    in wait_for_firmware_boot()
    354   kbase_mmu_update(kbdev, &kbdev->csf.mcu_mmu, MCU_AS_NR);    in load_mmu_tables()
    476   struct kbase_csf_mcu_fw *const mcu_fw = &kbdev->csf.fw;    in reload_fw_image()
    492   list_for_each_entry(interface, &kbdev->csf.firmware_interfaces, node) {    in reload_fw_image()
    [all …]
|
| H A D | mali_kbase_csf_scheduler.h |
    279   mutex_lock(&kbdev->csf.scheduler.lock);    in kbase_csf_scheduler_lock()
    289   mutex_unlock(&kbdev->csf.scheduler.lock);    in kbase_csf_scheduler_unlock()
    305   spin_lock_irqsave(&kbdev->csf.scheduler.interrupt_lock, *flags);    in kbase_csf_scheduler_spin_lock()
    318   spin_unlock_irqrestore(&kbdev->csf.scheduler.interrupt_lock, flags);    in kbase_csf_scheduler_spin_unlock()
    330   lockdep_assert_held(&kbdev->csf.scheduler.interrupt_lock);    in kbase_csf_scheduler_spin_lock_assert_held()
    373   return (kbdev->csf.scheduler.active_protm_grp != NULL);    in kbase_csf_scheduler_protected_mode_in_use()
    470   lockdep_assert_held(&kbdev->csf.scheduler.interrupt_lock);    in kbase_csf_scheduler_all_csgs_idle()
    471   return bitmap_equal(kbdev->csf.scheduler.csg_slots_idle_mask,    in kbase_csf_scheduler_all_csgs_idle()
    472   kbdev->csf.scheduler.csg_inuse_bitmap,    in kbase_csf_scheduler_all_csgs_idle()
    473   kbdev->csf.global_iface.group_num);    in kbase_csf_scheduler_all_csgs_idle()
    [all …]
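
    The header hits above are thin inline wrappers around the scheduler's mutex and interrupt spinlock; notably, the spin-lock helper receives the caller's IRQ flags by pointer so the irqsave macro can fill them in. A small sketch of that wrapper style, with a hypothetical sched_sync type standing in for the scheduler struct:

        #include <linux/mutex.h>
        #include <linux/spinlock.h>

        struct sched_sync {
                struct mutex lock;              /* long-held scheduler lock */
                spinlock_t interrupt_lock;      /* short-held, IRQ-safe lock */
        };

        static inline void sched_lock(struct sched_sync *s)
        {
                mutex_lock(&s->lock);
        }

        static inline void sched_unlock(struct sched_sync *s)
        {
                mutex_unlock(&s->lock);
        }

        /* flags passed by pointer so spin_lock_irqsave() can write into it */
        static inline void sched_spin_lock(struct sched_sync *s, unsigned long *flags)
        {
                spin_lock_irqsave(&s->interrupt_lock, *flags);
        }

        static inline void sched_spin_unlock(struct sched_sync *s, unsigned long flags)
        {
                spin_unlock_irqrestore(&s->interrupt_lock, flags);
        }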
|
| H A D | mali_kbase_csf_tiler_heap.c |
    181   lockdep_assert_held(&kctx->csf.tiler_heaps.lock);    in init_chunk()
    407   mutex_lock(&heap->kctx->csf.tiler_heaps.lock);    in create_chunk()
    409   mutex_unlock(&heap->kctx->csf.tiler_heaps.lock);    in create_chunk()
    523   kbase_csf_heap_context_allocator_free(&kctx->csf.tiler_heaps.ctx_alloc,    in delete_heap()
    560   lockdep_assert_held(&kctx->csf.tiler_heaps.lock);    in find_tiler_heap()
    562   list_for_each_entry(heap, &kctx->csf.tiler_heaps.list, link) {    in find_tiler_heap()
    578   lockdep_assert_held(&heap->kctx->csf.tiler_heaps.lock);    in find_chunk()
    593   &kctx->csf.tiler_heaps.ctx_alloc, kctx);    in kbase_csf_tiler_heap_context_init()
    598   INIT_LIST_HEAD(&kctx->csf.tiler_heaps.list);    in kbase_csf_tiler_heap_context_init()
    599   mutex_init(&kctx->csf.tiler_heaps.lock);    in kbase_csf_tiler_heap_context_init()
    [all …]
|
| H A D | mali_kbase_csf_trace_buffer.c |
    134   if (list_empty(&kbdev->csf.firmware_trace_buffers.list)) {    in kbase_csf_firmware_trace_buffers_init()
    143   &kbdev->csf.firmware_trace_buffers.mcu_rw);    in kbase_csf_firmware_trace_buffers_init()
    152   &kbdev->csf.firmware_trace_buffers.mcu_write);    in kbase_csf_firmware_trace_buffers_init()
    158   list_for_each_entry(trace_buffer, &kbdev->csf.firmware_trace_buffers.list, node) {    in kbase_csf_firmware_trace_buffers_init()
    174   (kbdev->csf.firmware_trace_buffers.mcu_rw.va_reg->start_pfn << PAGE_SHIFT) +    in kbase_csf_firmware_trace_buffers_init()
    177   kbdev->csf.firmware_trace_buffers.mcu_rw.cpu_addr +    in kbase_csf_firmware_trace_buffers_init()
    180   (kbdev->csf.firmware_trace_buffers.mcu_write.va_reg->start_pfn << PAGE_SHIFT) +    in kbase_csf_firmware_trace_buffers_init()
    183   kbdev->csf.firmware_trace_buffers.mcu_write.cpu_addr +    in kbase_csf_firmware_trace_buffers_init()
    228   if (list_empty(&kbdev->csf.firmware_trace_buffers.list))    in kbase_csf_firmware_trace_buffers_term()
    231   while (!list_empty(&kbdev->csf.firmware_trace_buffers.list)) {    in kbase_csf_firmware_trace_buffers_term()
    [all …]
|
| H A D | mali_kbase_csf_csg_debugfs.c |
    51   &kbdev->csf.global_iface.groups[csg_nr];    in csg_slot_status_update_finish()
    62   const u32 max_csg_slots = kbdev->csf.global_iface.group_num;    in csg_slots_status_update_finish()
    66   lockdep_assert_held(&kbdev->csf.scheduler.lock);    in csg_slots_status_update_finish()
    81   const u32 max_csg_slots = kbdev->csf.global_iface.group_num;    in wait_csg_slots_status_update_finish()
    84   lockdep_assert_held(&kbdev->csf.scheduler.lock);    in wait_csg_slots_status_update_finish()
    89   remaining = wait_event_timeout(kbdev->csf.event_wait,    in wait_csg_slots_status_update_finish()
    105   u32 max_csg_slots = kbdev->csf.global_iface.group_num;    in kbase_csf_debugfs_update_active_groups_status()
    110   lockdep_assert_held(&kbdev->csf.scheduler.lock);    in kbase_csf_debugfs_update_active_groups_status()
    118   if (kbdev->csf.scheduler.state == SCHED_SLEEPING) {    in kbase_csf_debugfs_update_active_groups_status()
    122   kbdev->csf.scheduler.csg_inuse_bitmap, max_csg_slots);    in kbase_csf_debugfs_update_active_groups_status()
    [all …]
|
| H A D | mali_kbase_csf_protected_memory.c |
    42   kbdev->csf.pma_dev = NULL;    in kbase_csf_protected_memory_init()
    46   kbdev->csf.pma_dev = platform_get_drvdata(pdev);    in kbase_csf_protected_memory_init()
    47   if (!kbdev->csf.pma_dev) {    in kbase_csf_protected_memory_init()
    50   } else if (!try_module_get(kbdev->csf.pma_dev->owner)) {    in kbase_csf_protected_memory_init()
    66   if (kbdev->csf.pma_dev)    in kbase_csf_protected_memory_term()
    67   module_put(kbdev->csf.pma_dev->owner);    in kbase_csf_protected_memory_term()
    79   kbdev->csf.pma_dev;    in kbase_csf_protected_memory_alloc()
    142   kbdev->csf.pma_dev;    in kbase_csf_protected_memory_free()
|
| H A D | mali_kbase_csf_firmware_log.c |
    110   struct kbase_csf_firmware_log *fw_log = &kbdev->csf.fw_log;    in kbasep_csf_firmware_log_debugfs_read()
    156   struct kbase_csf_firmware_log *fw_log = &kbdev->csf.fw_log;    in kbase_csf_firmware_log_mode_read()
    165   struct kbase_csf_firmware_log *fw_log = &kbdev->csf.fw_log;    in kbase_csf_firmware_log_mode_write()
    213   container_of(work, struct kbase_device, csf.fw_log.poll_work.work);    in kbase_csf_firmware_log_poll()
    214   struct kbase_csf_firmware_log *fw_log = &kbdev->csf.fw_log;    in kbase_csf_firmware_log_poll()
    224   struct kbase_csf_firmware_log *fw_log = &kbdev->csf.fw_log;    in kbase_csf_firmware_log_init()
    253   struct kbase_csf_firmware_log *fw_log = &kbdev->csf.fw_log;    in kbase_csf_firmware_log_term()
    264   struct kbase_csf_firmware_log *fw_log = &kbdev->csf.fw_log;    in kbase_csf_firmware_log_dump_buffer()
    325   kbdev->csf.fw_log.func_call_list_va_start = entry[0];    in kbase_csf_firmware_log_parse_logging_call_list_entry()
    326   kbdev->csf.fw_log.func_call_list_va_end = entry[1];    in kbase_csf_firmware_log_parse_logging_call_list_entry()
    [all …]
|
| H A D | mali_kbase_csf_firmware_cfg.c |
    251   kbdev->csf.fw_cfg_kobj = kobject_create_and_add(    in kbase_csf_firmware_cfg_init()
    253   if (!kbdev->csf.fw_cfg_kobj) {    in kbase_csf_firmware_cfg_init()
    254   kobject_put(kbdev->csf.fw_cfg_kobj);    in kbase_csf_firmware_cfg_init()
    261   list_for_each_entry(config, &kbdev->csf.firmware_config, node) {    in kbase_csf_firmware_cfg_init()
    268   kbdev->csf.fw_cfg_kobj, "%s", config->name);    in kbase_csf_firmware_cfg_init()
    285   while (!list_empty(&kbdev->csf.firmware_config)) {    in kbase_csf_firmware_cfg_term()
    288   config = list_first_entry(&kbdev->csf.firmware_config,    in kbase_csf_firmware_cfg_term()
    299   kobject_del(kbdev->csf.fw_cfg_kobj);    in kbase_csf_firmware_cfg_term()
    300   kobject_put(kbdev->csf.fw_cfg_kobj);    in kbase_csf_firmware_cfg_term()
    329   list_add(&config->node, &kbdev->csf.firmware_config);    in kbase_csf_firmware_cfg_option_entry_parse()
|
| H A D | mali_kbase_csf_kcpu_debugfs.c |
    81   lockdep_assert_held(&kctx->csf.kcpu_queues.lock);    in kbasep_csf_kcpu_debugfs_print_queue()
    133   mutex_lock(&kctx->csf.kcpu_queues.lock);    in kbasep_csf_kcpu_debugfs_show()
    135   idx = find_first_bit(kctx->csf.kcpu_queues.in_use,    in kbasep_csf_kcpu_debugfs_show()
    140   kctx->csf.kcpu_queues.array[idx];    in kbasep_csf_kcpu_debugfs_show()
    145   kctx->csf.kcpu_queues.array[idx]);    in kbasep_csf_kcpu_debugfs_show()
    147   idx = find_next_bit(kctx->csf.kcpu_queues.in_use,    in kbasep_csf_kcpu_debugfs_show()
    151   mutex_unlock(&kctx->csf.kcpu_queues.lock);    in kbasep_csf_kcpu_debugfs_show()
|
| H A D | mali_kbase_csf_kcpu.c |
    189   &kctx->csf.kcpu_queues.jit_blocked_queues;    in kbase_jit_add_to_pending_alloc_list()
    193   lockdep_assert_held(&kctx->csf.kcpu_queues.jit_lock);    in kbase_jit_add_to_pending_alloc_list()
    196   &kctx->csf.kcpu_queues.jit_blocked_queues,    in kbase_jit_add_to_pending_alloc_list()
    243   mutex_lock(&kctx->csf.kcpu_queues.jit_lock);    in kbase_kcpu_jit_allocate_process()
    268   list_for_each_entry(jit_cmd, &kctx->csf.kcpu_queues.jit_cmds_head, info.jit_alloc.node) {    in kbase_kcpu_jit_allocate_process()
    338   mutex_unlock(&kctx->csf.kcpu_queues.jit_lock);    in kbase_kcpu_jit_allocate_process()
    356   mutex_unlock(&kctx->csf.kcpu_queues.jit_lock);    in kbase_kcpu_jit_allocate_process()
    415   mutex_lock(&kctx->csf.kcpu_queues.jit_lock);    in kbase_kcpu_jit_allocate_prepare()
    417   &kctx->csf.kcpu_queues.jit_cmds_head);    in kbase_kcpu_jit_allocate_prepare()
    418   mutex_unlock(&kctx->csf.kcpu_queues.jit_lock);    in kbase_kcpu_jit_allocate_prepare()
    [all …]
|
| H A D | mali_kbase_debug_csf_fault.h |
    82   return atomic_read(&kbdev->csf.dof.enabled);    in kbase_debug_csf_fault_dump_enabled()
    100   spin_lock_irqsave(&kbdev->csf.dof.lock, flags);    in kbase_debug_csf_fault_dump_complete()
    101   ret = (kbdev->csf.dof.error_code == DF_NO_ERROR);    in kbase_debug_csf_fault_dump_complete()
    102   spin_unlock_irqrestore(&kbdev->csf.dof.lock, flags);    in kbase_debug_csf_fault_dump_complete()
|
| /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/debug/backend/ |
| H A D | mali_kbase_debug_coresight_csf.c |
    319   spin_lock_irqsave(&kbdev->csf.coresight.lock, flags);    in kbase_debug_coresight_csf_register()
    320   list_for_each_entry(client_entry, &kbdev->csf.coresight.clients, link) {    in kbase_debug_coresight_csf_register()
    331   spin_unlock_irqrestore(&kbdev->csf.coresight.lock, flags);    in kbase_debug_coresight_csf_register()
    347   list_add(&client->link, &kbdev->csf.coresight.clients);    in kbase_debug_coresight_csf_register()
    348   spin_unlock_irqrestore(&kbdev->csf.coresight.lock, flags);    in kbase_debug_coresight_csf_register()
    376   spin_lock_irqsave(&kbdev->csf.coresight.lock, flags);    in kbase_debug_coresight_csf_unregister()
    379   while (retry && !list_empty(&kbdev->csf.coresight.configs)) {    in kbase_debug_coresight_csf_unregister()
    381   list_for_each_entry(config_entry, &kbdev->csf.coresight.configs, link) {    in kbase_debug_coresight_csf_unregister()
    383   spin_unlock_irqrestore(&kbdev->csf.coresight.lock, flags);    in kbase_debug_coresight_csf_unregister()
    385   spin_lock_irqsave(&kbdev->csf.coresight.lock, flags);    in kbase_debug_coresight_csf_unregister()
    [all …]
|
| /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/tl/backend/ |
| H A D | mali_kbase_timeline_csf.c |
    73   kbdev->csf.global_iface.group_num,    in kbase_create_timeline_objects()
    87   mutex_lock(&kbdev->csf.scheduler.lock);    in kbase_create_timeline_objects()
    89   for (slot_i = 0; slot_i < kbdev->csf.global_iface.group_num; slot_i++) {    in kbase_create_timeline_objects()
    92   kbdev->csf.scheduler.csg_slots[slot_i].resident_group;    in kbase_create_timeline_objects()
    108   mutex_unlock(&kbdev->csf.scheduler.lock);    in kbase_create_timeline_objects()
    119   mutex_lock(&kctx->csf.kcpu_queues.lock);    in kbase_create_timeline_objects()
    163   kctx->csf.kcpu_queues.array[i];    in kbase_create_timeline_objects()
    172   mutex_unlock(&kctx->csf.kcpu_queues.lock);    in kbase_create_timeline_objects()
|
| /OK3568_Linux_fs/u-boot/arch/arm/cpu/arm926ejs/mxs/ |
| H A D | Makefile |
    47   csf=$(word 3,$^) ; \
    48   sed "s@VENDOR@$(VENDOR)@g;s@BOARD@$(BOARD)@g" "$$csf" | \
    68   spl/u-boot-spl.csf: spl/u-boot-spl.ivt spl/u-boot-spl.bin board/$(VENDOR)/$(BOARD)/sign/u-boot-spl.…
    71   u-boot.csf: u-boot.ivt u-boot.bin board/$(VENDOR)/$(BOARD)/sign/u-boot.csf
    74   %.sig: %.csf
|
| /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/hwcnt/backend/ |
| H A D | mali_kbase_hwcnt_backend_csf_if_fw.c |
    105   ctx->kbdev->csf.scheduler.interrupt_lock)    in kbasep_hwcnt_backend_csf_if_fw_lock()
    121   ctx->kbdev->csf.scheduler.interrupt_lock)    in kbasep_hwcnt_backend_csf_if_fw_unlock()
    245   prfcnt_size = kbdev->csf.global_iface.prfcnt_size;    in kbasep_hwcnt_backend_csf_if_fw_get_prfcnt_info()
    348   ret = kbase_mmu_insert_pages(kbdev, &kbdev->csf.mcu_mmu, gpu_va_base >> PAGE_SHIFT, phys,    in kbasep_hwcnt_backend_csf_if_fw_ring_buf_alloc()
    483   WARN_ON(kbase_mmu_teardown_pages(fw_ctx->kbdev, &fw_ctx->kbdev->csf.mcu_mmu,    in kbasep_hwcnt_backend_csf_if_fw_ring_buf_free()
    518   global_iface = &kbdev->csf.global_iface;    in kbasep_hwcnt_backend_csf_if_fw_dump_enable()
    543   kbdev->csf.hwcnt.enable_pending = true;    in kbasep_hwcnt_backend_csf_if_fw_dump_enable()
    581   global_iface = &kbdev->csf.global_iface;    in kbasep_hwcnt_backend_csf_if_fw_dump_disable()
    584   kbdev->csf.hwcnt.enable_pending = true;    in kbasep_hwcnt_backend_csf_if_fw_dump_disable()
    599   kbdev->csf.hwcnt.request_pending = false;    in kbasep_hwcnt_backend_csf_if_fw_dump_disable()
    [all …]
|