/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/backend/gpu/
mali_kbase_pm_backend.c
    49  int kbase_pm_runtime_init(struct kbase_device *kbdev)
    55          kbdev->pm.backend.callback_power_on =
    57          kbdev->pm.backend.callback_power_off =
    59          kbdev->pm.backend.callback_power_suspend =
    61          kbdev->pm.backend.callback_power_resume =
    63          kbdev->pm.callback_power_runtime_init =
    65          kbdev->pm.callback_power_runtime_term =
    67          kbdev->pm.backend.callback_power_runtime_on =
    69          kbdev->pm.backend.callback_power_runtime_off =
    71          kbdev->pm.backend.callback_power_runtime_idle =
        [all …]
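The excerpt above shows the power-management callback table being wired up at init time: kbase_pm_runtime_init() copies platform-specific power callbacks into fields of the device structure, and the rest of the driver performs power transitions only through those pointers. A minimal, self-contained sketch of that pattern follows; the struct layout and callback names are simplified stand-ins, not the real kbase definitions.

    /* Sketch of the callback-table init pattern (simplified, hypothetical types). */
    #include <stdio.h>

    struct pm_callbacks {
        void (*power_on)(void);
        void (*power_off)(void);
    };

    struct device_pm {
        struct pm_callbacks backend;   /* wired up once at init time */
    };

    static void platform_power_on(void)  { puts("platform: power on");  }
    static void platform_power_off(void) { puts("platform: power off"); }

    /* Analogue of kbase_pm_runtime_init(): bind platform callbacks to the device. */
    static int pm_runtime_init(struct device_pm *pm)
    {
        pm->backend.power_on = platform_power_on;
        pm->backend.power_off = platform_power_off;
        return 0;
    }

    int main(void)
    {
        struct device_pm pm;
        pm_runtime_init(&pm);
        pm.backend.power_on();   /* core code never calls the platform directly */
        pm.backend.power_off();
        return 0;
    }

The indirection lets the same core power-state machine run on any board: only the table contents change per platform.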
|
mali_kbase_pm_driver.c
   100          struct kbase_device *kbdev,
   104  static void kbase_pm_hw_issues_apply(struct kbase_device *kbdev);
   107  bool kbase_pm_is_mcu_desired(struct kbase_device *kbdev)
   109          lockdep_assert_held(&kbdev->hwaccess_lock);
   111          if (unlikely(!kbdev->csf.firmware_inited))
   114          if (kbdev->csf.scheduler.pm_active_count &&
   115              kbdev->pm.backend.mcu_desired)
   119          if (kbdev->pm.backend.gpu_wakeup_override)
   128          return (kbdev->pm.backend.mcu_desired &&
   129              kbase_pm_no_mcu_core_pwroff(kbdev) &&
        [all …]
|
mali_kbase_instr_backend.c
    32  static int wait_prfcnt_ready(struct kbase_device *kbdev)
    37          const u32 prfcnt_active = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS)) &
    43          dev_err(kbdev->dev, "PRFCNT_ACTIVE bit stuck\n");
    47  int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
    56          lockdep_assert_held(&kbdev->hwaccess_lock);
    62          spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
    64          if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_DISABLED) {
    66                  spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
    70          if (kbase_is_gpu_removed(kbdev)) {
    72                  spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
        [all …]
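wait_prfcnt_ready() above polls GPU_STATUS until the performance-counter-active bit clears, and logs "PRFCNT_ACTIVE bit stuck" if it never does. Below is a userspace sketch of that bounded-poll idiom; the read_gpu_status() accessor, the bit position, and the retry budget are all invented for illustration.

    /* Sketch of a bounded register poll (assumed bit/limit, stubbed register). */
    #include <stdint.h>
    #include <stdio.h>

    #define PRFCNT_ACTIVE_BIT (1u << 2)   /* assumed bit position */

    static uint32_t read_gpu_status(void)
    {
        static int reads;
        return (++reads < 3) ? PRFCNT_ACTIVE_BIT : 0;  /* busy twice, then ready */
    }

    static int wait_prfcnt_ready(void)
    {
        for (int loops = 0; loops < 100; loops++) {
            if (!(read_gpu_status() & PRFCNT_ACTIVE_BIT))
                return 0;                  /* counters ready */
        }
        fprintf(stderr, "PRFCNT_ACTIVE bit stuck\n");
        return -1;                         /* hardware never became ready */
    }

    int main(void)
    {
        return wait_prfcnt_ready() ? 1 : 0;
    }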
|
mali_kbase_pm_internal.h
    42  void kbase_pm_dev_idle(struct kbase_device *kbdev);
    51  void kbase_pm_dev_activate(struct kbase_device *kbdev);
    67  u64 kbase_pm_get_present_cores(struct kbase_device *kbdev,
    83  u64 kbase_pm_get_active_cores(struct kbase_device *kbdev,
    99  u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev,
   115  u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev,
   130  void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume);
   151  bool kbase_pm_clock_off(struct kbase_device *kbdev);
   160  void kbase_pm_enable_interrupts(struct kbase_device *kbdev);
   173  void kbase_pm_disable_interrupts(struct kbase_device *kbdev);
        [all …]
|
mali_kbase_jm_hw.c
    43  static void kbasep_try_reset_gpu_early_locked(struct kbase_device *kbdev);
    44  static u64 kbasep_apply_limited_core_mask(const struct kbase_device *kbdev,
    47  static u64 kbase_job_write_affinity(struct kbase_device *kbdev, base_jd_core_req core_req,
    60          unsigned int num_core_groups = kbdev->gpu_props.num_core_groups;
    62                  &kbdev->gpu_props.props.coherency_info;
    64          affinity = kbdev->pm.backend.shaders_avail &
    65                  kbdev->pm.debug_core_mask[js];
    75          affinity &= kbdev->gpu_props.curr_config.shader_present;
    78          affinity = kbdev->pm.backend.shaders_avail &
    79                  kbdev->pm.debug_core_mask[js];
        [all …]
|
mali_kbase_jm_rb.c
    58  static void kbase_gpu_release_atom(struct kbase_device *kbdev,
    69  static void kbase_gpu_enqueue_atom(struct kbase_device *kbdev,
    72          struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[katom->slot_nr];
    76          lockdep_assert_held(&kbdev->hwaccess_lock);
    96  static struct kbase_jd_atom *kbase_gpu_dequeue_atom(struct kbase_device *kbdev, unsigned int js,
    99          struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js];
   107          lockdep_assert_held(&kbdev->hwaccess_lock);
   111          kbase_gpu_release_atom(kbdev, katom, end_timestamp);
   120  struct kbase_jd_atom *kbase_gpu_inspect(struct kbase_device *kbdev, unsigned int js, int idx)
   122          struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js];
        [all …]
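kbase_gpu_enqueue_atom()/kbase_gpu_dequeue_atom() above manage a small per-job-slot ring buffer (slot_rb) under hwaccess_lock. The toy version below models only the index arithmetic of such a fixed-size ring; the capacity, the int payload, and the helper names are invented (the real driver stores kbase_jd_atom pointers and holds locks while touching the ring).

    /* Toy fixed-size ring buffer; capacity and payload are illustrative. */
    #include <stdio.h>

    #define SLOT_RB_SIZE 2          /* assumed capacity */

    struct slot_rb {
        int entries[SLOT_RB_SIZE];
        unsigned int read_idx, write_idx;   /* free-running; wrap via modulo */
    };

    static int rb_enqueue(struct slot_rb *rb, int atom)
    {
        if (rb->write_idx - rb->read_idx >= SLOT_RB_SIZE)
            return -1;                              /* ring full */
        rb->entries[rb->write_idx++ % SLOT_RB_SIZE] = atom;
        return 0;
    }

    static int rb_dequeue(struct slot_rb *rb, int *atom)
    {
        if (rb->read_idx == rb->write_idx)
            return -1;                              /* ring empty */
        *atom = rb->entries[rb->read_idx++ % SLOT_RB_SIZE];
        return 0;
    }

    int main(void)
    {
        struct slot_rb rb = { {0}, 0, 0 };
        int atom;
        rb_enqueue(&rb, 1);
        rb_enqueue(&rb, 2);
        while (rb_dequeue(&rb, &atom) == 0)
            printf("dequeued atom %d\n", atom);
        return 0;
    }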
|
mali_kbase_pm_policy.c
    48  void kbase_pm_policy_init(struct kbase_device *kbdev)
    51          struct device_node *np = kbdev->dev->of_node;
    72          default_policy->init(kbdev);
    75          spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
    76          kbdev->pm.backend.pm_current_policy = default_policy;
    77          kbdev->pm.backend.csf_pm_sched_flags = default_policy->pm_sched_flags;
    78          spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
    81          kbdev->pm.backend.pm_current_policy = default_policy;
    85  void kbase_pm_policy_term(struct kbase_device *kbdev)
    87          kbdev->pm.backend.pm_current_policy->term(kbdev);
        [all …]
|
mali_kbase_devfreq.c
    66  static unsigned long get_voltage(struct kbase_device *kbdev, unsigned long freq)
    75          opp = dev_pm_opp_find_freq_exact(kbdev->dev, freq, true);
    78                  dev_err(kbdev->dev, "Failed to get opp (%d)\n", PTR_ERR_OR_ZERO(opp));
    94  void kbase_devfreq_opp_translate(struct kbase_device *kbdev, unsigned long freq,
    99          for (i = 0; i < kbdev->num_opps; i++) {
   100                  if (kbdev->devfreq_table[i].opp_freq == freq) {
   103                          *core_mask = kbdev->devfreq_table[i].core_mask;
   104                          for (j = 0; j < kbdev->nr_clocks; j++) {
   106                                  kbdev->devfreq_table[i].real_freqs[j];
   108                                  kbdev->devfreq_table[i].opp_volts[j];
        [all …]
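kbase_devfreq_opp_translate() above linearly scans the device's OPP table for a matching nominal frequency and fans it out into per-clock real frequencies and a shader core mask. A simplified, compilable model of that lookup is sketched below; the table contents, NR_CLOCKS, and the omission of voltages are assumptions for illustration.

    /* Sketch of an OPP-table lookup; frequencies and masks are made up. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NR_CLOCKS 2

    struct opp_entry {
        unsigned long opp_freq;                 /* nominal frequency (Hz) */
        unsigned long real_freqs[NR_CLOCKS];    /* actual per-clock frequencies */
        uint64_t core_mask;                     /* shader cores usable at this OPP */
    };

    static const struct opp_entry opp_table[] = {
        { 200000000, { 200000000, 100000000 }, 0x3 },
        { 400000000, { 400000000, 200000000 }, 0xf },
    };

    static int opp_translate(unsigned long freq, uint64_t *core_mask,
                             unsigned long real_freqs[NR_CLOCKS])
    {
        for (size_t i = 0; i < sizeof(opp_table) / sizeof(opp_table[0]); i++) {
            if (opp_table[i].opp_freq == freq) {
                *core_mask = opp_table[i].core_mask;
                for (int j = 0; j < NR_CLOCKS; j++)
                    real_freqs[j] = opp_table[i].real_freqs[j];
                return 0;
            }
        }
        return -1;  /* unknown nominal frequency */
    }

    int main(void)
    {
        uint64_t mask;
        unsigned long real[NR_CLOCKS];
        if (opp_translate(400000000, &mask, real) == 0)
            printf("mask=%#llx clk0=%lu clk1=%lu\n",
                   (unsigned long long)mask, real[0], real[1]);
        return 0;
    }

Splitting nominal from real frequencies lets devfreq governors reason about one number while the platform drives several clocks from it.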
|
/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/device/
mali_kbase_device.c
    85  static int kbase_device_all_as_init(struct kbase_device *kbdev)
    89          for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
    90                  err = kbase_mmu_as_init(kbdev, i);
    97                  kbase_mmu_as_term(kbdev, i);
   103  static void kbase_device_all_as_term(struct kbase_device *kbdev)
   107          for (i = 0; i < kbdev->nr_hw_address_spaces; i++)
   108                  kbase_mmu_as_term(kbdev, i);
   111  int kbase_device_pcm_dev_init(struct kbase_device *const kbdev)
   121          prio_ctrl_node = of_parse_phandle(kbdev->dev->of_node,
   124          dev_info(kbdev->dev,
        [all …]
|
/OK3568_Linux_fs/kernel/drivers/gpu/arm/midgard/backend/gpu/
mali_kbase_pm_policy.c
   131  static inline void kbase_timeline_pm_cores_func(struct kbase_device *kbdev,
   142          KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code);
   146  static inline void kbase_timeline_pm_cores_func(struct kbase_device *kbdev,
   158  static void kbasep_pm_do_poweroff_cores(struct kbase_device *kbdev)
   160          u64 prev_shader_state = kbdev->pm.backend.desired_shader_state;
   161          u64 prev_tiler_state = kbdev->pm.backend.desired_tiler_state;
   163          lockdep_assert_held(&kbdev->hwaccess_lock);
   165          kbdev->pm.backend.desired_shader_state &=
   166                  ~kbdev->pm.backend.shader_poweroff_pending;
   167          kbdev->pm.backend.desired_tiler_state &=
        [all …]
|
mali_kbase_pm_backend.c
    39  void kbase_pm_register_access_enable(struct kbase_device *kbdev)
    46          callbacks->power_on_callback(kbdev);
    48          kbdev->pm.backend.gpu_powered = true;
    51  void kbase_pm_register_access_disable(struct kbase_device *kbdev)
    58          callbacks->power_off_callback(kbdev);
    60          kbdev->pm.backend.gpu_powered = false;
    63  int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
    68          KBASE_DEBUG_ASSERT(kbdev != NULL);
    70          mutex_init(&kbdev->pm.lock);
    72          kbdev->pm.backend.gpu_poweroff_wait_wq = alloc_workqueue("kbase_pm_poweroff_wait",
        [all …]
|
mali_kbase_pm_driver.c
    96          struct kbase_device *kbdev,
   140  static void mali_cci_flush_l2(struct kbase_device *kbdev)
   154          kbase_reg_write(kbdev,
   159          raw = kbase_reg_read(kbdev,
   166          raw = kbase_reg_read(kbdev,
   185  static void kbase_pm_invoke(struct kbase_device *kbdev,
   194          lockdep_assert_held(&kbdev->hwaccess_lock);
   228          u64 state = kbase_pm_get_state(kbdev, core_type, ACTION_READY);
   242                  KBASE_TRACE_ADD(kbdev, PM_PWRON, NULL, NULL, 0u,
   246                  KBASE_TRACE_ADD(kbdev, PM_PWRON_TILER, NULL,
        [all …]
|
mali_kbase_instr_backend.c
    37  static void kbasep_instr_hwcnt_cacheclean(struct kbase_device *kbdev)
    43          spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
    44          KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
    48          spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
    49          irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
    50          kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
    52          spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
    56          KBASE_TRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 0);
    57          kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
    59          kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_CLEANING;
        [all …]
|
mali_kbase_jm_hw.c
    39          dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
    42  static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev);
    47  static inline int kbasep_jm_is_js_free(struct kbase_device *kbdev, int js,
    50          return !kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT), kctx);
    53  void kbase_job_hw_submit(struct kbase_device *kbdev,
    61          KBASE_DEBUG_ASSERT(kbdev);
    67          KBASE_DEBUG_ASSERT(kbasep_jm_is_js_free(kbdev, js, kctx));
    69          kbase_js_debug_log_current_affinities(kbdev);
    70          KBASE_DEBUG_ASSERT(!kbase_js_affinity_would_violate(kbdev, js,
    73          kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO),
        [all …]
|
mali_kbase_devfreq.c
    72  static unsigned long opp_translate(struct kbase_device *kbdev,
    77          for (i = 0; i < kbdev->num_opps; i++) {
    78                  if (kbdev->opp_table[i].opp_freq == freq) {
    79                          *core_mask = kbdev->opp_table[i].core_mask;
    80                          return kbdev->opp_table[i].real_freq;
    85          *core_mask = kbdev->gpu_props.props.raw_props.shader_present;
    93          struct kbase_device *kbdev = dev_get_drvdata(dev);
    97          unsigned long old_freq = kbdev->current_freq;
   116          if (kbdev->current_nominal_freq == nominal_freq) {
   119          if (kbdev->current_voltage == voltage)
        [all …]
|
mali_kbase_jm_rb.c
    42  static void kbase_gpu_release_atom(struct kbase_device *kbdev,
    53  static void kbase_gpu_enqueue_atom(struct kbase_device *kbdev,
    56          struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[katom->slot_nr];
    60          lockdep_assert_held(&kbdev->hwaccess_lock);
    80  static struct kbase_jd_atom *kbase_gpu_dequeue_atom(struct kbase_device *kbdev,
    84          struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js];
    92          lockdep_assert_held(&kbdev->hwaccess_lock);
    96          kbase_gpu_release_atom(kbdev, katom, end_timestamp);
   102          kbase_js_debug_log_current_affinities(kbdev);
   107  struct kbase_jd_atom *kbase_gpu_inspect(struct kbase_device *kbdev, int js,
        [all …]
|
/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/csf/
mali_kbase_csf_reset_gpu.c
    99  int kbase_reset_gpu_prevent_and_wait(struct kbase_device *kbdev)
   101          down_read(&kbdev->csf.reset.sem);
   103          if (atomic_read(&kbdev->csf.reset.state) ==
   105                  up_read(&kbdev->csf.reset.sem);
   109          if (WARN_ON(kbase_reset_gpu_is_active(kbdev))) {
   110                  up_read(&kbdev->csf.reset.sem);
   118  int kbase_reset_gpu_try_prevent(struct kbase_device *kbdev)
   120          if (!down_read_trylock(&kbdev->csf.reset.sem))
   123          if (atomic_read(&kbdev->csf.reset.state) ==
   125                  up_read(&kbdev->csf.reset.sem);
        [all …]
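The reset-prevention functions above use a reader-writer semaphore: any caller that must keep the GPU reset path (the writer) out takes csf.reset.sem for read, and kbase_reset_gpu_try_prevent() uses the trylock form to fail fast instead of sleeping. A userspace sketch of the idiom, with pthread rwlocks standing in for the kernel rwsem:

    /* Sketch of read-lock reset prevention; pthreads stand in for rwsem. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t reset_sem = PTHREAD_RWLOCK_INITIALIZER;

    static int reset_prevent_and_wait(void)
    {
        pthread_rwlock_rdlock(&reset_sem);   /* blocks while a reset (writer) runs */
        return 0;
    }

    static int reset_try_prevent(void)
    {
        if (pthread_rwlock_tryrdlock(&reset_sem) != 0)
            return -1;                       /* a reset is in flight; don't wait */
        return 0;
    }

    static void reset_allow(void)
    {
        pthread_rwlock_unlock(&reset_sem);
    }

    int main(void)
    {
        if (reset_try_prevent() == 0) {
            puts("reset prevented; safe to touch the GPU");
            reset_allow();
        }
        (void)reset_prevent_and_wait();
        reset_allow();
        return 0;
    }

Many readers can hold the lock concurrently, so normal GPU work never serializes against itself; only an actual reset excludes everyone.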
|
mali_kbase_csf_firmware_no_mali.c
   149  static int invent_memory_setup_entry(struct kbase_device *kbdev)
   159          kbdev->csf.shared_interface = interface;
   160          list_add(&interface->node, &kbdev->csf.firmware_interfaces);
   166  static void free_global_iface(struct kbase_device *kbdev)
   168          struct kbase_csf_global_iface *iface = &kbdev->csf.global_iface;
   181  static int invent_cmd_stream_group_info(struct kbase_device *kbdev,
   190          ginfo->kbdev = kbdev;
   208          stream->kbdev = kbdev;
   220  static int invent_capabilities(struct kbase_device *kbdev)
   222          struct dummy_firmware_interface *interface = kbdev->csf.shared_interface;
        [all …]
|
mali_kbase_csf_firmware.c
   194  static int setup_shared_iface_static_region(struct kbase_device *kbdev)
   197                  kbdev->csf.shared_interface;
   204          reg = kbase_alloc_free_region(kbdev, &kbdev->csf.shared_reg_rbtree, 0,
   207          mutex_lock(&kbdev->csf.reg_lock);
   208          ret = kbase_add_va_region_rbtree(kbdev, reg,
   210          mutex_unlock(&kbdev->csf.reg_lock);
   220  static int wait_mcu_status_value(struct kbase_device *kbdev, u32 val)
   226                 (kbase_reg_read(kbdev, GPU_CONTROL_REG(MCU_STATUS)) != val)) {
   232  void kbase_csf_firmware_disable_mcu(struct kbase_device *kbdev)
   234          KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_DISABLING(kbdev, kbase_backend_get_cycle_cnt(kbdev));
        [all …]
|
mali_kbase_csf_scheduler.c
    71          struct kbase_device *const kbdev,
    75  static void scheduler_enable_tick_timer_nolock(struct kbase_device *kbdev);
    76  static int suspend_active_queue_groups(struct kbase_device *kbdev,
    78  static int suspend_active_groups_on_powerdown(struct kbase_device *kbdev,
   113          struct kbase_device *kbdev = group->kctx->kbdev;   (in wait_for_dump_complete_on_group_deschedule())
   115          struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;
   120          if (likely(!kbase_debug_csf_fault_dump_enabled(kbdev)))
   123          while ((!kbase_debug_csf_fault_dump_complete(kbdev) ||
   129                  kbase_debug_csf_fault_wait_completion(kbdev);
   157  static void schedule_actions_trigger_df(struct kbase_device *kbdev,
        [all …]
|
/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/
mali_kbase_pm.c
    41  int kbase_pm_powerup(struct kbase_device *kbdev, unsigned int flags)
    43          return kbase_hwaccess_pm_powerup(kbdev, flags);
    46  void kbase_pm_halt(struct kbase_device *kbdev)
    48          kbase_hwaccess_pm_halt(kbdev);
    51  void kbase_pm_context_active(struct kbase_device *kbdev)
    53          (void)kbase_pm_context_active_handle_suspend(kbdev,
    57  int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev,
    62          KBASE_DEBUG_ASSERT(kbdev != NULL);
    63          dev_dbg(kbdev->dev, "%s - reason = %d, pid = %d\n", __func__,
    65          kbase_pm_lock(kbdev);
        [all …]
|
/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/device/backend/
mali_kbase_device_csf.c
    54  static void kbase_device_firmware_hwcnt_term(struct kbase_device *kbdev)
    56          if (kbdev->csf.firmware_inited) {
    57                  kbase_kinstr_prfcnt_term(kbdev->kinstr_prfcnt_ctx);
    58                  kbase_vinstr_term(kbdev->vinstr_ctx);
    59                  kbase_hwcnt_virtualizer_term(kbdev->hwcnt_gpu_virt);
    60                  kbase_hwcnt_backend_csf_metadata_term(&kbdev->hwcnt_gpu_iface);
    61                  kbase_csf_firmware_unload_term(kbdev);
    71  static int kbase_backend_late_init(struct kbase_device *kbdev)
    75          err = kbase_hwaccess_pm_init(kbdev);
    79          err = kbase_reset_gpu_init(kbdev);
        [all …]
|
/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/platform/devicetree/
mali_kbase_runtime_pm.c
    32  static void enable_gpu_power_control(struct kbase_device *kbdev)
    37          for (i = 0; i < kbdev->nr_regulators; i++) {
    38                  if (WARN_ON(kbdev->regulators[i] == NULL))
    40                  else if (!regulator_is_enabled(kbdev->regulators[i]))
    41                          WARN_ON(regulator_enable(kbdev->regulators[i]));
    45          for (i = 0; i < kbdev->nr_clocks; i++) {
    46                  if (WARN_ON(kbdev->clocks[i] == NULL))
    48                  else if (!__clk_is_enabled(kbdev->clocks[i]))
    49                          WARN_ON(clk_prepare_enable(kbdev->clocks[i]));
    53  static void disable_gpu_power_control(struct kbase_device *kbdev)
        [all …]
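enable_gpu_power_control() above brings up every regulator before preparing and enabling the clocks, skipping supplies that are already on, so the GPU rails are stable before the block is clocked. A toy, compilable version of that ordering follows; the supply names and the bool bookkeeping are invented stand-ins for the kernel's regulator and clk frameworks.

    /* Sketch of regulators-then-clocks bring-up ordering (toy supplies). */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct supply {
        const char *name;
        bool enabled;
    };

    static struct supply regulators[] = { { "vdd_gpu", false } };
    static struct supply clocks[]     = { { "clk_gpu", false }, { "clk_bus", true } };

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static void enable_gpu_power_control(void)
    {
        /* Regulators come up first so the GPU is powered before it is clocked. */
        for (size_t i = 0; i < ARRAY_SIZE(regulators); i++)
            if (!regulators[i].enabled) {
                regulators[i].enabled = true;
                printf("regulator %s enabled\n", regulators[i].name);
            }

        /* Then the clocks, skipping any that are already running. */
        for (size_t i = 0; i < ARRAY_SIZE(clocks); i++)
            if (!clocks[i].enabled) {
                clocks[i].enabled = true;
                printf("clock %s enabled\n", clocks[i].name);
            }
    }

    int main(void)
    {
        enable_gpu_power_control();
        return 0;
    }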
|
/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/tl/
mali_kbase_tracepoints.h
    58  #define __TL_DISPATCH_STREAM(kbdev, stype) \
    60          ((u8 *)kbdev->timeline + __ ## stype ## _stream_offset))
   875          kbdev, \
   881          int enabled = atomic_read(&kbdev->timeline_flags); \
   884                  __TL_DISPATCH_STREAM(kbdev, obj), \
   900          kbdev, \
   906          int enabled = atomic_read(&kbdev->timeline_flags); \
   909                  __TL_DISPATCH_STREAM(kbdev, obj), \
   925          kbdev, \
   931          int enabled = atomic_read(&kbdev->timeline_flags); \
        [all …]
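The macros excerpted above share one shape: read kbdev->timeline_flags, and only serialize the event into the timeline stream when tracing is enabled, so a disabled tracepoint costs a single atomic read. A reduced, self-contained model of that gate; the macro name and the printf "stream" are illustrative, not the real serializer.

    /* Sketch of a flag-gated tracepoint macro (C11 atomics). */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int timeline_flags;   /* 0 = tracing off */

    #define TL_NEW_DEVICE(id)                                  \
        do {                                                   \
            int enabled = atomic_load(&timeline_flags);        \
            if (enabled)                                       \
                printf("tl: new_device id=%d\n", (id));        \
        } while (0)

    int main(void)
    {
        TL_NEW_DEVICE(1);                    /* dropped: tracing disabled */
        atomic_store(&timeline_flags, 1);
        TL_NEW_DEVICE(2);                    /* emitted */
        return 0;
    }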
|
/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/arbiter/
mali_kbase_arbiter_pm.c
    49  static void kbase_arbiter_pm_vm_wait_gpu_assignment(struct kbase_device *kbdev);
    51          struct kbase_device *kbdev);
   134  static void kbase_arbiter_pm_vm_set_state(struct kbase_device *kbdev,
   137          struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
   139          dev_dbg(kbdev->dev, "VM set_state %s -> %s",
   147          KBASE_KTRACE_ADD(kbdev, ARB_VM_STATE, NULL, new_state);
   163          struct kbase_device *kbdev = arb_vm_state->kbdev;   (in kbase_arbiter_pm_suspend_wq())
   166          dev_dbg(kbdev->dev, ">%s\n", __func__);
   173          dev_dbg(kbdev->dev, ">kbase_pm_driver_suspend\n");
   174          kbase_pm_driver_suspend(kbdev);
        [all …]
|