/OK3568_Linux_fs/kernel/drivers/gpu/arm/midgard/
mali_kbase_context.c
    42   struct kbase_context *kctx;    in kbase_create_context() local
    48   kctx = vzalloc(sizeof(*kctx));    in kbase_create_context()
    50   if (!kctx)    in kbase_create_context()
    56   kctx->kbdev = kbdev;    in kbase_create_context()
    57   kctx->as_nr = KBASEP_AS_NR_INVALID;    in kbase_create_context()
    58   atomic_set(&kctx->refcount, 0);    in kbase_create_context()
    60   kbase_ctx_flag_set(kctx, KCTX_COMPAT);    in kbase_create_context()
    62   kctx->timeline.owner_tgid = task_tgid_nr(current);    in kbase_create_context()
    64   atomic_set(&kctx->setup_complete, 0);    in kbase_create_context()
    65   atomic_set(&kctx->setup_in_progress, 0);    in kbase_create_context()
    [all …]
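These are the opening lines of context creation: a zeroed context is allocated with vzalloc() and its scheduler-related fields are put into a known state before the context is published. A stripped-down sketch of that pattern follows (the struct and the AS_NR_INVALID constant are stand-ins for kbase's own definitions, not the driver's types):

    #include <linux/atomic.h>
    #include <linux/sched.h>
    #include <linux/types.h>
    #include <linux/vmalloc.h>

    #define AS_NR_INVALID (-1)        /* stand-in for KBASEP_AS_NR_INVALID */

    struct ctx_model {                /* stand-in for struct kbase_context */
        void *kbdev;                  /* owning device */
        int as_nr;                    /* GPU address space slot, none assigned yet */
        atomic_t refcount;            /* scheduler references */
        pid_t owner_tgid;             /* creating process, for tracing */
        atomic_t setup_complete;
        atomic_t setup_in_progress;
    };

    static struct ctx_model *ctx_create(void *kbdev)
    {
        /* vzalloc() returns zeroed memory, so only non-zero defaults
         * need explicit initialisation. */
        struct ctx_model *kctx = vzalloc(sizeof(*kctx));

        if (!kctx)
            return NULL;

        kctx->kbdev = kbdev;
        kctx->as_nr = AS_NR_INVALID;
        atomic_set(&kctx->refcount, 0);
        kctx->owner_tgid = task_tgid_nr(current);
        atomic_set(&kctx->setup_complete, 0);
        atomic_set(&kctx->setup_in_progress, 0);
        return kctx;
    }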
mali_kbase_js.c
    71   struct kbase_device *kbdev, struct kbase_context *kctx,
    77   static void kbase_js_foreach_ctx_job(struct kbase_context *kctx,
    83   struct kbase_context *kctx)    in kbasep_js_trace_get_refcnt() argument
    85   return atomic_read(&kctx->refcount);    in kbasep_js_trace_get_refcnt()
    89   struct kbase_context *kctx)    in kbasep_js_trace_get_refcnt() argument
    92   CSTD_UNUSED(kctx);    in kbasep_js_trace_get_refcnt()
    140  struct kbase_context *kctx)    in kbasep_js_runpool_retain_ctx_nolock() argument
    147  KBASE_DEBUG_ASSERT(kctx != NULL);    in kbasep_js_runpool_retain_ctx_nolock()
    152  as_nr = kctx->as_nr;    in kbasep_js_runpool_retain_ctx_nolock()
    153  if (atomic_read(&kctx->refcount) > 0) {    in kbasep_js_runpool_retain_ctx_nolock()
    [all …]
mali_kbase_mmu.c
    37   #define beenthere(kctx, f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)    argument
    66   static void kbase_mmu_flush_invalidate(struct kbase_context *kctx,
    98   static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
    120  struct kbase_context *kctx;    in page_fault_worker() local
    135  kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no);    in page_fault_worker()
    136  if (WARN_ON(!kctx)) {    in page_fault_worker()
    141  KBASE_DEBUG_ASSERT(kctx->kbdev == kbdev);    in page_fault_worker()
    145  kbase_mmu_report_fault_and_kill(kctx, faulting_as,    in page_fault_worker()
    147  kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,    in page_fault_worker()
    161  kbase_mmu_report_fault_and_kill(kctx, faulting_as,    in page_fault_worker()
    [all …]
mali_kbase_mem.c
    46   static struct rb_root *kbase_reg_flags_to_rbtree(struct kbase_context *kctx,    in kbase_reg_flags_to_rbtree() argument
    53   rbtree = &kctx->reg_rbtree_custom;    in kbase_reg_flags_to_rbtree()
    56   rbtree = &kctx->reg_rbtree_exec;    in kbase_reg_flags_to_rbtree()
    59   rbtree = &kctx->reg_rbtree_same;    in kbase_reg_flags_to_rbtree()
    62   rbtree = &kctx->reg_rbtree_same;    in kbase_reg_flags_to_rbtree()
    71   static struct rb_root *kbase_gpu_va_to_rbtree(struct kbase_context *kctx,    in kbase_gpu_va_to_rbtree() argument
    77   if (kbase_ctx_flag(kctx, KCTX_COMPAT)) {    in kbase_gpu_va_to_rbtree()
    80   rbtree = &kctx->reg_rbtree_custom;    in kbase_gpu_va_to_rbtree()
    82   rbtree = &kctx->reg_rbtree_exec;    in kbase_gpu_va_to_rbtree()
    84   rbtree = &kctx->reg_rbtree_same;    in kbase_gpu_va_to_rbtree()
    [all …]
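Both helpers matched here resolve to one of the per-context region rbtrees (SAME_VA, CUSTOM_VA or EXEC_VA) based on allocation flags or on the VA zone an address falls into. A minimal sketch of that dispatch, with made-up flag bits standing in for the driver's KBASE_REG_* encoding:

    #include <linux/rbtree.h>

    /* Illustrative zone flag bits, not the driver's real encoding. */
    #define REG_ZONE_CUSTOM_VA  (1u << 0)
    #define REG_ZONE_EXEC_VA    (1u << 1)

    struct ctx_model {
        struct rb_root reg_rbtree_same;      /* SAME_VA regions */
        struct rb_root reg_rbtree_custom;    /* CUSTOM_VA regions */
        struct rb_root reg_rbtree_exec;      /* EXEC_VA regions */
    };

    /* Pick the rbtree that tracks regions of the requested zone; anything
     * that is not CUSTOM_VA or EXEC_VA falls back to the SAME_VA tree. */
    static struct rb_root *reg_flags_to_rbtree(struct ctx_model *kctx, unsigned long flags)
    {
        if (flags & REG_ZONE_CUSTOM_VA)
            return &kctx->reg_rbtree_custom;
        if (flags & REG_ZONE_EXEC_VA)
            return &kctx->reg_rbtree_exec;
        return &kctx->reg_rbtree_same;
    }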
mali_kbase_ctx_sched.c
    68   static int kbasep_ctx_sched_find_as_for_ctx(struct kbase_context *kctx)    in kbasep_ctx_sched_find_as_for_ctx() argument
    70   struct kbase_device *const kbdev = kctx->kbdev;    in kbasep_ctx_sched_find_as_for_ctx()
    76   if ((kctx->as_nr != KBASEP_AS_NR_INVALID) &&    in kbasep_ctx_sched_find_as_for_ctx()
    77   (kbdev->as_free & (1u << kctx->as_nr)))    in kbasep_ctx_sched_find_as_for_ctx()
    78   return kctx->as_nr;    in kbasep_ctx_sched_find_as_for_ctx()
    90   int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx)    in kbase_ctx_sched_retain_ctx() argument
    92   struct kbase_device *const kbdev = kctx->kbdev;    in kbase_ctx_sched_retain_ctx()
    99   if (atomic_inc_return(&kctx->refcount) == 1) {    in kbase_ctx_sched_retain_ctx()
    100  int const free_as = kbasep_ctx_sched_find_as_for_ctx(kctx);    in kbase_ctx_sched_retain_ctx()
    107  if (free_as != kctx->as_nr) {    in kbase_ctx_sched_retain_ctx()
    [all …]
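The ctx_sched matches show the retain path: the first reference to a context binds it to a GPU address space, preferring the slot it held last time if that slot is still free. A simplified kernel-style sketch of the logic (names, the address-space count and the bitmask field are illustrative; the hwaccess_lock locking and the eviction fallback of the real driver are omitted):

    #include <linux/atomic.h>
    #include <linux/types.h>

    #define AS_NR_INVALID       (-1)
    #define NR_ADDRESS_SPACES   8          /* illustrative AS count */

    struct dev_model { u32 as_free; };     /* bit n set => address space n is free */

    struct ctx_model {
        struct dev_model *kbdev;
        int as_nr;
        atomic_t refcount;
    };

    /* Prefer the context's previous address space if it is still free,
     * otherwise pick any free slot (the find_as_for_ctx pattern above). */
    static int find_as_for_ctx(struct ctx_model *kctx)
    {
        struct dev_model *kbdev = kctx->kbdev;
        int as;

        if (kctx->as_nr != AS_NR_INVALID && (kbdev->as_free & (1u << kctx->as_nr)))
            return kctx->as_nr;

        for (as = 0; as < NR_ADDRESS_SPACES; as++)
            if (kbdev->as_free & (1u << as))
                return as;

        return AS_NR_INVALID;
    }

    /* First reference assigns an address space and marks it busy; later
     * references only bump the count.  The real driver does this under a
     * spinlock, which is omitted here. */
    static int retain_ctx(struct ctx_model *kctx)
    {
        if (atomic_inc_return(&kctx->refcount) == 1) {
            int free_as = find_as_for_ctx(kctx);

            if (free_as != AS_NR_INVALID) {
                kctx->as_nr = free_as;
                kctx->kbdev->as_free &= ~(1u << free_as);
            }
        }
        return kctx->as_nr;
    }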
mali_kbase_replay.c
    67   static void dump_job_head(struct kbase_context *kctx, char *head_str,    in dump_job_head() argument
    71   dev_dbg(kctx->kbdev->dev, "%s\n", head_str);    in dump_job_head()
    72   dev_dbg(kctx->kbdev->dev,    in dump_job_head()
    100  dev_dbg(kctx->kbdev->dev, "next = %llx\n",    in dump_job_head()
    103  dev_dbg(kctx->kbdev->dev, "next = %x\n",    in dump_job_head()
    108  static int kbasep_replay_reset_sfbd(struct kbase_context *kctx,    in kbasep_replay_reset_sfbd() argument
    122  dev_dbg(kctx->kbdev->dev, "fbd_address: %llx\n", fbd_address);    in kbasep_replay_reset_sfbd()
    124  fbd_tiler = kbase_vmap(kctx, fbd_address + SFBD_TILER_OFFSET,    in kbasep_replay_reset_sfbd()
    127  dev_err(kctx->kbdev->dev, "kbasep_replay_reset_fbd: failed to map fbd\n");    in kbasep_replay_reset_sfbd()
    132  dev_dbg(kctx->kbdev->dev,    in kbasep_replay_reset_sfbd()
    [all …]
mali_kbase_mem_profile_debugfs.c
    34   struct kbase_context *kctx = sfile->private;    in kbasep_mem_profile_seq_show() local
    36   mutex_lock(&kctx->mem_profile_lock);    in kbasep_mem_profile_seq_show()
    38   seq_write(sfile, kctx->mem_profile_data, kctx->mem_profile_size);    in kbasep_mem_profile_seq_show()
    42   mutex_unlock(&kctx->mem_profile_lock);    in kbasep_mem_profile_seq_show()
    62   int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data,    in kbasep_mem_profile_debugfs_insert() argument
    67   mutex_lock(&kctx->mem_profile_lock);    in kbasep_mem_profile_debugfs_insert()
    69   dev_dbg(kctx->kbdev->dev, "initialised: %d",    in kbasep_mem_profile_debugfs_insert()
    70   kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED));    in kbasep_mem_profile_debugfs_insert()
    72   if (!kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)) {    in kbasep_mem_profile_debugfs_insert()
    74   kctx->kctx_dentry, kctx,    in kbasep_mem_profile_debugfs_insert()
    [all …]
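The mem_profile matches follow the stock debugfs pattern: a seq_file show() callback copies a per-context buffer out under a mutex, and the insert path creates the file lazily. A generic sketch of that pattern using the standard debugfs/seq_file API (struct profile_blob and the file name are stand-ins, not the kbase layout):

    #include <linux/debugfs.h>
    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/mutex.h>
    #include <linux/seq_file.h>

    /* Illustrative per-context blob, analogous to mem_profile_data/size. */
    struct profile_blob {
        struct mutex lock;
        char *data;
        size_t size;
    };

    /* show() runs with the lock held so a concurrent update of the blob
     * cannot be observed half-written. */
    static int profile_seq_show(struct seq_file *sfile, void *data)
    {
        struct profile_blob *blob = sfile->private;

        mutex_lock(&blob->lock);
        seq_write(sfile, blob->data, blob->size);
        mutex_unlock(&blob->lock);
        return 0;
    }

    static int profile_open(struct inode *inode, struct file *file)
    {
        return single_open(file, profile_seq_show, inode->i_private);
    }

    static const struct file_operations profile_fops = {
        .owner = THIS_MODULE,
        .open = profile_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
    };

    /* Create the file on first use, mirroring the lazy-insert path above. */
    static void profile_debugfs_add(struct dentry *parent, struct profile_blob *blob)
    {
        debugfs_create_file("mem_profile", 0444, parent, blob, &profile_fops);
    }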
mali_kbase_event.c
    24   static struct base_jd_udata kbase_event_process(struct kbase_context *kctx, struct kbase_jd_atom *k…    in kbase_event_process() argument
    28   lockdep_assert_held(&kctx->jctx.lock);    in kbase_event_process()
    30   KBASE_DEBUG_ASSERT(kctx != NULL);    in kbase_event_process()
    36   KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_sub_return(1, &kctx->timeline.jd_atoms_in_flight));    in kbase_event_process()
    38   KBASE_TLSTREAM_TL_NRET_ATOM_CTX(katom, kctx);    in kbase_event_process()
    115  struct kbase_context *kctx = katom->kctx;    in kbase_event_process_noreport_worker() local
    120  mutex_lock(&kctx->jctx.lock);    in kbase_event_process_noreport_worker()
    121  kbase_event_process(kctx, katom);    in kbase_event_process_noreport_worker()
    122  mutex_unlock(&kctx->jctx.lock);    in kbase_event_process_noreport_worker()
    134  static void kbase_event_process_noreport(struct kbase_context *kctx,    in kbase_event_process_noreport() argument
    [all …]
/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/context/
mali_kbase_context.c
    85   static int kbase_insert_kctx_to_process(struct kbase_context *kctx)    in kbase_insert_kctx_to_process() argument
    87   struct rb_root *const prcs_root = &kctx->kbdev->process_root;    in kbase_insert_kctx_to_process()
    88   const pid_t tgid = kctx->tgid;    in kbase_insert_kctx_to_process()
    91   lockdep_assert_held(&kctx->kbdev->kctx_list_lock);    in kbase_insert_kctx_to_process()
    124  kctx->kprcs = kprcs;    in kbase_insert_kctx_to_process()
    125  list_add(&kctx->kprcs_link, &kprcs->kctx_list);    in kbase_insert_kctx_to_process()
    130  int kbase_context_common_init(struct kbase_context *kctx)    in kbase_context_common_init() argument
    136  kbase_disjoint_event(kctx->kbdev);    in kbase_context_common_init()
    138  kctx->process_mm = NULL;    in kbase_context_common_init()
    139  kctx->task = NULL;    in kbase_context_common_init()
    [all …]
/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/
mali_kbase_ctx_sched.c
    34   static int kbase_ktrace_get_ctx_refcnt(struct kbase_context *kctx)    in kbase_ktrace_get_ctx_refcnt() argument
    36   return atomic_read(&kctx->refcount);    in kbase_ktrace_get_ctx_refcnt()
    39   static int kbase_ktrace_get_ctx_refcnt(struct kbase_context *kctx)    in kbase_ktrace_get_ctx_refcnt() argument
    41   CSTD_UNUSED(kctx);    in kbase_ktrace_get_ctx_refcnt()
    72   void kbase_ctx_sched_init_ctx(struct kbase_context *kctx)    in kbase_ctx_sched_init_ctx() argument
    74   kctx->as_nr = KBASEP_AS_NR_INVALID;    in kbase_ctx_sched_init_ctx()
    75   atomic_set(&kctx->refcount, 0);    in kbase_ctx_sched_init_ctx()
    88   static int kbasep_ctx_sched_find_as_for_ctx(struct kbase_context *kctx)    in kbasep_ctx_sched_find_as_for_ctx() argument
    90   struct kbase_device *const kbdev = kctx->kbdev;    in kbasep_ctx_sched_find_as_for_ctx()
    96   if ((kctx->as_nr != KBASEP_AS_NR_INVALID) &&    in kbasep_ctx_sched_find_as_for_ctx()
    [all …]
mali_kbase_js.c
    78   struct kbase_device *kbdev, struct kbase_context *kctx,
    83   static void kbase_js_foreach_ctx_job(struct kbase_context *kctx,
    88   static int kbase_ktrace_get_ctx_refcnt(struct kbase_context *kctx)    in kbase_ktrace_get_ctx_refcnt() argument
    90   return atomic_read(&kctx->refcount);    in kbase_ktrace_get_ctx_refcnt()
    93   static int kbase_ktrace_get_ctx_refcnt(struct kbase_context *kctx)    in kbase_ktrace_get_ctx_refcnt() argument
    95   CSTD_UNUSED(kctx);    in kbase_ktrace_get_ctx_refcnt()
    154  static inline bool jsctx_rb_none_to_pull_prio(struct kbase_context *kctx, unsigned int js, int prio)    in jsctx_rb_none_to_pull_prio() argument
    157  struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];    in jsctx_rb_none_to_pull_prio()
    159  lockdep_assert_held(&kctx->kbdev->hwaccess_lock);    in jsctx_rb_none_to_pull_prio()
    163  dev_dbg(kctx->kbdev->dev, "Slot %u (prio %d) is %spullable in kctx %pK\n", js, prio,    in jsctx_rb_none_to_pull_prio()
    [all …]
mali_kbase_mem.c
    77   static void free_partial_locked(struct kbase_context *kctx,
    80   static size_t kbase_get_num_cpu_va_bits(struct kbase_context *kctx)    in kbase_get_num_cpu_va_bits() argument
    98   if (kbase_ctx_compat_mode(kctx))    in kbase_get_num_cpu_va_bits()
    107  static struct rb_root *kbase_gpu_va_to_rbtree(struct kbase_context *kctx,    in kbase_gpu_va_to_rbtree() argument
    112  struct kbase_reg_zone *exec_va_zone = kbase_ctx_reg_zone_get(kctx, KBASE_REG_ZONE_EXEC_VA);    in kbase_gpu_va_to_rbtree()
    116  kbase_ctx_reg_zone_get(kctx, KBASE_REG_ZONE_FIXED_VA);    in kbase_gpu_va_to_rbtree()
    119  kbase_ctx_reg_zone_get(kctx, KBASE_REG_ZONE_EXEC_FIXED_VA);    in kbase_gpu_va_to_rbtree()
    122  rbtree = &kctx->reg_rbtree_fixed;    in kbase_gpu_va_to_rbtree()
    125  rbtree = &kctx->reg_rbtree_exec_fixed;    in kbase_gpu_va_to_rbtree()
    130  rbtree = &kctx->reg_rbtree_exec;    in kbase_gpu_va_to_rbtree()
    [all …]
mali_kbase_gwt.c
    26   struct kbase_context *kctx,    in kbase_gpu_gwt_setup_page_permission() argument
    39   err = kbase_mmu_update_pages(kctx, reg->start_pfn,    in kbase_gpu_gwt_setup_page_permission()
    45   dev_warn(kctx->kbdev->dev, "kbase_mmu_update_pages failure\n");    in kbase_gpu_gwt_setup_page_permission()
    52   static void kbase_gpu_gwt_setup_pages(struct kbase_context *kctx,    in kbase_gpu_gwt_setup_pages() argument
    55   kbase_gpu_gwt_setup_page_permission(kctx, flag,    in kbase_gpu_gwt_setup_pages()
    56   rb_first(&(kctx->reg_rbtree_same)));    in kbase_gpu_gwt_setup_pages()
    57   kbase_gpu_gwt_setup_page_permission(kctx, flag,    in kbase_gpu_gwt_setup_pages()
    58   rb_first(&(kctx->reg_rbtree_custom)));    in kbase_gpu_gwt_setup_pages()
    62   int kbase_gpu_gwt_start(struct kbase_context *kctx)    in kbase_gpu_gwt_start() argument
    64   kbase_gpu_vm_lock(kctx);    in kbase_gpu_gwt_start()
    [all …]
mali_kbase_softjobs.c
    51   struct kbase_context *kctx = katom->kctx;    in kbasep_add_waiting_soft_job() local
    54   spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);    in kbasep_add_waiting_soft_job()
    55   list_add_tail(&katom->queue, &kctx->waiting_soft_jobs);    in kbasep_add_waiting_soft_job()
    56   spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);    in kbasep_add_waiting_soft_job()
    61   struct kbase_context *kctx = katom->kctx;    in kbasep_remove_waiting_soft_job() local
    64   spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);    in kbasep_remove_waiting_soft_job()
    66   spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);    in kbasep_remove_waiting_soft_job()
    71   struct kbase_context *kctx = katom->kctx;    in kbasep_add_waiting_with_timeout() local
    84   if (!timer_pending(&kctx->soft_job_timeout)) {    in kbasep_add_waiting_with_timeout()
    86   &kctx->kbdev->js_data.soft_job_timeout_ms);    in kbasep_add_waiting_with_timeout()
    [all …]
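The soft-job matches add atoms to a per-context waiting list under an IRQ-safe spinlock and arm a shared timeout timer only when it is not already pending. A condensed sketch of that bookkeeping (struct names are stand-ins and the timer callback itself is not shown):

    #include <linux/jiffies.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/timer.h>

    /* Stand-in for the per-context soft-job bookkeeping. */
    struct softjob_ctx {
        spinlock_t waiting_lock;
        struct list_head waiting_jobs;
        struct timer_list timeout;
        unsigned int timeout_ms;
    };

    struct softjob { struct list_head queue; };

    /* Queue a job on the context's waiting list; the IRQ-safe lock matches
     * how the driver guards its waiting_soft_jobs list. */
    static void add_waiting_soft_job(struct softjob_ctx *ctx, struct softjob *job)
    {
        unsigned long flags;

        spin_lock_irqsave(&ctx->waiting_lock, flags);
        list_add_tail(&job->queue, &ctx->waiting_jobs);
        spin_unlock_irqrestore(&ctx->waiting_lock, flags);
    }

    /* Arm the shared timeout timer only if it is not already pending, so a
     * single timer covers every job currently on the waiting list. */
    static void add_waiting_with_timeout(struct softjob_ctx *ctx, struct softjob *job)
    {
        add_waiting_soft_job(ctx, job);
        if (!timer_pending(&ctx->timeout))
            mod_timer(&ctx->timeout, jiffies + msecs_to_jiffies(ctx->timeout_ms));
    }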
mali_kbase_mem_linux.c
    88   static int kbase_csf_cpu_mmap_user_reg_page(struct kbase_context *kctx, struct vm_area_struct *vma);
    89   static int kbase_csf_cpu_mmap_user_io_pages(struct kbase_context *kctx, struct vm_area_struct *vma);
    92   static int kbase_vmap_phy_pages(struct kbase_context *kctx, struct kbase_va_region *reg,
    95   static void kbase_vunmap_phy_pages(struct kbase_context *kctx,
    98   static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma);
    122  struct kbase_context *kctx, u64 gpu_addr)    in kbase_find_event_mem_region() argument
    128  lockdep_assert_held(&kctx->reg_lock);    in kbase_find_event_mem_region()
    130  list_for_each_entry(reg, &kctx->csf.event_pages_head, link) {    in kbase_find_event_mem_region()
    182  static int kbase_phy_alloc_mapping_init(struct kbase_context *kctx,    in kbase_phy_alloc_mapping_init() argument
    201  err = kbase_vmap_phy_pages(kctx, reg, 0u, size_bytes, kern_mapping,    in kbase_phy_alloc_mapping_init()
    [all …]
mali_kbase_debug_job_fault.c
    41   static void kbase_ctx_remove_pending_event(struct kbase_context *kctx)    in kbase_ctx_remove_pending_event() argument
    43   struct list_head *event_list = &kctx->kbdev->job_fault_event_list;    in kbase_ctx_remove_pending_event()
    47   spin_lock_irqsave(&kctx->kbdev->job_fault_event_lock, flags);    in kbase_ctx_remove_pending_event()
    49   if (event->katom->kctx == kctx) {    in kbase_ctx_remove_pending_event()
    51   spin_unlock_irqrestore(&kctx->kbdev->job_fault_event_lock, flags);    in kbase_ctx_remove_pending_event()
    53   wake_up(&kctx->kbdev->job_fault_resume_wq);    in kbase_ctx_remove_pending_event()
    62   spin_unlock_irqrestore(&kctx->kbdev->job_fault_event_lock, flags);    in kbase_ctx_remove_pending_event()
    65   static bool kbase_ctx_has_no_event_pending(struct kbase_context *kctx)    in kbase_ctx_has_no_event_pending() argument
    67   struct kbase_device *kbdev = kctx->kbdev;    in kbase_ctx_has_no_event_pending()
    68   struct list_head *event_list = &kctx->kbdev->job_fault_event_list;    in kbase_ctx_has_no_event_pending()
    [all …]
mali_kbase_jd.c
    65   get_compat_pointer(struct kbase_context *kctx, const u64 p)    in get_compat_pointer() argument
    68   if (kbase_ctx_flag(kctx, KCTX_COMPAT))    in get_compat_pointer()
    79   dev_dbg(katom->kctx->kbdev->dev, "Atom %pK status to completed\n",    in jd_mark_atom_complete()
    81   KBASE_TLSTREAM_TL_JD_ATOM_COMPLETE(katom->kctx->kbdev, katom);    in jd_mark_atom_complete()
    93   struct kbase_context *kctx = katom->kctx;    in jd_run_atom() local
    95   dev_dbg(kctx->kbdev->dev, "JD run atom %pK in kctx %pK\n",    in jd_run_atom()
    96   (void *)katom, (void *)kctx);    in jd_run_atom()
    102  trace_sysgraph(SGR_SUBMIT, kctx->id,    in jd_run_atom()
    103  kbase_jd_atom_id(katom->kctx, katom));    in jd_run_atom()
    121  dev_dbg(kctx->kbdev->dev, "Atom %pK status to in JS\n", (void *)katom);    in jd_run_atom()
    [all …]
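get_compat_pointer() turns a u64 handed in from user space into a user pointer, taking only the low 32 bits for a compat (32-bit) client. A small sketch of that conversion, with an is_compat flag standing in for the driver's KCTX_COMPAT context flag:

    #include <linux/compat.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    /* Convert a u64 received from user space into a user pointer.  For a
     * 32-bit (compat) client only the low 32 bits are meaningful, so go
     * through compat_ptr(); otherwise use the full 64-bit value. */
    static void __user *u64_to_uptr(u64 p, bool is_compat)
    {
        if (is_compat)
            return compat_ptr((compat_uptr_t)p);
        return u64_to_user_ptr(p);
    }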
mali_kbase_mem_profile_debugfs.c
    40   struct kbase_context *kctx = sfile->private;    in kbasep_mem_profile_seq_show() local
    42   mutex_lock(&kctx->mem_profile_lock);    in kbasep_mem_profile_seq_show()
    44   seq_write(sfile, kctx->mem_profile_data, kctx->mem_profile_size);    in kbasep_mem_profile_seq_show()
    48   mutex_unlock(&kctx->mem_profile_lock);    in kbasep_mem_profile_seq_show()
    69   int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data,    in kbasep_mem_profile_debugfs_insert() argument
    75   mutex_lock(&kctx->mem_profile_lock);    in kbasep_mem_profile_debugfs_insert()
    77   dev_dbg(kctx->kbdev->dev, "initialised: %d",    in kbasep_mem_profile_debugfs_insert()
    78   kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED));    in kbasep_mem_profile_debugfs_insert()
    80   if (!kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)) {    in kbasep_mem_profile_debugfs_insert()
    81   if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {    in kbasep_mem_profile_debugfs_insert()
    [all …]
/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/csf/
mali_kbase_csf_event.c
    38   struct kbase_context *kctx;    member
    43   int kbase_csf_event_wait_add(struct kbase_context *kctx,    in kbase_csf_event_wait_add() argument
    53   event_cb->kctx = kctx;    in kbase_csf_event_wait_add()
    57   spin_lock_irqsave(&kctx->csf.event.lock, flags);    in kbase_csf_event_wait_add()
    58   list_add_tail(&event_cb->link, &kctx->csf.event.callback_list);    in kbase_csf_event_wait_add()
    59   dev_dbg(kctx->kbdev->dev,    in kbase_csf_event_wait_add()
    62   spin_unlock_irqrestore(&kctx->csf.event.lock, flags);    in kbase_csf_event_wait_add()
    70   void kbase_csf_event_wait_remove(struct kbase_context *kctx,    in kbase_csf_event_wait_remove() argument
    76   spin_lock_irqsave(&kctx->csf.event.lock, flags);    in kbase_csf_event_wait_remove()
    78   list_for_each_entry(event_cb, &kctx->csf.event.callback_list, link) {    in kbase_csf_event_wait_remove()
    [all …]
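kbase_csf_event_wait_add() and kbase_csf_event_wait_remove() keep a per-context list of callback records, guarded by a spinlock taken with interrupts disabled. A self-contained sketch of that register/unregister pattern (the registry struct and the callback signature are illustrative, not the driver's API):

    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    /* Stand-in registry, analogous to the csf.event state in the matches above. */
    struct event_registry {
        spinlock_t lock;
        struct list_head callback_list;
    };

    typedef void (*event_cb_fn)(void *param);

    struct event_cb {
        struct list_head link;
        event_cb_fn callback;
        void *param;
    };

    /* Allocate a callback record and append it under the registry lock. */
    static int event_wait_add(struct event_registry *reg, event_cb_fn fn, void *param)
    {
        struct event_cb *cb = kzalloc(sizeof(*cb), GFP_KERNEL);
        unsigned long flags;

        if (!cb)
            return -ENOMEM;

        cb->callback = fn;
        cb->param = param;

        spin_lock_irqsave(&reg->lock, flags);
        list_add_tail(&cb->link, &reg->callback_list);
        spin_unlock_irqrestore(&reg->lock, flags);
        return 0;
    }

    /* Find and unlink a previously registered callback under the same lock. */
    static void event_wait_remove(struct event_registry *reg, event_cb_fn fn, void *param)
    {
        struct event_cb *cb, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&reg->lock, flags);
        list_for_each_entry_safe(cb, tmp, &reg->callback_list, link) {
            if (cb->callback == fn && cb->param == param) {
                list_del(&cb->link);
                kfree(cb);
                break;
            }
        }
        spin_unlock_irqrestore(&reg->lock, flags);
    }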
mali_kbase_csf_tiler_heap.c
    109  static void remove_external_chunk_mappings(struct kbase_context *const kctx,    in remove_external_chunk_mappings() argument
    112  lockdep_assert_held(&kctx->reg_lock);    in remove_external_chunk_mappings()
    115  kbase_mem_shrink_cpu_mapping(kctx, chunk->region, 0,    in remove_external_chunk_mappings()
    122  dev_dbg(kctx->kbdev->dev, "Removed external mappings from chunk 0x%llX", chunk->gpu_va);    in remove_external_chunk_mappings()
    144  struct kbase_context *const kctx = heap->kctx;    in link_chunk() local
    152  dev_dbg(kctx->kbdev->dev,    in link_chunk()
    179  struct kbase_context *const kctx = heap->kctx;    in init_chunk() local
    181  lockdep_assert_held(&kctx->csf.tiler_heaps.lock);    in init_chunk()
    184  dev_err(kctx->kbdev->dev,    in init_chunk()
    204  dev_err(kctx->kbdev->dev, "Failed to link a chunk to a tiler heap\n");    in init_chunk()
    [all …]
mali_kbase_csf.c
    83   static void kbasep_ctx_user_reg_page_mapping_term(struct kbase_context *kctx)    in kbasep_ctx_user_reg_page_mapping_term() argument
    85   struct kbase_device *kbdev = kctx->kbdev;    in kbasep_ctx_user_reg_page_mapping_term()
    87   if (unlikely(kctx->csf.user_reg.vma))    in kbasep_ctx_user_reg_page_mapping_term()
    89   kctx->tgid, kctx->id);    in kbasep_ctx_user_reg_page_mapping_term()
    90   if (WARN_ON_ONCE(!list_empty(&kctx->csf.user_reg.link)))    in kbasep_ctx_user_reg_page_mapping_term()
    91   list_del_init(&kctx->csf.user_reg.link);    in kbasep_ctx_user_reg_page_mapping_term()
    101  static int kbasep_ctx_user_reg_page_mapping_init(struct kbase_context *kctx)    in kbasep_ctx_user_reg_page_mapping_init() argument
    103  INIT_LIST_HEAD(&kctx->csf.user_reg.link);    in kbasep_ctx_user_reg_page_mapping_init()
    104  kctx->csf.user_reg.vma = NULL;    in kbasep_ctx_user_reg_page_mapping_init()
    105  kctx->csf.user_reg.file_offset = 0;    in kbasep_ctx_user_reg_page_mapping_init()
    [all …]
mali_kbase_csf_cpu_queue_debugfs.c
    28   bool kbase_csf_cpu_queue_read_dump_req(struct kbase_context *kctx,    in kbase_csf_cpu_queue_read_dump_req() argument
    31   if (atomic_cmpxchg(&kctx->csf.cpu_queue.dump_req_status,    in kbase_csf_cpu_queue_read_dump_req()
    52   struct kbase_context *kctx = file->private;    in kbasep_csf_cpu_queue_debugfs_show() local
    54   mutex_lock(&kctx->csf.lock);    in kbasep_csf_cpu_queue_debugfs_show()
    55   if (atomic_read(&kctx->csf.cpu_queue.dump_req_status) !=    in kbasep_csf_cpu_queue_debugfs_show()
    58   mutex_unlock(&kctx->csf.lock);    in kbasep_csf_cpu_queue_debugfs_show()
    62   atomic_set(&kctx->csf.cpu_queue.dump_req_status, BASE_CSF_CPU_QUEUE_DUMP_ISSUED);    in kbasep_csf_cpu_queue_debugfs_show()
    63   init_completion(&kctx->csf.cpu_queue.dump_cmp);    in kbasep_csf_cpu_queue_debugfs_show()
    64   kbase_event_wakeup(kctx);    in kbasep_csf_cpu_queue_debugfs_show()
    65   mutex_unlock(&kctx->csf.lock);    in kbasep_csf_cpu_queue_debugfs_show()
    [all …]
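The dump_req_status matches show a small handshake: the debugfs show() path publishes a dump request and waits on a completion, while the notification path claims the request with atomic_cmpxchg() so the dump is produced exactly once. A sketch of that handshake (the state values are invented for illustration; the driver's BASE_CSF_CPU_QUEUE_DUMP_* values are not shown in these matches):

    #include <linux/atomic.h>
    #include <linux/completion.h>
    #include <linux/types.h>

    /* Illustrative dump-request states, not the driver's constants. */
    enum { DUMP_COMPLETE = 0, DUMP_ISSUED = 1, DUMP_PENDING = 2 };

    struct dump_request {
        atomic_t status;
        struct completion done;
    };

    /* Requester side: publish a dump request; the caller would then wake the
     * client and wait_for_completion_timeout(&req->done, ...). */
    static void issue_dump_request(struct dump_request *req)
    {
        atomic_set(&req->status, DUMP_ISSUED);
        init_completion(&req->done);
    }

    /* Client-notification side: atomically claim the request so only one
     * reader produces the dump, mirroring the atomic_cmpxchg() usage above. */
    static bool read_dump_request(struct dump_request *req)
    {
        return atomic_cmpxchg(&req->status, DUMP_ISSUED, DUMP_PENDING) == DUMP_ISSUED;
    }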
/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/context/backend/
mali_kbase_context_jm.c
    42   void kbase_context_debugfs_init(struct kbase_context *const kctx)    in kbase_context_debugfs_init() argument
    44   kbase_debug_mem_view_init(kctx);    in kbase_context_debugfs_init()
    45   kbase_debug_mem_zones_init(kctx);    in kbase_context_debugfs_init()
    46   kbase_debug_mem_allocs_init(kctx);    in kbase_context_debugfs_init()
    47   kbase_mem_pool_debugfs_init(kctx->kctx_dentry, kctx);    in kbase_context_debugfs_init()
    48   kbase_jit_debugfs_init(kctx);    in kbase_context_debugfs_init()
    49   kbasep_jd_debugfs_ctx_init(kctx);    in kbase_context_debugfs_init()
    53   void kbase_context_debugfs_term(struct kbase_context *const kctx)    in kbase_context_debugfs_term() argument
    55   debugfs_remove_recursive(kctx->kctx_dentry);    in kbase_context_debugfs_term()
    59   void kbase_context_debugfs_init(struct kbase_context *const kctx)    in kbase_context_debugfs_init() argument
    [all …]
mali_kbase_context_csf.c
    45   void kbase_context_debugfs_init(struct kbase_context *const kctx)    in kbase_context_debugfs_init() argument
    47   kbase_debug_mem_view_init(kctx);    in kbase_context_debugfs_init()
    48   kbase_debug_mem_zones_init(kctx);    in kbase_context_debugfs_init()
    49   kbase_debug_mem_allocs_init(kctx);    in kbase_context_debugfs_init()
    50   kbase_mem_pool_debugfs_init(kctx->kctx_dentry, kctx);    in kbase_context_debugfs_init()
    51   kbase_jit_debugfs_init(kctx);    in kbase_context_debugfs_init()
    52   kbase_csf_queue_group_debugfs_init(kctx);    in kbase_context_debugfs_init()
    53   kbase_csf_kcpu_debugfs_init(kctx);    in kbase_context_debugfs_init()
    54   kbase_csf_sync_debugfs_init(kctx);    in kbase_context_debugfs_init()
    55   kbase_csf_tiler_heap_debugfs_init(kctx);    in kbase_context_debugfs_init()
    [all …]
/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/jm/
mali_kbase_jm_js.h
    96   int kbasep_js_kctx_init(struct kbase_context *const kctx);
    115  void kbasep_js_kctx_term(struct kbase_context *kctx);
    135  static inline void kbase_jsctx_slot_prio_blocked_set(struct kbase_context *kctx, unsigned int js,    in kbase_jsctx_slot_prio_blocked_set() argument
    139  &kctx->slot_tracking[js];    in kbase_jsctx_slot_prio_blocked_set()
    141  lockdep_assert_held(&kctx->kbdev->hwaccess_lock);    in kbase_jsctx_slot_prio_blocked_set()
    147  KBASE_KTRACE_ADD_JM_SLOT_INFO(kctx->kbdev, JS_SLOT_PRIO_BLOCKED, kctx,    in kbase_jsctx_slot_prio_blocked_set()
    158  static inline int kbase_jsctx_atoms_pulled(struct kbase_context *kctx)    in kbase_jsctx_atoms_pulled() argument
    160  return atomic_read(&kctx->atoms_pulled_all_slots);    in kbase_jsctx_atoms_pulled()
    207  bool kbasep_js_add_job(struct kbase_context *kctx, struct kbase_jd_atom *atom);
    243  struct kbase_context *kctx, struct kbase_jd_atom *atom);
    [all …]