| /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/ |
| mali_kbase_ctx_sched.c |
    74  kctx->as_nr = KBASEP_AS_NR_INVALID;  in kbase_ctx_sched_init_ctx()
    96  if ((kctx->as_nr != KBASEP_AS_NR_INVALID) &&  in kbasep_ctx_sched_find_as_for_ctx()
    97  (kbdev->as_free & (1u << kctx->as_nr)))  in kbasep_ctx_sched_find_as_for_ctx()
    98  return kctx->as_nr;  in kbasep_ctx_sched_find_as_for_ctx()
    127  if (free_as != kctx->as_nr) {  in kbase_ctx_sched_retain_ctx()
    136  prev_kctx->as_nr = KBASEP_AS_NR_INVALID;  in kbase_ctx_sched_retain_ctx()
    138  kctx->as_nr = free_as;  in kbase_ctx_sched_retain_ctx()
    143  kctx->as_nr);  in kbase_ctx_sched_retain_ctx()
    151  WARN_ON(kctx->as_nr != KBASEP_AS_NR_INVALID);  in kbase_ctx_sched_retain_ctx()
    155  return kctx->as_nr;  in kbase_ctx_sched_retain_ctx()
    [all …]
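
Taken together, these hits sketch how the kbase context scheduler hands out hardware address spaces: a context starts with `as_nr = KBASEP_AS_NR_INVALID`, keeps its previous AS whenever that bit is still set in `kbdev->as_free`, and otherwise is given a freshly chosen slot while the previous holder's `as_nr` is invalidated. The following is a minimal userspace model of that bitmask bookkeeping, not the driver code itself; the `device`/`context` structs and all constants are illustrative.

```c
#include <stdio.h>

#define AS_NR_INVALID (-1)
#define NR_AS 8

struct context { int as_nr; };

struct device {
	unsigned int as_free;             /* bit n set => address space n is free */
	struct context *as_to_ctx[NR_AS]; /* reverse map: AS number -> holder */
};

/* Prefer the AS this context used last time, if it is still free. */
static int find_as_for_ctx(struct device *dev, struct context *ctx)
{
	int i;

	if (ctx->as_nr != AS_NR_INVALID && (dev->as_free & (1u << ctx->as_nr)))
		return ctx->as_nr;

	for (i = 0; i < NR_AS; i++)
		if (dev->as_free & (1u << i))
			return i;

	return AS_NR_INVALID;
}

static int retain_ctx(struct device *dev, struct context *ctx)
{
	int free_as = find_as_for_ctx(dev, ctx);

	if (free_as == AS_NR_INVALID)
		return AS_NR_INVALID;            /* caller must free an AS first */

	if (free_as != ctx->as_nr) {
		struct context *prev = dev->as_to_ctx[free_as];

		if (prev)
			prev->as_nr = AS_NR_INVALID; /* evict the previous holder */
		ctx->as_nr = free_as;
	}
	dev->as_free &= ~(1u << free_as);
	dev->as_to_ctx[free_as] = ctx;
	return ctx->as_nr;
}

int main(void)
{
	struct device dev = { .as_free = 0x3 };   /* AS 0 and 1 free */
	struct context a = { AS_NR_INVALID }, b = { AS_NR_INVALID };

	printf("a -> AS %d\n", retain_ctx(&dev, &a));
	printf("b -> AS %d\n", retain_ctx(&dev, &b));
	return 0;
}
```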
|
| mali_kbase_ctx_sched.h |
    161  struct kbase_device *kbdev, size_t as_nr);
    173  size_t as_nr);
    188  struct kbase_device *kbdev, size_t as_nr);
|
| mali_kbase_js.c |
    1797  kctx_as_nr = kctx->as_nr;  in kbasep_js_runpool_release_ctx_internal()
    1809  KBASE_DEBUG_ASSERT(kctx_as_nr == kctx->as_nr);  in kbasep_js_runpool_release_ctx_internal()
    1857  KBASE_TLSTREAM_TL_NRET_AS_CTX(kbdev, &kbdev->as[kctx->as_nr], kctx);  in kbasep_js_runpool_release_ctx_internal()
    2069  int as_nr;  local
    2079  as_nr = kbase_ctx_sched_retain_ctx(kctx);
    2082  if (as_nr == KBASEP_AS_NR_INVALID) {
    2083  as_nr = kbase_backend_find_and_release_free_address_space(
    2085  if (as_nr != KBASEP_AS_NR_INVALID) {
    2091  as_nr = kbase_ctx_sched_retain_ctx(kctx);
    2095  WARN_ON(as_nr == KBASEP_AS_NR_INVALID);
    [all …]
|
| /OK3568_Linux_fs/kernel/drivers/gpu/arm/midgard/ |
| mali_kbase_ctx_sched.c |
    76  if ((kctx->as_nr != KBASEP_AS_NR_INVALID) &&  in kbasep_ctx_sched_find_as_for_ctx()
    77  (kbdev->as_free & (1u << kctx->as_nr)))  in kbasep_ctx_sched_find_as_for_ctx()
    78  return kctx->as_nr;  in kbasep_ctx_sched_find_as_for_ctx()
    107  if (free_as != kctx->as_nr) {  in kbase_ctx_sched_retain_ctx()
    114  prev_kctx->as_nr = KBASEP_AS_NR_INVALID;  in kbase_ctx_sched_retain_ctx()
    117  kctx->as_nr = free_as;  in kbase_ctx_sched_retain_ctx()
    127  WARN_ON(kctx->as_nr != KBASEP_AS_NR_INVALID);  in kbase_ctx_sched_retain_ctx()
    131  return kctx->as_nr;  in kbase_ctx_sched_retain_ctx()
    143  WARN_ON(kctx->as_nr == KBASEP_AS_NR_INVALID);  in kbase_ctx_sched_retain_ctx_refcount()
    144  WARN_ON(kbdev->as_to_kctx[kctx->as_nr] != kctx);  in kbase_ctx_sched_retain_ctx_refcount()
    [all …]
|
| mali_kbase_js.h |
    270  struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev, int as_nr);
    613  KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);  in kbasep_js_is_submit_allowed()
    616  test_bit = (u16) (1u << kctx->as_nr);  in kbasep_js_is_submit_allowed()
    634  KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);  in kbasep_js_set_submit_allowed()
    637  set_bit = (u16) (1u << kctx->as_nr);  in kbasep_js_set_submit_allowed()
    639  dev_dbg(kctx->kbdev->dev, "JS: Setting Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);  in kbasep_js_set_submit_allowed()
    658  KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);  in kbasep_js_clear_submit_allowed()
    661  clear_bit = (u16) (1u << kctx->as_nr);  in kbasep_js_clear_submit_allowed()
    664  dev_dbg(kctx->kbdev->dev, "JS: Clearing Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);  in kbasep_js_clear_submit_allowed()
    779  … struct kbase_context *kbasep_js_runpool_lookup_ctx_noretain(struct kbase_device *kbdev, int as_nr)  in kbasep_js_runpool_lookup_ctx_noretain()  argument
    [all …]
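
The `kbasep_js_*_submit_allowed()` helpers above gate job submission with a 16-bit mask indexed by `as_nr`: one bit per hardware address space, set while the scheduled context may submit and cleared when it may not. A toy model of that bitmask follows, with a global `submit_allowed` variable standing in for the driver's per-device field; the function names here are illustrative.

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy model: one bit per hardware address space in a 16-bit mask. */
static unsigned short submit_allowed;

static void set_submit_allowed(int as_nr)
{
	submit_allowed |= (unsigned short)(1u << as_nr);
}

static void clear_submit_allowed(int as_nr)
{
	submit_allowed &= (unsigned short)~(1u << as_nr);
}

static bool is_submit_allowed(int as_nr)
{
	return submit_allowed & (unsigned short)(1u << as_nr);
}

int main(void)
{
	set_submit_allowed(3);
	printf("AS 3 %s submit\n", is_submit_allowed(3) ? "may" : "may not");
	clear_submit_allowed(3);
	printf("AS 3 %s submit\n", is_submit_allowed(3) ? "may" : "may not");
	return 0;
}
```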
|
| mali_kbase_mmu_mode_aarch64.c |
    96  struct kbase_as * const as = &kbdev->as[kctx->as_nr];  in mmu_update()
    105  static void mmu_disable_as(struct kbase_device *kbdev, int as_nr)  in mmu_disable_as()  argument
    107  struct kbase_as * const as = &kbdev->as[as_nr];  in mmu_disable_as()
|
| mali_kbase_js.c |
    144  int as_nr;  in kbasep_js_runpool_retain_ctx_nolock()  local
    152  as_nr = kctx->as_nr;  in kbasep_js_runpool_retain_ctx_nolock()
    154  KBASE_DEBUG_ASSERT(as_nr >= 0);  in kbasep_js_runpool_retain_ctx_nolock()
    1245  int as_nr)  in kbasep_js_runpool_lookup_ctx()  argument
    1253  KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);  in kbasep_js_runpool_lookup_ctx()
    1258  found_kctx = kbdev->as_to_kctx[as_nr];  in kbasep_js_runpool_lookup_ctx()
    1373  kctx_as_nr = kctx->as_nr;  in kbasep_js_runpool_release_ctx_internal()
    1386  KBASE_DEBUG_ASSERT(kctx_as_nr == kctx->as_nr);  in kbasep_js_runpool_release_ctx_internal()
    1430  kbase_trace_mali_mmu_as_released(kctx->as_nr);  in kbasep_js_runpool_release_ctx_internal()
    1432  KBASE_TLSTREAM_TL_NRET_AS_CTX(&kbdev->as[kctx->as_nr], kctx);  in kbasep_js_runpool_release_ctx_internal()
    [all …]
|
| mali_kbase_mmu_mode_lpae.c |
    100  struct kbase_as * const as = &kbdev->as[kctx->as_nr];  in mmu_update()
    109  static void mmu_disable_as(struct kbase_device *kbdev, int as_nr)  in mmu_disable_as()  argument
    111  struct kbase_as * const as = &kbdev->as[as_nr];  in mmu_disable_as()
|
| mali_kbase_mmu.c |
    881  &kbdev->as[kctx->as_nr],  in kbase_mmu_flush_invalidate_noretain()
    929  KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);  in kbase_mmu_flush_invalidate()
    945  &kbdev->as[kctx->as_nr],  in kbase_mmu_flush_invalidate()
    993  KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);  in kbase_mmu_update()
    999  void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr)  in kbase_mmu_disable_as()  argument
    1004  kbdev->mmu_mode->disable_as(kbdev, as_nr);  in kbase_mmu_disable_as()
    1013  KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);  in kbase_mmu_disable()
    1026  kctx->kbdev->mmu_mode->disable_as(kctx->kbdev, kctx->as_nr);  in kbase_mmu_disable()
    1780  if ((kbdev->hwcnt.kctx) && (kbdev->hwcnt.kctx->as_nr == as_no) &&  in kbase_mmu_report_fault_and_kill()
    1902  KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);  in kbase_as_poking_timer_retain_atom()
    [all …]
|
| mali_kbase_mmu_mode.h |
    35  void (*disable_as)(struct kbase_device *kbdev, int as_nr);
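
`mali_kbase_mmu_mode.h` declares `disable_as` as a member of the MMU-mode ops table, which is how `kbase_mmu_disable_as()` in the `mali_kbase_mmu.c` hits above reaches the right backend (AArch64 or LPAE) without knowing which one is active. Below is a stripped-down sketch of that function-pointer dispatch; the struct layout and names are illustrative, not the driver's.

```c
#include <stdio.h>

struct device;

/* Per-format MMU backend, selected once at probe time. */
struct mmu_mode_ops {
	void (*disable_as)(struct device *dev, int as_nr);
};

struct device {
	const struct mmu_mode_ops *mmu_mode;
};

static void aarch64_disable_as(struct device *dev, int as_nr)
{
	(void)dev;
	printf("AArch64 backend: disabling address space %d\n", as_nr);
}

static const struct mmu_mode_ops aarch64_mode = {
	.disable_as = aarch64_disable_as,
};

/* Generic entry point: dispatches to whichever backend the device uses. */
static void mmu_disable_as(struct device *dev, int as_nr)
{
	dev->mmu_mode->disable_as(dev, as_nr);
}

int main(void)
{
	struct device dev = { .mmu_mode = &aarch64_mode };

	mmu_disable_as(&dev, 2);
	return 0;
}
```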
|
| mali_kbase_tlstream.c |
    1395  unsigned int as_nr;  in kbase_create_timeline_objects()  local
    1406  for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)  in kbase_create_timeline_objects()
    1407  KBASE_TLSTREAM_TL_SUMMARY_NEW_AS(&kbdev->as[as_nr], as_nr);  in kbase_create_timeline_objects()
    1420  for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)  in kbase_create_timeline_objects()
    1422  &kbdev->as[as_nr],  in kbase_create_timeline_objects()
|
| /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/tl/backend/ |
| mali_kbase_timeline_jm.c |
    31  unsigned int as_nr;  in kbase_create_timeline_objects()  local
    45  for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)  in kbase_create_timeline_objects()
    46  __kbase_tlstream_tl_new_as(summary, &kbdev->as[as_nr], as_nr);  in kbase_create_timeline_objects()
    60  for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)  in kbase_create_timeline_objects()
    62  &kbdev->as[as_nr],  in kbase_create_timeline_objects()
|
| mali_kbase_timeline_csf.c |
    32  unsigned int as_nr;  in kbase_create_timeline_objects()  local
    55  for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)  in kbase_create_timeline_objects()
    56  __kbase_tlstream_tl_new_as(summary, &kbdev->as[as_nr], as_nr);  in kbase_create_timeline_objects()
    65  for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)  in kbase_create_timeline_objects()
    67  &kbdev->as[as_nr],  in kbase_create_timeline_objects()
    148  if (kctx->as_nr != KBASEP_AS_NR_INVALID)  in kbase_create_timeline_objects()
    150  kctx->as_nr);  in kbase_create_timeline_objects()
|
| /OK3568_Linux_fs/kernel/drivers/gpu/drm/panfrost/ |
| panfrost_mmu.c |
    29  static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)  in wait_ready()  argument
    36  ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),  in wait_ready()
    45  static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)  in write_cmd()  argument
    50  status = wait_ready(pfdev, as_nr);  in write_cmd()
    52  mmu_write(pfdev, AS_COMMAND(as_nr), cmd);  in write_cmd()
    57  static void lock_region(struct panfrost_device *pfdev, u32 as_nr,  in lock_region()  argument
    71  mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);  in lock_region()
    72  mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);  in lock_region()
    73  write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);  in lock_region()
    77  static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,  in mmu_hw_do_operation_locked()  argument
    [all …]
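
The Panfrost hits show the per-address-space command protocol: poll `AS_STATUS(as_nr)` until the AS is idle, then write the next command to `AS_COMMAND(as_nr)`; for a region operation, the 64-bit lock address is first split across `AS_LOCKADDR_LO/HI`. The sketch below reproduces that sequence against a simulated register file; the register offsets and the `iomem` array are made up for illustration and do not match the real Midgard/Bifrost register map.

```c
#include <stdint.h>
#include <stdio.h>

/* Simulated MMIO: one small register window per address space.      */
/* Offsets and bits below are illustrative, not the real hardware map. */
#define AS_STRIDE           0x40
#define AS_STATUS(as)       ((as) * AS_STRIDE + 0x00)
#define AS_COMMAND(as)      ((as) * AS_STRIDE + 0x04)
#define AS_LOCKADDR_LO(as)  ((as) * AS_STRIDE + 0x08)
#define AS_LOCKADDR_HI(as)  ((as) * AS_STRIDE + 0x0c)

#define AS_STATUS_ACTIVE    0x1
#define AS_COMMAND_LOCK     0x2

static uint32_t iomem[0x1000 / 4];

static void mmu_write(uint32_t off, uint32_t val) { iomem[off / 4] = val; }
static uint32_t mmu_read(uint32_t off)            { return iomem[off / 4]; }

/* Poll (bounded, as the driver does) until the AS finished its previous command. */
static int wait_ready(uint32_t as_nr)
{
	int tries = 1000;

	while ((mmu_read(AS_STATUS(as_nr)) & AS_STATUS_ACTIVE) && --tries)
		;
	return tries ? 0 : -1;
}

static int write_cmd(uint32_t as_nr, uint32_t cmd)
{
	int status = wait_ready(as_nr);

	if (!status)
		mmu_write(AS_COMMAND(as_nr), cmd);
	return status;
}

/* Lock a region: program the 64-bit lock address, then issue LOCK. */
static void lock_region(uint32_t as_nr, uint64_t region)
{
	mmu_write(AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
	mmu_write(AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
	write_cmd(as_nr, AS_COMMAND_LOCK);
}

int main(void)
{
	lock_region(1, 0x0000000123456000ULL);
	printf("AS1 LOCKADDR = %08x:%08x\n",
	       (unsigned int)mmu_read(AS_LOCKADDR_HI(1)),
	       (unsigned int)mmu_read(AS_LOCKADDR_LO(1)));
	return 0;
}
```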
|
| /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/mmu/backend/ |
| mali_kbase_mmu_csf.c |
    68  static void submit_work_pagefault(struct kbase_device *kbdev, u32 as_nr,  in submit_work_pagefault()  argument
    72  struct kbase_as *const as = &kbdev->as[as_nr];  in submit_work_pagefault()
    76  kctx = kbase_ctx_sched_as_to_ctx_nolock(kbdev, as_nr);  in submit_work_pagefault()
    93  "Page fault is already pending for as %u", as_nr);  in submit_work_pagefault()
    324  u32 status, u32 as_nr)  in kbase_mmu_bus_fault_interrupt()  argument
    331  if (WARN_ON(as_nr == MCU_AS_NR))  in kbase_mmu_bus_fault_interrupt()
    334  if (WARN_ON(as_nr >= BASE_MAX_NR_AS))  in kbase_mmu_bus_fault_interrupt()
    337  as = &kbdev->as[as_nr];  in kbase_mmu_bus_fault_interrupt()
    347  kbase_as_fault_debugfs_new(kbdev, as_nr);  in kbase_mmu_bus_fault_interrupt()
    349  kctx = kbase_ctx_sched_as_to_ctx_refcount(kbdev, as_nr);  in kbase_mmu_bus_fault_interrupt()
    [all …]
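
`kbase_mmu_bus_fault_interrupt()` validates the reported AS number before it ever indexes `kbdev->as[]`: a fault naming the firmware MCU's address space or an AS at or beyond `BASE_MAX_NR_AS` is rejected up front. A trimmed-down model of that guard follows; the constants, struct, and return values are illustrative only.

```c
#include <stdio.h>

#define MCU_AS_NR  0    /* illustrative: AS reserved for the firmware MCU */
#define MAX_NR_AS  16

struct address_space { int pf_busy; };
static struct address_space as_table[MAX_NR_AS];

/* Reject faults that name the MCU's AS or an out-of-range AS before indexing. */
static int bus_fault_interrupt(unsigned int as_nr)
{
	struct address_space *as;

	if (as_nr == MCU_AS_NR)
		return -1;
	if (as_nr >= MAX_NR_AS)
		return -1;

	as = &as_table[as_nr];
	as->pf_busy = 1;   /* hand the fault off to deferred handling */
	return 0;
}

int main(void)
{
	printf("fault on AS 3:  %s\n", bus_fault_interrupt(3)  ? "rejected" : "queued");
	printf("fault on AS 99: %s\n", bus_fault_interrupt(99) ? "rejected" : "queued");
	return 0;
}
```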
|
| /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/jm/ |
| mali_kbase_jm_js.h |
    697  if (WARN((kctx->as_nr == KBASEP_AS_NR_INVALID) || !kbase_ctx_flag(kctx, KCTX_SCHEDULED),  in kbasep_js_is_submit_allowed()
    699  kctx->as_nr, atomic_read(&kctx->flags)))  in kbasep_js_is_submit_allowed()
    702  test_bit = (u16) (1u << kctx->as_nr);  in kbasep_js_is_submit_allowed()
    706  is_allowed ? "is" : "isn't", (void *)kctx, kctx->as_nr);  in kbasep_js_is_submit_allowed()
    727  if (WARN((kctx->as_nr == KBASEP_AS_NR_INVALID) || !kbase_ctx_flag(kctx, KCTX_SCHEDULED),  in kbasep_js_set_submit_allowed()
    729  kctx->as_nr, atomic_read(&kctx->flags)))  in kbasep_js_set_submit_allowed()
    732  set_bit = (u16) (1u << kctx->as_nr);  in kbasep_js_set_submit_allowed()
    735  kctx, kctx->as_nr);  in kbasep_js_set_submit_allowed()
    759  if (WARN((kctx->as_nr == KBASEP_AS_NR_INVALID) || !kbase_ctx_flag(kctx, KCTX_SCHEDULED),  in kbasep_js_clear_submit_allowed()
    761  kctx->as_nr, atomic_read(&kctx->flags)))  in kbasep_js_clear_submit_allowed()
    [all …]
|
| /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/mmu/ |
| mali_kbase_mmu_hw_direct.c |
    170  static int wait_ready(struct kbase_device *kbdev, unsigned int as_nr)  in wait_ready()  argument
    176  if (unlikely(kbdev->as[as_nr].is_unresponsive))  in wait_ready()
    184  if (!(kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS)) &  in wait_ready()
    194  as_nr);  in wait_ready()
    195  kbdev->as[as_nr].is_unresponsive = true;  in wait_ready()
    202  static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd)  in write_cmd()  argument
    205  const int status = wait_ready(kbdev, as_nr);  in write_cmd()
    208  kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_COMMAND), cmd);  in write_cmd()
    212  as_nr, cmd);  in write_cmd()
    216  as_nr, cmd);  in write_cmd()
    [all …]
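
Compared with the older midgard version further down, this `wait_ready()` adds a latch: once an address space times out waiting for `AS_STATUS` to clear, it is marked `is_unresponsive`, and every later `write_cmd()` on that AS fails immediately instead of polling again. A small model of that behaviour follows, with a simulated status register and field names standing in for the real ones.

```c
#include <stdbool.h>
#include <stdio.h>

#define NR_AS 4
#define AS_STATUS_BUSY 0x1

struct address_space {
	unsigned int status;     /* simulated AS_STATUS register */
	unsigned int command;    /* last accepted command (stand-in for AS_COMMAND) */
	bool is_unresponsive;    /* latched after the first timeout */
};

static struct address_space as_table[NR_AS];

static int wait_ready(unsigned int as_nr)
{
	struct address_space *as = &as_table[as_nr];
	int tries = 100;

	if (as->is_unresponsive)
		return -1;                      /* fail fast: don't poll a dead AS again */

	while ((as->status & AS_STATUS_BUSY) && --tries)
		;

	if (!tries) {
		as->is_unresponsive = true;     /* remember the timeout */
		return -1;
	}
	return 0;
}

static int write_cmd(unsigned int as_nr, unsigned int cmd)
{
	int status = wait_ready(as_nr);

	if (!status)
		as_table[as_nr].command = cmd;  /* stand-in for the AS_COMMAND write */
	return status;
}

int main(void)
{
	as_table[1].status = AS_STATUS_BUSY;          /* AS 1 never goes idle */
	printf("cmd on AS 0: %d\n", write_cmd(0, 0x3));
	printf("cmd on AS 1: %d\n", write_cmd(1, 0x3)); /* times out, latches */
	printf("cmd on AS 1: %d\n", write_cmd(1, 0x3)); /* fails fast */
	return 0;
}
```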
|
| mali_kbase_mmu.c |
    176  static void mmu_invalidate(struct kbase_device *kbdev, struct kbase_context *kctx, int as_nr,  in mmu_invalidate()  argument
    183  if (kbdev->pm.backend.gpu_powered && (!kctx || kctx->as_nr >= 0)) {  in mmu_invalidate()
    184  as_nr = kctx ? kctx->as_nr : as_nr;  in mmu_invalidate()
    185  if (kbase_mmu_hw_do_unlock(kbdev, &kbdev->as[as_nr], op_param))  in mmu_invalidate()
    231  static void mmu_flush_invalidate(struct kbase_device *kbdev, struct kbase_context *kctx, int as_nr,  in mmu_flush_invalidate()  argument
    245  mmu_flush_invalidate_as(kbdev, &kbdev->as[as_nr], op_param);  in mmu_flush_invalidate()
    256  KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);  in mmu_flush_invalidate()
    258  mmu_flush_invalidate_as(kbdev, &kbdev->as[kctx->as_nr], op_param);  in mmu_flush_invalidate()
    278  int as_nr, const struct kbase_mmu_hw_op_param *op_param)  in mmu_flush_invalidate_on_gpu_ctrl()  argument
    286  if (kbdev->pm.backend.gpu_powered && (!kctx || kctx->as_nr >= 0)) {  in mmu_flush_invalidate_on_gpu_ctrl()
    [all …]
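
`mmu_invalidate()` and `mmu_flush_invalidate_on_gpu_ctrl()` resolve the address space the same way: when a context is supplied, its currently assigned `as_nr` wins over the explicit argument, and the hardware is only touched if the GPU is powered and the context actually holds an AS (`as_nr >= 0`). A condensed model of that selection logic follows; the types and the `do_unlock()` stub are stand-ins, not the driver's API.

```c
#include <stdbool.h>
#include <stdio.h>

#define AS_NR_INVALID (-1)

struct context { int as_nr; };
struct device  { bool gpu_powered; };

/* Stand-in for the hardware unlock/flush call on one address space. */
static void do_unlock(int as_nr)
{
	printf("flush/unlock on AS %d\n", as_nr);
}

static void mmu_invalidate(struct device *dev, struct context *kctx, int as_nr)
{
	/* Only touch hardware if the GPU is powered and, when a context is
	 * supplied, that context currently owns an address space. */
	if (dev->gpu_powered && (!kctx || kctx->as_nr >= 0)) {
		as_nr = kctx ? kctx->as_nr : as_nr;
		do_unlock(as_nr);
	}
}

int main(void)
{
	struct device dev = { .gpu_powered = true };
	struct context scheduled   = { .as_nr = 2 };
	struct context descheduled = { .as_nr = AS_NR_INVALID };

	mmu_invalidate(&dev, &scheduled, 5);    /* uses AS 2, not 5 */
	mmu_invalidate(&dev, NULL, 5);          /* no context: uses AS 5 */
	mmu_invalidate(&dev, &descheduled, 5);  /* skipped: context holds no AS */
	return 0;
}
```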
|
| mali_kbase_mmu.h |
    159  struct tagged_addr *phys, size_t nr, unsigned long flags, int as_nr,
    164  unsigned long flags, int as_nr, int group_id,
    169  unsigned long flags, int as_nr, int group_id,
    218  int as_nr, bool ignore_page_migration);
    306  u32 as_nr);
    321  u32 as_nr, u64 address, bool as_valid);
|
| mali_kbase_mmu_mode_aarch64.c |
    55  int as_nr)  in mmu_update()  argument
    60  if (WARN_ON(as_nr == KBASEP_AS_NR_INVALID))  in mmu_update()
    63  as = &kbdev->as[as_nr];  in mmu_update()
    72  static void mmu_disable_as(struct kbase_device *kbdev, int as_nr)  in mmu_disable_as()  argument
    74  struct kbase_as * const as = &kbdev->as[as_nr];  in mmu_disable_as()
|
| /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/device/backend/ |
| mali_kbase_device_hw_csf.c |
    43  u32 as_nr, bool as_valid)  in kbase_report_gpu_fault()  argument
    54  kbase_mmu_gpu_fault_interrupt(kbdev, status, as_nr, address, as_valid);  in kbase_report_gpu_fault()
    62  const u32 as_nr = (status & GPU_FAULTSTATUS_JASID_MASK) >>  in kbase_gpu_fault_interrupt()  local
    69  if (!as_valid || (as_nr == MCU_AS_NR)) {  in kbase_gpu_fault_interrupt()
    70  kbase_report_gpu_fault(kbdev, status, as_nr, as_valid);  in kbase_gpu_fault_interrupt()
    78  if (kbase_mmu_bus_fault_interrupt(kbdev, status, as_nr))  in kbase_gpu_fault_interrupt()
    83  kbase_report_gpu_fault(kbdev, status, as_nr, as_valid);  in kbase_gpu_fault_interrupt()
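
Here the faulting address space is recovered from the GPU fault status word by masking and shifting the JASID field, after which the fault is either reported directly (invalid AS, or the MCU's own AS) or routed through the bus-fault path. The sketch below only illustrates that decode-and-route flow; the mask and shift values are placeholders, not the real `GPU_FAULTSTATUS_JASID_*` layout.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder field layout: the real GPU_FAULTSTATUS_JASID_* values differ. */
#define FAULTSTATUS_JASID_MASK  0x000F0000u
#define FAULTSTATUS_JASID_SHIFT 16
#define MCU_AS_NR               0

static void report_gpu_fault(uint32_t status, uint32_t as_nr, bool as_valid)
{
	printf("GPU fault: status=0x%08x as=%u valid=%d\n",
	       (unsigned int)status, (unsigned int)as_nr, as_valid);
}

static void gpu_fault_interrupt(uint32_t status, bool as_valid)
{
	const uint32_t as_nr =
		(status & FAULTSTATUS_JASID_MASK) >> FAULTSTATUS_JASID_SHIFT;

	/* Faults with no usable AS, or on the firmware MCU's AS, are reported directly. */
	if (!as_valid || as_nr == MCU_AS_NR) {
		report_gpu_fault(status, as_nr, as_valid);
		return;
	}

	/* Otherwise treat it like a bus fault on that address space. */
	printf("routing bus fault to AS %u\n", (unsigned int)as_nr);
}

int main(void)
{
	gpu_fault_interrupt(0x00030001u, true);   /* JASID = 3 */
	gpu_fault_interrupt(0x00000001u, true);   /* JASID = 0 -> MCU AS */
	return 0;
}
```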
|
| /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/backend/gpu/ |
| mali_kbase_jm_as.c |
    94  int as_nr = kctx->as_nr;  in kbase_backend_release_ctx_irq()  local
    96  if (as_nr == KBASEP_AS_NR_INVALID) {  in kbase_backend_release_ctx_irq()
    211  int as_nr)  in kbase_backend_use_ctx()  argument
    226  new_address_space = &kbdev->as[as_nr];  in kbase_backend_use_ctx()
|
| /OK3568_Linux_fs/kernel/drivers/gpu/arm/midgard/backend/gpu/ |
| mali_kbase_jm_as.c |
    89  int as_nr = kctx->as_nr;  in kbase_backend_release_ctx_irq()  local
    91  if (as_nr == KBASEP_AS_NR_INVALID) {  in kbase_backend_release_ctx_irq()
    207  int as_nr)  in kbase_backend_use_ctx()  argument
    221  new_address_space = &kbdev->as[as_nr];  in kbase_backend_use_ctx()
|
| mali_kbase_mmu_hw_direct.c |
    66  unsigned int as_nr, struct kbase_context *kctx)  in wait_ready()  argument
    69  u32 val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx);  in wait_ready()
    74  val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), NULL);  in wait_ready()
    83  kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx);  in wait_ready()
    88  static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd,  in write_cmd()  argument
    94  status = wait_ready(kbdev, as_nr, kctx);  in write_cmd()
    96  kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_COMMAND), cmd,  in write_cmd()
|
| mali_kbase_device_hw.c |
    156  KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);  in kbase_reg_write()
    180  KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);  in kbase_reg_read()
|