
Searched refs:scheduler (Results 1 – 25 of 269) sorted by relevance

/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/csf/
mali_kbase_csf_scheduler.c
67 void insert_group_to_runnable(struct kbase_csf_scheduler *const scheduler,
74 struct kbase_csf_scheduler *const scheduler);
115 struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler; in wait_for_dump_complete_on_group_deschedule() local
118 lockdep_assert_held(&scheduler->lock); in wait_for_dump_complete_on_group_deschedule()
124 (scheduler->state == SCHED_BUSY)) && in wait_for_dump_complete_on_group_deschedule()
127 mutex_unlock(&scheduler->lock); in wait_for_dump_complete_on_group_deschedule()
131 mutex_lock(&scheduler->lock); in wait_for_dump_complete_on_group_deschedule()
161 struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler; in schedule_actions_trigger_df() local
163 lockdep_assert_held(&scheduler->lock); in schedule_actions_trigger_df()
168 if (unlikely(scheduler->state != SCHED_BUSY)) { in schedule_actions_trigger_df()
[all …]
mali_kbase_csf_scheduler.h
279 mutex_lock(&kbdev->csf.scheduler.lock); in kbase_csf_scheduler_lock()
289 mutex_unlock(&kbdev->csf.scheduler.lock); in kbase_csf_scheduler_unlock()
305 spin_lock_irqsave(&kbdev->csf.scheduler.interrupt_lock, *flags); in kbase_csf_scheduler_spin_lock()
318 spin_unlock_irqrestore(&kbdev->csf.scheduler.interrupt_lock, flags); in kbase_csf_scheduler_spin_unlock()
330 lockdep_assert_held(&kbdev->csf.scheduler.interrupt_lock); in kbase_csf_scheduler_spin_lock_assert_held()
373 return (kbdev->csf.scheduler.active_protm_grp != NULL); in kbase_csf_scheduler_protected_mode_in_use()
470 lockdep_assert_held(&kbdev->csf.scheduler.interrupt_lock); in kbase_csf_scheduler_all_csgs_idle()
471 return bitmap_equal(kbdev->csf.scheduler.csg_slots_idle_mask, in kbase_csf_scheduler_all_csgs_idle()
472 kbdev->csf.scheduler.csg_inuse_bitmap, in kbase_csf_scheduler_all_csgs_idle()
489 struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler; in kbase_csf_scheduler_tick_advance_nolock() local
[all …]
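The header hits above are thin inline wrappers: callers take the scheduler's mutex or its interrupt spinlock through a named helper instead of touching the lock fields directly, and the query helpers assert the expected lock with lockdep. Below is a minimal userspace sketch of the same wrapper idiom, using pthreads in place of the kernel mutex/spinlock primitives; the struct layout and the toy_* names are invented for illustration and are not the driver's real definitions.

/* Illustration only: the lock-wrapper idiom from mali_kbase_csf_scheduler.h,
 * rebuilt with userspace pthreads. Field and function names are invented. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_scheduler {
        pthread_mutex_t lock;           /* serialises scheduling decisions */
        pthread_mutex_t interrupt_lock; /* guards state shared with the IRQ path */
        bool all_groups_idle;
};

static void toy_scheduler_lock(struct toy_scheduler *s)   { pthread_mutex_lock(&s->lock); }
static void toy_scheduler_unlock(struct toy_scheduler *s) { pthread_mutex_unlock(&s->lock); }

/* Query helpers hide the second lock as well, so callers never need to
 * know which lock protects which field. */
static bool toy_scheduler_all_idle(struct toy_scheduler *s)
{
        pthread_mutex_lock(&s->interrupt_lock);
        bool idle = s->all_groups_idle;
        pthread_mutex_unlock(&s->interrupt_lock);
        return idle;
}

int main(void)
{
        struct toy_scheduler s = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .interrupt_lock = PTHREAD_MUTEX_INITIALIZER,
                .all_groups_idle = true,
        };

        toy_scheduler_lock(&s);
        printf("all idle: %d\n", toy_scheduler_all_idle(&s));
        toy_scheduler_unlock(&s);
        return 0;
}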
mali_kbase_csf_tiler_heap_reclaim.c
59 struct kbase_csf_scheduler *const scheduler = &kctx->kbdev->csf.scheduler; in detach_ctx_from_heap_reclaim_mgr() local
62 lockdep_assert_held(&scheduler->lock); in detach_ctx_from_heap_reclaim_mgr()
71 WARN_ON(atomic_sub_return(remaining, &scheduler->reclaim_mgr.unused_pages) < in detach_ctx_from_heap_reclaim_mgr()
83 struct kbase_csf_scheduler *const scheduler = &kctx->kbdev->csf.scheduler; in attach_ctx_to_heap_reclaim_mgr() local
86 lockdep_assert_held(&scheduler->lock); in attach_ctx_to_heap_reclaim_mgr()
96 list_add_tail(&info->mgr_link, &scheduler->reclaim_mgr.ctx_lists[prio]); in attach_ctx_to_heap_reclaim_mgr()
98 atomic_add(info->nr_est_unused_pages, &scheduler->reclaim_mgr.unused_pages); in attach_ctx_to_heap_reclaim_mgr()
109 lockdep_assert_held(&kctx->kbdev->csf.scheduler.lock); in kbase_csf_tiler_heap_reclaim_sched_notify_grp_active()
125 struct kbase_csf_scheduler *const scheduler = &kctx->kbdev->csf.scheduler; in kbase_csf_tiler_heap_reclaim_sched_notify_grp_evict() local
130 lockdep_assert_held(&scheduler->lock); in kbase_csf_tiler_heap_reclaim_sched_notify_grp_evict()
[all …]
mali_kbase_csf_mcu_shared_reg.c
105 struct kbase_csf_mcu_shared_regions *shared_regs = &kbdev->csf.scheduler.mcu_regs_data; in update_mapping_with_dummy_pages()
114 struct kbase_csf_mcu_shared_regions *shared_regs = &kbdev->csf.scheduler.mcu_regs_data; in insert_dummy_pages()
126 lockdep_assert_held(&group->kctx->kbdev->csf.scheduler.lock); in notify_group_csg_reg_map_done()
137 lockdep_assert_held(&kbdev->csf.scheduler.lock); in notify_group_csg_reg_map_error()
170 struct kbase_csf_mcu_shared_regions *shared_regs = &kbdev->csf.scheduler.mcu_regs_data; in userio_pages_replace_phys()
173 lockdep_assert_held(&kbdev->csf.scheduler.lock); in userio_pages_replace_phys()
208 lockdep_assert_held(&kbdev->csf.scheduler.lock); in csg_reg_update_on_csis()
262 lockdep_assert_held(&kbdev->csf.scheduler.lock); in group_bind_csg_reg()
353 lockdep_assert_held(&kbdev->csf.scheduler.lock); in kbase_csf_mcu_shared_set_group_csg_reg_active()
376 struct kbase_csf_mcu_shared_regions *shared_regs = &kbdev->csf.scheduler.mcu_regs_data; in kbase_csf_mcu_shared_set_group_csg_reg_unused()
[all …]
/OK3568_Linux_fs/kernel/drivers/video/rockchip/rve/
rve_reg.c
13 void rve_soft_reset(struct rve_scheduler_t *scheduler) in rve_soft_reset() argument
18 rve_write(1, RVE_SWREG5_IVE_IDLE_CTRL, scheduler); in rve_soft_reset()
22 rve_dump_read_back_reg(scheduler); in rve_soft_reset()
27 rve_read(RVE_SWREG5_IVE_IDLE_CTRL, scheduler), in rve_soft_reset()
28 rve_read(RVE_SWREG3_IVE_IDLE_PRC_STA, scheduler)); in rve_soft_reset()
30 pr_err("work status = %.8x", rve_read(RVE_SWREG6_IVE_WORK_STA, scheduler)); in rve_soft_reset()
36 reg = rve_read(RVE_SWREG3_IVE_IDLE_PRC_STA, scheduler); in rve_soft_reset()
41 rve_write(0x30000, RVE_SWREG3_IVE_IDLE_PRC_STA, scheduler); in rve_soft_reset()
44 rve_write(0xff0000, RVE_SWREG6_IVE_WORK_STA, scheduler); in rve_soft_reset()
47 rve_write(0x30000, RVE_SWREG1_IVE_IRQ, scheduler); in rve_soft_reset()
[all …]
rve_job.c
15 rve_scheduler_get_pending_job_list(struct rve_scheduler_t *scheduler) in rve_scheduler_get_pending_job_list() argument
20 spin_lock_irqsave(&scheduler->irq_lock, flags); in rve_scheduler_get_pending_job_list()
22 job = list_first_entry_or_null(&scheduler->todo_list, in rve_scheduler_get_pending_job_list()
25 spin_unlock_irqrestore(&scheduler->irq_lock, flags); in rve_scheduler_get_pending_job_list()
31 rve_scheduler_get_running_job(struct rve_scheduler_t *scheduler) in rve_scheduler_get_running_job() argument
36 spin_lock_irqsave(&scheduler->irq_lock, flags); in rve_scheduler_get_running_job()
38 job = scheduler->running_job; in rve_scheduler_get_running_job()
40 spin_unlock_irqrestore(&scheduler->irq_lock, flags); in rve_scheduler_get_running_job()
47 struct rve_scheduler_t *scheduler; in rve_scheduler_set_pid_info() local
52 scheduler = rve_job_get_scheduler(job); in rve_scheduler_set_pid_info()
[all …]
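rve_scheduler_get_pending_job_list() and rve_scheduler_get_running_job() above share one shape: take the scheduler's irq spinlock, read either the head of the todo list or the running_job pointer, and drop the lock before returning. Here is a small self-contained C sketch of that "peek under a lock" pattern using a pthread mutex and a plain linked list; the toy_job/toy_sched types are stand-ins for illustration, not the driver's.

/* Illustration of the "peek first pending job, or NULL, under a lock"
 * pattern from rve_job.c, using userspace pthreads. */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct toy_job {
        int id;
        struct toy_job *next;
};

struct toy_sched {
        pthread_mutex_t irq_lock;
        struct toy_job *todo_head;   /* pending jobs */
        struct toy_job *running_job; /* currently executing job, may be NULL */
};

/* Equivalent of list_first_entry_or_null() under spin_lock_irqsave(). */
static struct toy_job *toy_sched_peek_pending(struct toy_sched *s)
{
        pthread_mutex_lock(&s->irq_lock);
        struct toy_job *job = s->todo_head; /* NULL when the list is empty */
        pthread_mutex_unlock(&s->irq_lock);
        return job;
}

int main(void)
{
        struct toy_job j = { .id = 42, .next = NULL };
        struct toy_sched s = {
                .irq_lock = PTHREAD_MUTEX_INITIALIZER,
                .todo_head = &j,
                .running_job = NULL,
        };

        struct toy_job *job = toy_sched_peek_pending(&s);
        printf("first pending job: %d\n", job ? job->id : -1);
        return 0;
}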
rve_drv.c
46 struct rve_scheduler_t *scheduler = NULL; in hrtimer_handler() local
55 scheduler = rve->scheduler[i]; in hrtimer_handler()
57 spin_lock_irqsave(&scheduler->irq_lock, flags); in hrtimer_handler()
60 job = scheduler->running_job; in hrtimer_handler()
62 scheduler->timer.busy_time += ktime_us_delta(now, job->hw_recoder_time); in hrtimer_handler()
66 scheduler->timer.busy_time_record = scheduler->timer.busy_time; in hrtimer_handler()
67 scheduler->timer.busy_time = 0; in hrtimer_handler()
70 scheduler->session.rd_bandwidth = 0; in hrtimer_handler()
71 scheduler->session.wr_bandwidth = 0; in hrtimer_handler()
72 scheduler->session.cycle_cnt = 0; in hrtimer_handler()
[all …]
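The hrtimer_handler() hits show the load-accounting scheme the RVE and RGA drivers share: on every timer tick, the time the currently running job has spent on the hardware since its last checkpoint is added to busy_time, then busy_time is latched into busy_time_record and cleared so the next interval starts from zero; the debugfs load reporting (rve_load_show(), next group of hits) reads back the latched value. A hedged userspace sketch of that accounting follows; the names and the fixed 1-second interval are invented.

/* Illustration of the periodic busy-time accounting used by the
 * rve/rga hrtimer handlers. Names and the 1 s interval are invented. */
#include <stdint.h>
#include <stdio.h>

#define INTERVAL_US 1000000 /* pretend the timer fires once per second */

struct toy_timer {
        uint64_t busy_time;        /* µs accumulated in the current interval */
        uint64_t busy_time_record; /* latched value from the last interval */
};

/* Called from the periodic tick. job_start_us is when the running job
 * last checkpointed; pass 0 when no job is on the hardware. */
static void toy_tick(struct toy_timer *t, uint64_t now_us, uint64_t job_start_us)
{
        if (job_start_us)
                t->busy_time += now_us - job_start_us;

        t->busy_time_record = t->busy_time; /* what load reporting will read */
        t->busy_time = 0;                   /* restart accounting */
}

int main(void)
{
        struct toy_timer t = { 0, 0 };

        /* A job ran for the last 250 ms of a 1 s interval. */
        toy_tick(&t, 1000000, 750000);
        printf("load: %llu%%\n",
               (unsigned long long)(t.busy_time_record * 100 / INTERVAL_US));
        return 0;
}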
rve_debugger.c
151 struct rve_scheduler_t *scheduler = NULL; in rve_load_show() local
161 scheduler = rve_drvdata->scheduler[0]; in rve_load_show()
163 seq_printf(m, "scheduler[0]: %s\n", dev_driver_string(scheduler->dev)); in rve_load_show()
165 spin_lock_irqsave(&scheduler->irq_lock, flags); in rve_load_show()
167 busy_time_total = scheduler->timer.busy_time_record; in rve_load_show()
168 pid_info = scheduler->session.pid_info; in rve_load_show()
170 spin_unlock_irqrestore(&scheduler->irq_lock, flags); in rve_load_show()
192 struct rve_scheduler_t *scheduler = NULL; in rve_scheduler_show() local
204 scheduler = rve_drvdata->scheduler[i]; in rve_scheduler_show()
206 spin_lock_irqsave(&scheduler->irq_lock, flags); in rve_scheduler_show()
[all …]
/OK3568_Linux_fs/kernel/drivers/video/rockchip/rga3/
rga_job.c
151 void rga_job_scheduler_dump_info(struct rga_scheduler_t *scheduler) in rga_job_scheduler_dump_info() argument
155 lockdep_assert_held(&scheduler->irq_lock); in rga_job_scheduler_dump_info()
159 dev_driver_string(scheduler->dev), in rga_job_scheduler_dump_info()
160 scheduler->core, scheduler->job_count, scheduler->status); in rga_job_scheduler_dump_info()
162 if (scheduler->running_job) in rga_job_scheduler_dump_info()
163 rga_job_dump_info(scheduler->running_job); in rga_job_scheduler_dump_info()
165 list_for_each_entry(job_pos, &scheduler->todo_list, head) { in rga_job_scheduler_dump_info()
172 static int rga_job_run(struct rga_job *job, struct rga_scheduler_t *scheduler) in rga_job_run() argument
177 ret = rga_power_enable(scheduler); in rga_job_run()
183 ret = scheduler->ops->set_reg(job, scheduler); in rga_job_run()
[all …]
rga_drv.c
330 struct rga_scheduler_t *scheduler = NULL; in hrtimer_handler() local
338 scheduler = rga->scheduler[i]; in hrtimer_handler()
340 spin_lock_irqsave(&scheduler->irq_lock, flags); in hrtimer_handler()
343 job = scheduler->running_job; in hrtimer_handler()
345 scheduler->timer.busy_time += ktime_us_delta(now, job->hw_recoder_time); in hrtimer_handler()
349 scheduler->timer.busy_time_record = scheduler->timer.busy_time; in hrtimer_handler()
350 scheduler->timer.busy_time = 0; in hrtimer_handler()
352 spin_unlock_irqrestore(&scheduler->irq_lock, flags); in hrtimer_handler()
375 int rga_power_enable(struct rga_scheduler_t *scheduler) in rga_power_enable() argument
381 pm_runtime_get_sync(scheduler->dev); in rga_power_enable()
[all …]
rga_policy.c
226 struct rga_scheduler_t *scheduler = NULL; in rga_job_assign() local
249 data = rga_drvdata->scheduler[i]->data; in rga_job_assign()
250 scheduler = rga_drvdata->scheduler[i]; in rga_job_assign()
253 (!(scheduler->core & specified_cores))) in rga_job_assign()
257 pr_info("start policy on core = %d", scheduler->core); in rga_job_assign()
259 if (scheduler->data->mmu == RGA_MMU && in rga_job_assign()
270 scheduler->core); in rga_job_assign()
283 scheduler->core); in rga_job_assign()
291 scheduler->core); in rga_job_assign()
299 scheduler->core); in rga_job_assign()
[all …]
rga_iommu.c
61 job->scheduler); in rga_set_mmu_base()
69 job->scheduler); in rga_set_mmu_base()
77 job->scheduler); in rga_set_mmu_base()
90 job->scheduler); in rga_set_mmu_base()
239 struct rga_scheduler_t *scheduler = (struct rga_scheduler_t *)arg; in rga_iommu_intr_fault_handler() local
240 struct rga_job *job = scheduler->running_job; in rga_iommu_intr_fault_handler()
246 if (scheduler->ops->irq) in rga_iommu_intr_fault_handler()
247 scheduler->ops->irq(scheduler); in rga_iommu_intr_fault_handler()
252 scheduler->ops->soft_reset(scheduler); in rga_iommu_intr_fault_handler()
336 struct rga_scheduler_t *scheduler = NULL; in rga_iommu_bind() local
[all …]
rga2_reg_info.c
1925 static void rga_cmd_to_rga2_cmd(struct rga_scheduler_t *scheduler, in rga_cmd_to_rga2_cmd() argument
2056 if ((scheduler->data->feature & RGA_YIN_YOUT) && in rga_cmd_to_rga2_cmd()
2196 static void rga2_soft_reset(struct rga_scheduler_t *scheduler) in rga2_soft_reset() argument
2202 if (scheduler->data->mmu == RGA_IOMMU) in rga2_soft_reset()
2203 iommu_dte_addr = rga_read(RGA_IOMMU_DTE_ADDR, scheduler); in rga2_soft_reset()
2207 RGA2_SYS_CTRL, scheduler); in rga2_soft_reset()
2211 reg = rga_read(RGA2_SYS_CTRL, scheduler) & 1; in rga2_soft_reset()
2219 if (scheduler->data->mmu == RGA_IOMMU) { in rga2_soft_reset()
2220 rga_write(iommu_dte_addr, RGA_IOMMU_DTE_ADDR, scheduler); in rga2_soft_reset()
2222 rga_write(RGA_IOMMU_CMD_ENABLE_PAGING, RGA_IOMMU_COMMAND, scheduler); in rga2_soft_reset()
[all …]
rga_mm.c
312 static inline bool rga_mm_check_memory_limit(struct rga_scheduler_t *scheduler, int mm_flag) in rga_mm_check_memory_limit() argument
314 if (!scheduler) in rga_mm_check_memory_limit()
317 if (scheduler->data->mmu == RGA_MMU && in rga_mm_check_memory_limit()
320 rga_get_mmu_type_str(scheduler->data->mmu)); in rga_mm_check_memory_limit()
381 struct rga_scheduler_t *scheduler; in rga_mm_map_dma_buffer() local
383 scheduler = job ? job->scheduler : in rga_mm_map_dma_buffer()
384 rga_drvdata->scheduler[rga_drvdata->map_scheduler_index]; in rga_mm_map_dma_buffer()
385 if (scheduler == NULL) { in rga_mm_map_dma_buffer()
407 map_dev = scheduler->iommu_info ? scheduler->iommu_info->default_dev : scheduler->dev; in rga_mm_map_dma_buffer()
432 __func__, scheduler->core); in rga_mm_map_dma_buffer()
[all …]
rga3_reg_info.c
1758 static void rga3_soft_reset(struct rga_scheduler_t *scheduler) in rga3_soft_reset() argument
1763 if (scheduler->data->mmu == RGA_IOMMU) in rga3_soft_reset()
1764 iommu_dte_addr = rga_read(RGA_IOMMU_DTE_ADDR, scheduler); in rga3_soft_reset()
1767 RGA3_SYS_CTRL, scheduler); in rga3_soft_reset()
1770 if (rga_read(RGA3_RO_SRST, scheduler) & m_RGA3_RO_SRST_RO_RST_DONE) in rga3_soft_reset()
1777 RGA3_SYS_CTRL, scheduler); in rga3_soft_reset()
1779 if (scheduler->data->mmu == RGA_IOMMU) { in rga3_soft_reset()
1780 rga_write(iommu_dte_addr, RGA_IOMMU_DTE_ADDR, scheduler); in rga3_soft_reset()
1782 rga_write(RGA_IOMMU_CMD_ENABLE_PAGING, RGA_IOMMU_COMMAND, scheduler); in rga3_soft_reset()
1787 scheduler->core, rga_read(RGA3_SYS_CTRL, scheduler), in rga3_soft_reset()
[all …]
/OK3568_Linux_fs/kernel/drivers/gpu/drm/i915/gvt/
sched_policy.c
134 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; in try_to_schedule_next_vgpu() local
143 if (scheduler->next_vgpu == scheduler->current_vgpu) { in try_to_schedule_next_vgpu()
144 scheduler->next_vgpu = NULL; in try_to_schedule_next_vgpu()
152 scheduler->need_reschedule = true; in try_to_schedule_next_vgpu()
156 if (scheduler->current_workload[engine->id]) in try_to_schedule_next_vgpu()
161 vgpu_update_timeslice(scheduler->current_vgpu, cur_time); in try_to_schedule_next_vgpu()
162 vgpu_data = scheduler->next_vgpu->sched_data; in try_to_schedule_next_vgpu()
166 scheduler->current_vgpu = scheduler->next_vgpu; in try_to_schedule_next_vgpu()
167 scheduler->next_vgpu = NULL; in try_to_schedule_next_vgpu()
169 scheduler->need_reschedule = false; in try_to_schedule_next_vgpu()
[all …]
scheduler.c
274 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; in shadow_context_status_change() local
280 spin_lock_irqsave(&scheduler->mmio_context_lock, flags); in shadow_context_status_change()
282 scheduler->engine_owner[ring_id]) { in shadow_context_status_change()
284 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], in shadow_context_status_change()
286 scheduler->engine_owner[ring_id] = NULL; in shadow_context_status_change()
288 spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags); in shadow_context_status_change()
293 workload = scheduler->current_workload[ring_id]; in shadow_context_status_change()
299 spin_lock_irqsave(&scheduler->mmio_context_lock, flags); in shadow_context_status_change()
300 if (workload->vgpu != scheduler->engine_owner[ring_id]) { in shadow_context_status_change()
302 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], in shadow_context_status_change()
[all …]
/OK3568_Linux_fs/kernel/net/netfilter/ipvs/
ip_vs_sched.c
41 struct ip_vs_scheduler *scheduler) in ip_vs_bind_scheduler() argument
45 if (scheduler->init_service) { in ip_vs_bind_scheduler()
46 ret = scheduler->init_service(svc); in ip_vs_bind_scheduler()
52 rcu_assign_pointer(svc->scheduler, scheduler); in ip_vs_bind_scheduler()
65 cur_sched = rcu_dereference_protected(svc->scheduler, 1); in ip_vs_unbind_scheduler()
133 void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler) in ip_vs_scheduler_put() argument
135 if (scheduler) in ip_vs_scheduler_put()
136 module_put(scheduler->module); in ip_vs_scheduler_put()
145 struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler); in ip_vs_scheduler_err()
167 int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler) in register_ip_vs_scheduler() argument
[all …]
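The ip_vs_sched.c hits show the usual pluggable-scheduler contract: ip_vs_bind_scheduler() runs the scheduler's optional init_service() hook first and only publishes the scheduler pointer on the service (via rcu_assign_pointer) once that hook has succeeded, while ip_vs_scheduler_put() drops the module reference on the other end. Below is a simplified, non-RCU userspace sketch of that contract; the struct names and fields are illustrative, not the real IPVS definitions.

/* Simplified sketch of the bind-scheduler contract seen in ip_vs_sched.c:
 * optional init hook first, publish the pointer only on success.
 * Plain pointers stand in for RCU; names are illustrative. */
#include <stdio.h>

struct toy_service;

struct toy_scheduler {
        const char *name;
        int (*init_service)(struct toy_service *svc); /* optional, may be NULL */
};

struct toy_service {
        struct toy_scheduler *scheduler; /* published only after init succeeds */
};

static int toy_bind_scheduler(struct toy_service *svc, struct toy_scheduler *sched)
{
        if (sched->init_service) {
                int ret = sched->init_service(svc);
                if (ret)
                        return ret; /* do not publish a half-initialised scheduler */
        }
        svc->scheduler = sched;
        return 0;
}

static int rr_init(struct toy_service *svc)
{
        (void)svc;
        return 0; /* nothing to set up for this toy round-robin */
}

int main(void)
{
        struct toy_scheduler rr = { .name = "rr", .init_service = rr_init };
        struct toy_service svc = { .scheduler = NULL };

        if (toy_bind_scheduler(&svc, &rr) == 0)
                printf("bound scheduler: %s\n", svc.scheduler->name);
        return 0;
}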
/OK3568_Linux_fs/kernel/Documentation/block/
switching-sched.rst
5 Each io queue has a set of io scheduler tunables associated with it. These
6 tunables control how the io scheduler works. You can find these entries
16 It is possible to change the IO scheduler for a given block device on
20 To set a specific scheduler, simply do this::
22 echo SCHEDNAME > /sys/block/DEV/queue/scheduler
24 where SCHEDNAME is the name of a defined IO scheduler, and DEV is the
28 a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
29 will be displayed, with the currently selected scheduler in brackets::
31 # cat /sys/block/sda/queue/scheduler
33 # echo none >/sys/block/sda/queue/scheduler
[all …]
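The excerpt above describes the runtime interface itself: the active I/O scheduler of a block device is exposed at /sys/block/DEV/queue/scheduler, reading it lists the available schedulers with the current one in brackets, and writing a name switches to it. As a worked example, here is a small C program that reads that file and optionally writes a new scheduler name; the device name sda is an assumption, writing requires root, and the scheduler must be built in or already loaded.

/* Read, and optionally switch, the I/O scheduler of a block device via
 * sysfs. Assumes /dev/sda exists; run as root to actually switch. */
#include <stdio.h>

#define SCHED_FILE "/sys/block/sda/queue/scheduler"

int main(int argc, char **argv)
{
        char line[256];
        FILE *f = fopen(SCHED_FILE, "r");

        if (!f || !fgets(line, sizeof(line), f)) {
                perror(SCHED_FILE);
                return 1;
        }
        fclose(f);
        /* e.g. "mq-deadline kyber bfq [none]" -- current scheduler in brackets */
        printf("available/current: %s", line);

        if (argc > 1) { /* e.g. ./a.out mq-deadline */
                f = fopen(SCHED_FILE, "w");
                if (!f || fputs(argv[1], f) == EOF) {
                        perror("switching scheduler");
                        return 1;
                }
                fclose(f);
                printf("switched to %s\n", argv[1]);
        }
        return 0;
}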
/OK3568_Linux_fs/kernel/drivers/video/rockchip/rga3/include/
rga_drv.h
186 struct rga_scheduler_t *scheduler; member
269 struct rga_scheduler_t *scheduler; member
308 int (*get_version)(struct rga_scheduler_t *scheduler);
309 int (*set_reg)(struct rga_job *job, struct rga_scheduler_t *scheduler);
311 void (*soft_reset)(struct rga_scheduler_t *scheduler);
312 int (*read_back_reg)(struct rga_job *job, struct rga_scheduler_t *scheduler);
313 int (*irq)(struct rga_scheduler_t *scheduler);
314 int (*isr_thread)(struct rga_job *job, struct rga_scheduler_t *scheduler);
411 struct rga_scheduler_t *scheduler[RGA_MAX_SCHEDULER]; member
446 static inline int rga_read(int offset, struct rga_scheduler_t *scheduler) in rga_read() argument
[all …]
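These header hits outline the driver's hardware abstraction: each scheduler instance carries an ops table of function pointers (get_version, set_reg, soft_reset, read_back_reg, irq, isr_thread), so the generic job and policy code can drive the RGA2 and RGA3 register back-ends (rga2_reg_info.c / rga3_reg_info.c above) through the same interface. A compact, self-contained C sketch of that ops-table dispatch follows; the reduced struct and the fake back-end are illustrative only.

/* Illustration of the ops-table dispatch implied by rga_drv.h: generic
 * code calls through function pointers, each hardware back-end fills
 * them in. Reduced/invented types, not the driver's own. */
#include <stdio.h>

struct toy_sched;

struct toy_ops {
        int  (*get_version)(struct toy_sched *s);
        void (*soft_reset)(struct toy_sched *s);
};

struct toy_sched {
        const char *name;
        const struct toy_ops *ops;
};

/* A fake "rga2-like" back-end. */
static int  toy2_get_version(struct toy_sched *s) { (void)s; return 2; }
static void toy2_soft_reset(struct toy_sched *s)  { printf("%s: reset\n", s->name); }

static const struct toy_ops toy2_ops = {
        .get_version = toy2_get_version,
        .soft_reset  = toy2_soft_reset,
};

/* Generic code only ever sees the ops table. */
static void toy_recover(struct toy_sched *s)
{
        printf("%s: core version %d\n", s->name, s->ops->get_version(s));
        s->ops->soft_reset(s);
}

int main(void)
{
        struct toy_sched s = { .name = "toy-rga2", .ops = &toy2_ops };
        toy_recover(&s);
        return 0;
}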
/OK3568_Linux_fs/kernel/drivers/video/rockchip/rve/include/
rve_drv.h
127 struct rve_scheduler_t *scheduler; member
152 int (*get_version)(struct rve_scheduler_t *scheduler);
153 int (*set_reg)(struct rve_job *job, struct rve_scheduler_t *scheduler);
155 void (*soft_reset)(struct rve_scheduler_t *scheduler);
229 struct rve_scheduler_t *scheduler; member
288 struct rve_scheduler_t *scheduler[RVE_MAX_SCHEDULER]; member
319 static inline int rve_read(int offset, struct rve_scheduler_t *scheduler) in rve_read() argument
321 return readl(scheduler->rve_base + offset); in rve_read()
324 static inline void rve_write(int value, int offset, struct rve_scheduler_t *scheduler) in rve_write() argument
326 writel(value, scheduler->rve_base + offset); in rve_write()
[all …]
/OK3568_Linux_fs/buildroot/dl/qt5location/git/src/3rdparty/mapbox-gl-native/src/mbgl/actor/
mailbox.cpp
13 : scheduler(&scheduler_) { in Mailbox()
17 assert(!scheduler); in open()
24 scheduler = &scheduler_; in open()
31 (*scheduler)->schedule(shared_from_this()); in open()
47 bool Mailbox::isOpen() const { return bool(scheduler); } in isOpen()
60 if (wasEmpty && scheduler) { in push()
61 (*scheduler)->schedule(shared_from_this()); in push()
68 assert(scheduler); in receive()
88 (*scheduler)->schedule(shared_from_this()); in receive()
/OK3568_Linux_fs/buildroot/dl/qt5location/git/src/3rdparty/mapbox-gl-native/platform/qt/src/
qmapboxgl_map_renderer.cpp
18 static QThreadStorage<std::shared_ptr<QMapboxGLScheduler>> scheduler; in getScheduler() local
20 if (!scheduler.hasLocalData()) { in getScheduler()
21 scheduler.setLocalData(std::make_shared<QMapboxGLScheduler>()); in getScheduler()
24 return scheduler.localData().get(); in getScheduler()
37 auto scheduler = getScheduler(); in QMapboxGLMapRenderer() local
40 mbgl::Scheduler::SetCurrent(scheduler); in QMapboxGLMapRenderer()
43 connect(scheduler, SIGNAL(needsProcessing()), this, SIGNAL(needsRendering())); in QMapboxGLMapRenderer()
/OK3568_Linux_fs/kernel/block/
Kconfig.iosched
7 tristate "MQ deadline I/O scheduler"
10 MQ version of the deadline IO scheduler.
19 tristate "Kyber I/O scheduler"
22 The Kyber I/O scheduler is a low-overhead scheduler suitable for
28 tristate "BFQ I/O scheduler"
30 BFQ I/O scheduler for BLK-MQ. BFQ distributes the bandwidth of
/OK3568_Linux_fs/yocto/poky/meta/recipes-extended/cups/cups/
0001-use-echo-only-in-init.patch
9 scheduler/cups.sh.in | 2 +-
12 diff --git a/scheduler/cups.sh.in b/scheduler/cups.sh.in
14 --- a/scheduler/cups.sh.in
15 +++ b/scheduler/cups.sh.in
