/OK3568_Linux_fs/kernel/include/drm/
gpu_scheduler.h
    55: * @rq: runqueue on which this entity is currently scheduled.
    57: * Jobs from this entity can be scheduled on any scheduler
    67: * The &drm_sched_fence.scheduled uses the
    75: * @last_scheduled: points to the finished fence of the last scheduled job.
    107: * struct drm_sched_rq - queue of entities to be scheduled.
    111: * @entities: list of the entities to be scheduled.
    112: * @current_entity: the entity which is to be scheduled.
    130: * @scheduled: this fence is what will be signaled by the scheduler
    131: * when the job is scheduled.
    133: struct dma_fence scheduled;   [member]
    [all …]
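
The gpu_scheduler.h excerpt shows that a scheduler fence embeds two fences: a "scheduled" fence signalled when the job is picked up to run, and a "finished" fence for completion (line 75's @last_scheduled points at the latter). A minimal illustrative sketch of that layout — not the actual header; the struct and helper names here are invented — including the container_of() recovery that the sched_fence.c hit further down performs on the embedded scheduled fence:

#include <linux/dma-fence.h>
#include <linux/kernel.h>

struct example_sched_fence {
	struct dma_fence scheduled;	/* signalled when the job is scheduled to run */
	struct dma_fence finished;	/* signalled when the job has completed */
};

/* Recover the wrapper from the embedded "scheduled" fence, mirroring the
 * container_of() use shown in the sched_fence.c hit at line 146. */
static inline struct example_sched_fence *
example_sched_fence_from_scheduled(struct dma_fence *f)
{
	return container_of(f, struct example_sched_fence, scheduled);
}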
|
/OK3568_Linux_fs/kernel/drivers/gpu/arm/midgard/
mali_kbase_js.h
    133: * - If the ctx is scheduled, it attempts to start the next job (which might be
    140: * - If the context is high priority and the context is not scheduled, then it
    142: * this context to be scheduled in.
    144: * If the context is already scheduled on the RunPool, then adding a job to it
    162: * when the context is currently scheduled.
    224: * @brief Refcount a context as being busy, preventing it from being scheduled
    233: * @return value != false if the retain succeeded, and the context will not be scheduled out.
    234: * @return false if the retain failed (because the context is being/has been scheduled out).
    239: * @brief Refcount a context as being busy, preventing it from being scheduled
    247: * @return value != false if the retain succeeded, and the context will not be scheduled out.
    [all …]

mali_kbase_js_ctx_attr.c
    33: * - ctx is scheduled on the runpool
    79: * - ctx is scheduled on the runpool
    119: * if the context is scheduled.
    123: * - If the context is scheduled, then runpool_irq spinlock must also be held
    156: * if the context is scheduled.
    160: * - If the context is scheduled, then runpool_irq spinlock must also be held
    212: /* The context should not have been scheduled yet, so ASSERT if this caused   [in kbasep_js_ctx_attr_set_initial_attrs()]
    227: /* The context is being scheduled in, so update the runpool with the new attributes */   [in kbasep_js_ctx_attr_runpool_retain_ctx()]
    246: /* The context is being scheduled out, so update the runpool on the removed attributes */   [in kbasep_js_ctx_attr_runpool_release_ctx()]
|
/OK3568_Linux_fs/kernel/drivers/gpu/drm/scheduler/
sched_fence.c
    53: int ret = dma_fence_signal(&fence->scheduled);   [in drm_sched_fence_scheduled()]
    56: DMA_FENCE_TRACE(&fence->scheduled,   [in drm_sched_fence_scheduled()]
    59: DMA_FENCE_TRACE(&fence->scheduled,   [in drm_sched_fence_scheduled()]
    122: * Drop the extra reference from the scheduled fence to the base fence.
    128: dma_fence_put(&fence->scheduled);   [in drm_sched_fence_release_finished()]
    146: return container_of(f, struct drm_sched_fence, scheduled);   [in to_drm_sched_fence()]
    170: dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,   [in drm_sched_fence_create()]

sched_entity.c
    375: * Fence is a scheduled/finished fence from a job   [in drm_sched_entity_add_dependency_cb()]
    388: * it to be scheduled   [in drm_sched_entity_add_dependency_cb()]
    390: fence = dma_fence_get(&s_fence->scheduled);   [in drm_sched_entity_add_dependency_cb()]
    397: /* Ignore it when it is already scheduled */   [in drm_sched_entity_add_dependency_cb()]
    411: * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity
|
/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/
mali_kbase_ctx_sched.h
    98: * is already scheduled in but want to take an extra reference to ensure that
    142: * address space and ensure that is stays scheduled in
    148: * longer required to stay scheduled in.
    192: * preventing it from being scheduled out.
    199: * Return: true if refcount succeeded, and the context will not be scheduled
    201: * scheduled out).
    207: * it from being scheduled out.
    214: * Return: true if refcount succeeded, and the context will not be scheduled
    216: * scheduled out).
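
The mali_kbase_ctx_sched.h hits describe a busy-refcount contract: a successful retain keeps the context from being scheduled out until it is released, and the retain fails if the context is already being (or has been) scheduled out. A hedged sketch of a caller honouring that contract, with purely hypothetical names (the real kbase helpers and context type differ):

#include <linux/errno.h>
#include <linux/types.h>

struct my_kctx;					/* hypothetical context type */

/* Hypothetical retain/release pair modelling the documented contract. */
bool my_ctx_sched_retain(struct my_kctx *kctx);
void my_ctx_sched_release(struct my_kctx *kctx);

static int do_work_on_resident_ctx(struct my_kctx *kctx)
{
	if (!my_ctx_sched_retain(kctx))
		return -EAGAIN;		/* ctx is being / has been scheduled out */

	/* ... the ctx stays scheduled in (keeps its address space) here ... */

	my_ctx_sched_release(kctx);	/* ctx may now be scheduled out again */
	return 0;
}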
|
mali_kbase_js_ctx_attr.c
    40: * - ctx is scheduled on the runpool
    90: * - ctx is scheduled on the runpool
    130: * also retaining it on the runpool if the context is scheduled.
    138: * - If the context is scheduled, then runpool_irq spinlock must also be held
    171: * also releasing it from the runpool if the context is scheduled.
    179: * - If the context is scheduled, then runpool_irq spinlock must also be held
    223: /* The context is being scheduled in, so update the runpool with the new attributes */   [in kbasep_js_ctx_attr_runpool_retain_ctx()]
    243: /* The context is being scheduled out, so update the runpool on the removed attributes */   [in kbasep_js_ctx_attr_runpool_release_ctx()]
|
/OK3568_Linux_fs/kernel/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
pno.h
    17: * brcmf_pno_start_sched_scan - initiate scheduled scan on device.
    20: * @req: configuration parameters for scheduled scan.
    26: * brcmf_pno_stop_sched_scan - terminate scheduled scan on device.
    34: * brcmf_pno_wiphy_params - fill scheduled scan parameters in wiphy instance.
|
/OK3568_Linux_fs/kernel/Documentation/powerpc/
pmu-ebb.rst
    44: user process. This means once an EBB event is scheduled on the PMU, no non-EBB
    56: first will be scheduled and the other will be put in error state. See the
    84: userspace is able to reliably determine which PMC the event is scheduled on.
    95: guarantee that it has been scheduled on the PMU. To ensure that the EBB event
    96: has been scheduled on the PMU, you must perform a read() on the event. If the
    97: read() returns EOF, then the event has not been scheduled and EBBs are not
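
The pmu-ebb.rst excerpt spells out the check directly: a read() on the event file descriptor that returns EOF means the EBB event has not been scheduled on the PMU. A minimal userspace sketch of that check (the descriptor is assumed to come from an earlier perf_event_open() call):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Returns 1 if the EBB event is scheduled on the PMU, 0 if read() hit EOF
 * (event not scheduled, so EBBs will not fire), -1 on a read error. */
static int ebb_event_is_scheduled(int ebb_event_fd)
{
	uint64_t count;
	ssize_t n = read(ebb_event_fd, &count, sizeof(count));

	if (n == 0) {
		fprintf(stderr, "EBB event has not been scheduled on the PMU\n");
		return 0;
	}
	return n < 0 ? -1 : 1;
}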
|
/OK3568_Linux_fs/kernel/drivers/gpu/drm/
drm_vblank_work.c
    99: * If @work is already scheduled, this function will reschedule said work
    103: * %1 if @work was successfully (re)scheduled, %0 if it was either already
    104: * scheduled or cancelled, or a negative error code on failure.
    131: /* Already scheduled w/ same vbl count */   [in drm_vblank_work_schedule()]
    172: * Cancel an already scheduled vblank work and wait for its
    175: * On return, @work is guaranteed to no longer be scheduled or running, even
    212: * drm_vblank_work_flush - wait for a scheduled vblank work to finish
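
The drm_vblank_work.c hits document the scheduling call's return contract: %1 for newly (re)scheduled work, %0 if it was already queued for that vblank count or has been cancelled, and a negative error code on failure. A hedged caller sketch that relies only on that documented contract; the argument list (work item, target vblank count, run-on-next-vblank-if-missed flag) is assumed from the upstream kernel this tree tracks, not verified here:

#include <drm/drm_vblank_work.h>
#include <drm/drm_print.h>

static void queue_flip_work(struct drm_vblank_work *work, u64 target_vblank)
{
	int ret = drm_vblank_work_schedule(work, target_vblank, true);

	if (ret > 0)
		return;		/* %1: newly scheduled, or rescheduled to a new count */
	if (ret == 0)
		return;		/* %0: already scheduled for this count, or cancelled */

	DRM_ERROR("vblank work could not be scheduled: %d\n", ret);
}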
|
/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/jm/
mali_kbase_jm_js.h
    175: * * If the ctx is scheduled, it attempts to start the next job (which might be
    182: * * If the context is high priority and the context is not scheduled, then it
    184: * this context to be scheduled in.
    186: * If the context is already scheduled on the RunPool, then adding a job to it
    205: * when the context is currently scheduled.
    311: * allowing it to be scheduled out.
    315: * When the refcount reaches zero and the context might be scheduled out
    319: * If the context does get scheduled out, then The following actions will be
    336: * changed. This is because the context being scheduled out could mean that
    348: * scheduled, or that already has a zero refcount.
    [all …]
|
/OK3568_Linux_fs/kernel/net/sctp/
stream_sched_prio.c
    51: /* Look into scheduled priorities first, as they are sorted and   [in sctp_sched_prio_get_head()]
    52: * we can find it fast IF it's scheduled.   [in sctp_sched_prio_get_head()]
    92: bool scheduled = false;   [in sctp_sched_prio_unsched(), local]
    97: /* Scheduled */   [in sctp_sched_prio_unsched()]
    98: scheduled = true;   [in sctp_sched_prio_unsched()]
    114: return scheduled;   [in sctp_sched_prio_unsched()]
    124: /* Nothing to do if already scheduled */   [in sctp_sched_prio_sched()]
|
/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/backend/gpu/
mali_kbase_js_backend.c
    158: /* Job has been scheduled for at least   [in timer_callback()]
    195: /* Job has been scheduled for at least   [in timer_callback()]
    212: /* Job has been scheduled for at least   [in timer_callback()]
    227: /* Job has been scheduled for at least   [in timer_callback()]
    235: /* Job has been scheduled for at least   [in timer_callback()]
    251: /* Job has been scheduled for at least   [in timer_callback()]
|
/OK3568_Linux_fs/kernel/drivers/gpu/arm/midgard/backend/gpu/
mali_kbase_js_backend.c
    156: /* Job has been scheduled for at least   [in timer_callback()]
    188: /* Job has been scheduled for at least   [in timer_callback()]
    204: /* Job has been scheduled for at least   [in timer_callback()]
    219: /* Job has been scheduled for at least   [in timer_callback()]
    227: /* Job has been scheduled for at least   [in timer_callback()]
    243: /* Job has been scheduled for at least   [in timer_callback()]
|
/OK3568_Linux_fs/kernel/drivers/usb/host/
xhci-mtk.h
    50: * (@repeat==1) scheduled within the interval
    60: * scheduled first time within the interval
    62: * scheduled within a interval. in the simple algorithm, only
    66: * @pkts: number of packets to be transferred in the scheduled uframes
|
/OK3568_Linux_fs/kernel/drivers/net/wireless/intel/iwlwifi/fw/api/
time-event.h
    108: * the first fragment is scheduled.
    110: * the first 2 fragments are scheduled.
    116: * scheduled.
    174: * the first fragment is scheduled.
    176: * the first 2 fragments are scheduled.
    182: * scheduled.
    311: * @status: true if scheduled, false otherwise (not executed)
    444: * Note: the session protection will always be scheduled to start as
|
/OK3568_Linux_fs/kernel/drivers/soc/fsl/dpio/
qbman-portal.h
    391: * qbman_swp_fq_schedule() - Move the fq to the scheduled state
    393: * @fqid: the index of frame queue to be scheduled
    406: * qbman_swp_fq_force() - Force the FQ to fully scheduled state
    410: * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
    443: * XOFF FQs will remain in the tenatively-scheduled state, even when
    444: * non-empty, meaning they won't be selected for scheduled dequeuing.
    445: * If a FQ is changed to XOFF after it had already become truly-scheduled
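
These kerneldoc hits describe the frame-queue states: a tentatively-scheduled FQ can be force-promoted to fully-scheduled, while an XOFF FQ stays tentatively-scheduled and is skipped by scheduled dequeuing. A hedged sketch of a caller driving those transitions; the helper names come from the kerneldoc above, but their exact prototypes (software portal plus @fqid) are assumed here, not verified against this tree:

#include <linux/types.h>
#include "qbman-portal.h"

static int bring_fq_online(struct qbman_swp *swp, u32 fqid)
{
	int ret;

	/* Move the FQ into the scheduled state. */
	ret = qbman_swp_fq_schedule(swp, fqid);
	if (ret)
		return ret;

	/* Force eligible: promote a tentatively-scheduled FQ to fully-scheduled,
	 * per the line 410 kerneldoc above. */
	return qbman_swp_fq_force(swp, fqid);
}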
|
/OK3568_Linux_fs/kernel/include/linux/
posix-timers.h
    131: * @work: The task work to be scheduled
    132: * @scheduled: @work has been scheduled already, no further processing
    136: unsigned int scheduled;   [member]
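
The posix-timers.h hit pairs a task-work item with a "scheduled" flag so the work is queued at most once until the handler has run. A generic sketch of that queue-once pattern with illustrative names (not the actual posix-timers code; locking against concurrent arming is omitted):

#include <linux/sched.h>
#include <linux/task_work.h>

struct once_work {
	struct callback_head	work;		/* the task work to be scheduled */
	unsigned int		scheduled;	/* already queued: no further processing */
};

/* Queue the work for @task at most once; the work handler is expected to
 * clear ->scheduled once it has run. */
static void once_work_arm(struct task_struct *task, struct once_work *ow)
{
	if (ow->scheduled)
		return;

	ow->scheduled = 1;
	if (task_work_add(task, &ow->work, TWA_RESUME))
		ow->scheduled = 0;	/* task is exiting; nothing was queued */
}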
|
/OK3568_Linux_fs/kernel/arch/alpha/kernel/
perf_event.c
    36: /* Number of events scheduled; also number entries valid in arrays below. */
    40: /* Events currently scheduled. */
    42: /* Event type of each scheduled event. */
    44: /* Current index of each scheduled event; if not yet determined
    149: * Check that a group of events can be simultaneously scheduled on to the
    369: * Check that a group of events can be simultaneously scheduled on to the PMU.
    387: * If new events have been scheduled then update cpuc with the new
    637: * scheduled on to the PMU. At that point the code to programme the   [in __hw_perf_event_init()]
    646: * be scheduled on to the PMU.   [in __hw_perf_event_init()]
    731: /* Update cpuc with information from any new scheduled events. */   [in alpha_pmu_enable()]
|
/OK3568_Linux_fs/prebuilts/gcc/linux-x86/aarch64/gcc-arm-10.3-2021.07-x86_64-aarch64-none-linux-gnu/lib/gcc/aarch64-none-linux-gnu/10.3.1/plugin/include/
sel-sched-ir.h
    117: /* Number of times the insn was scheduled. */
    132: /* Cycle on which original insn was scheduled. Zero when it has not yet
    133: been scheduled or more than one originator. */
    144: /* True when this expression needs a speculation check to be scheduled.
    253: /* Current cycle that is being scheduled on this fence. */
    256: /* Number of insns that were scheduled on the current cycle.
    261: that are inner boundaries of the scheduled parallel group. */
    272: /* A vector of insns that are scheduled but not yet completed. */
    276: an insn can be scheduled on this fence. */
    282: /* Insn, which has been scheduled last on this fence. */
    [all …]
|
/OK3568_Linux_fs/prebuilts/gcc/linux-x86/arm/gcc-arm-10.3-2021.07-x86_64-arm-none-linux-gnueabihf/lib/gcc/arm-none-linux-gnueabihf/10.3.1/plugin/include/
sel-sched-ir.h
    117: /* Number of times the insn was scheduled. */
    132: /* Cycle on which original insn was scheduled. Zero when it has not yet
    133: been scheduled or more than one originator. */
    144: /* True when this expression needs a speculation check to be scheduled.
    253: /* Current cycle that is being scheduled on this fence. */
    256: /* Number of insns that were scheduled on the current cycle.
    261: that are inner boundaries of the scheduled parallel group. */
    272: /* A vector of insns that are scheduled but not yet completed. */
    276: an insn can be scheduled on this fence. */
    282: /* Insn, which has been scheduled last on this fence. */
    [all …]
|
/OK3568_Linux_fs/kernel/drivers/gpu/drm/i915/
i915_priolist_types.h
    23: /* Interactive workload, scheduled for immediate pageflipping */
    38: * another context. They get scheduled with their default priority and
|
/OK3568_Linux_fs/kernel/arch/s390/pci/
pci_irq.c
    147: atomic_t scheduled;   [member]
    153: atomic_t *scheduled = data;   [in zpci_handle_remote_irq(), local]
    157: } while (atomic_dec_return(scheduled));   [in zpci_handle_remote_irq()]
    179: if (atomic_inc_return(&cpu_data->scheduled) > 1)   [in zpci_handle_fallback_irq()]
    183: cpu_data->csd.info = &cpu_data->scheduled;   [in zpci_handle_fallback_irq()]
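
The pci_irq.c hits show an IPI-coalescing pattern: the fallback handler only fires a cross-CPU call when the per-CPU "scheduled" counter goes from 0 to 1, and the remote handler keeps draining work until atomic_dec_return() reaches zero. A stripped-down sketch of that pattern, with the s390-specific interrupt handling omitted and illustrative names:

#include <linux/atomic.h>
#include <linux/smp.h>

struct cpu_irq_data {
	call_single_data_t	csd;
	atomic_t		scheduled;
};

/* Runs on the target CPU; keeps going until every queued kick is consumed. */
static void remote_irq_handler(void *data)
{
	atomic_t *scheduled = data;

	do {
		/* ... process the pending per-CPU interrupt work here ... */
	} while (atomic_dec_return(scheduled));
}

/* Kick @cpu; extra kicks while one is already in flight cost no extra IPI. */
static void kick_cpu(int cpu, struct cpu_irq_data *cpu_data)
{
	if (atomic_inc_return(&cpu_data->scheduled) > 1)
		return;

	cpu_data->csd.func = remote_irq_handler;
	cpu_data->csd.info = &cpu_data->scheduled;
	smp_call_function_single_async(cpu, &cpu_data->csd);
}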
|
/OK3568_Linux_fs/kernel/drivers/net/wireless/rockchip_wlan/cywdhd/bcmdhd/
dhd_linux_priv.h
    216: * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
    232: /* Number of times NAPI processing got scheduled */
    236: /* Number of times RX Completions got scheduled */
    240: /* Number of times TX Completions got scheduled */
    368: /* indicates mem_dump was scheduled as work queue or called directly */
|
/OK3568_Linux_fs/kernel/drivers/net/wireless/rockchip_wlan/infineon/bcmdhd/
dhd_linux_priv.h
    216: * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
    232: /* Number of times NAPI processing got scheduled */
    236: /* Number of times RX Completions got scheduled */
    240: /* Number of times TX Completions got scheduled */
    368: /* indicates mem_dump was scheduled as work queue or called directly */
|