Lines Matching +full:power +full:- +full:on +full:- +full:delay
1 /* SPDX-License-Identifier: GPL-2.0 */
3 * workqueue.h --- work queue handling for Linux.
29 #define work_data_bits(work) ((unsigned long *)(&(work)->data))
59 WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1,
73 /* data contains off-queue information when !WORK_STRUCT_PWQ */
81 * pool it was on. Cap at 31 bits and use the highest number to
86 WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
88 WORK_OFFQ_POOL_NONE = (1LU << WORK_OFFQ_POOL_BITS) - 1,
91 WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
122 /* target workqueue and CPU ->timer uses to queue ->work */
134 /* target workqueue ->rcu uses to queue ->work */
139 * struct workqueue_attrs - A struct for workqueue attributes.
240 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
241 lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
242 INIT_LIST_HEAD(&(_work)->entry); \
243 (_work)->func = (_func); \
249 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
250 INIT_LIST_HEAD(&(_work)->entry); \
251 (_work)->func = (_func); \
263 INIT_WORK(&(_work)->work, (_func)); \
264 __init_timer(&(_work)->timer, \
271 INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
272 __init_timer_on_stack(&(_work)->timer, \
290 INIT_WORK(&(_work)->work, (_func))
293 INIT_WORK_ONSTACK(&(_work)->work, (_func))
296 * work_pending - Find out whether a work item is currently pending
303 * delayed_work_pending - Find out whether a delayable work item is currently
308 work_pending(&(w)->work)
312 * Documentation/core-api/workqueue.rst.
323 * Per-cpu workqueues are generally preferred because they tend to
324 * show better performance thanks to cache locality. Per-cpu
327 * of increasing power consumption.
330 * to execute and tries to keep idle cores idle to conserve power;
331 * however, for example, a per-cpu work item scheduled from an
332 * interrupt handler on an idle CPU will force the scheduler to
333 * execute the work item on that CPU breaking the idleness, which in
334 * turn may lead to more scheduling choices which are sub-optimal
335 * in terms of power consumption.
337 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
339 * specified. Per-cpu workqueues which are identified to
340 * contribute significantly to power-consumption are identified and
342 * leads to noticeable power saving at the cost of small
359 /* unbound wq's aren't per-cpu, scale max_active according to #cpus */
364 * System-wide workqueues which are always present.
367 * Multi-CPU multi-threaded. There are users which expect relatively
385 * *_power_efficient_wq are inclined towards saving power and converted
387 * they are the same as their non-power-efficient counterparts - e.g.
400 * alloc_workqueue - allocate a workqueue
403 * @max_active: max in-flight work items, 0 for default
407 * information on WQ_* flags, please refer to
408 * Documentation/core-api/workqueue.rst.
411 * Pointer to the allocated workqueue on success, %NULL on failure.
418 * alloc_ordered_workqueue - allocate an ordered workqueue
428 * Pointer to the allocated workqueue on success, %NULL on failure.
455 struct delayed_work *work, unsigned long delay);
457 struct delayed_work *dwork, unsigned long delay);
488 * queue_work - queue work on a workqueue
492 * Returns %false if @work was already on a queue, %true otherwise.
494 * We queue the work to the CPU on which it was submitted, but if the CPU dies
497 * Memory-ordering properties: If it returns %true, guarantees that all stores
517 * queue_delayed_work - queue work on a workqueue after delay
520 * @delay: number of jiffies to wait before queueing
526 unsigned long delay) in queue_delayed_work() argument
528 return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); in queue_delayed_work()
532 * mod_delayed_work - modify delay of or queue a delayed work
535 * @delay: number of jiffies to wait before queueing
537 * mod_delayed_work_on() on local CPU.
541 unsigned long delay) in mod_delayed_work() argument
543 return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); in mod_delayed_work()
547 * schedule_work_on - put work task on a specific cpu
548 * @cpu: cpu to put the work task on
551 * This puts a job on a specific cpu
559 * schedule_work - put work task in global workqueue
562 * Returns %false if @work was already on the kernel-global workqueue and
565 * This puts a job in the kernel-global workqueue if it was not already
566 * queued and leaves it in the same position on the kernel-global
569 * Shares the same memory-ordering properties of queue_work(), cf. the
578 * flush_scheduled_work - ensure that any scheduled work has run to completion.
580 * Forces execution of the kernel-global workqueue and blocks until its
587 * One of the work items currently on the workqueue needs to acquire
593 * occur very often. It depends on what work items are on the workqueue and
607 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
610 * @delay: number of jiffies to wait
612 * After waiting for a given time this puts a job in the kernel-global
613 * workqueue on the specified CPU.
616 unsigned long delay) in schedule_delayed_work_on() argument
618 return queue_delayed_work_on(cpu, system_wq, dwork, delay); in schedule_delayed_work_on()
622 * schedule_delayed_work - put work task in global workqueue after delay
624 * @delay: number of jiffies to wait or 0 for immediate execution
626 * After waiting for a given time this puts a job in the kernel-global
630 unsigned long delay) in schedule_delayed_work() argument
632 return queue_delayed_work(system_wq, dwork, delay); in schedule_delayed_work()