xref: /OK3568_Linux_fs/kernel/include/linux/sched/rt.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef _LINUX_SCHED_RT_H
3*4882a593Smuzhiyun #define _LINUX_SCHED_RT_H
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #include <linux/sched.h>
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun struct task_struct;
8*4882a593Smuzhiyun 
rt_prio(int prio)9*4882a593Smuzhiyun static inline int rt_prio(int prio)
10*4882a593Smuzhiyun {
11*4882a593Smuzhiyun 	if (unlikely(prio < MAX_RT_PRIO))
12*4882a593Smuzhiyun 		return 1;
13*4882a593Smuzhiyun 	return 0;
14*4882a593Smuzhiyun }
15*4882a593Smuzhiyun 
/*
 * rt_task - is @p's effective priority in the realtime range?
 *
 * Tests p->prio, the task's current effective priority, rather than
 * its scheduling policy.  NOTE(review): p->prio is presumably the
 * PI-boosted priority, so a boosted CFS task would report true here —
 * confirm against the scheduler core before relying on that.
 */
static inline int rt_task(struct task_struct *p)
{
	return rt_prio(p->prio);
}
20*4882a593Smuzhiyun 
task_is_realtime(struct task_struct * tsk)21*4882a593Smuzhiyun static inline bool task_is_realtime(struct task_struct *tsk)
22*4882a593Smuzhiyun {
23*4882a593Smuzhiyun 	int policy = tsk->policy;
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun 	if (policy == SCHED_FIFO || policy == SCHED_RR)
26*4882a593Smuzhiyun 		return true;
27*4882a593Smuzhiyun 	if (policy == SCHED_DEADLINE)
28*4882a593Smuzhiyun 		return true;
29*4882a593Smuzhiyun 	return false;
30*4882a593Smuzhiyun }
31*4882a593Smuzhiyun 
#ifdef CONFIG_RT_MUTEXES
/*
 * Must hold either p->pi_lock or task_rq(p)->lock.
 */
static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p)
{
	/* Top waiter cached on the task by the rt-mutex code (pi_top_task). */
	return p->pi_top_task;
}
/* Boost/deboost @p's priority to track @pi_task; implemented in sched core. */
extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
/* True while @tsk is blocked waiting on an rt-mutex. */
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
	return tsk->pi_blocked_on != NULL;
}
#else
/*
 * !CONFIG_RT_MUTEXES: no priority inheritance, so there is never a top
 * waiter, nothing to re-adjust, and a task can never be PI-blocked.
 */
static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
	return NULL;
}
# define rt_mutex_adjust_pi(p)		do { } while (0)
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
	return false;
}
#endif
57*4882a593Smuzhiyun 
/*
 * Presumably resets realtime tasks back to normal scheduling; defined
 * in the scheduler core.  NOTE(review): purpose inferred from the name
 * only — confirm against kernel/sched before relying on semantics.
 */
extern void normalize_rt_tasks(void);


/*
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define RR_TIMESLICE		(100 * HZ / 1000)
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun #endif /* _LINUX_SCHED_RT_H */
68