/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>
#include <linux/android_kabi.h>

/*
 * Types defining task->signal and task->sighand and APIs using them:
 */

struct sighand_struct {
	spinlock_t		siglock;
	refcount_t		count;
	wait_queue_head_t	signalfd_wqh;
	struct k_sigaction	action[_NSIG];
};

/*
 * Per-process accounting stats:
 */
struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	u64			ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	u64 expires;
	u64 incr;
};

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
	atomic64_t	utime;
	atomic64_t	stime;
	atomic64_t	sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) {				\
		.utime = ATOMIC64_INIT(0),			\
		.stime = ATOMIC64_INIT(0),			\
		.sum_exec_runtime = ATOMIC64_INIT(0),		\
	}
/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:	atomic thread group interval timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
};
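
/*
 * Illustrative sketch (not an in-tree snippet): the atomic clock sums are
 * zero-initialized with INIT_CPUTIME_ATOMIC, e.g.:
 *
 *	struct thread_group_cputimer tgct = {
 *		.cputime_atomic = INIT_CPUTIME_ATOMIC,
 *	};
 */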

struct multiprocess_signals {
	sigset_t signal;
	struct hlist_node node;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	refcount_t		sigcnt;
	atomic_t		live;
	int			nr_threads;
	struct list_head	thread_head;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* For collecting multiprocess signals during fork */
	struct hlist_head	multiprocess;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

#ifdef CONFIG_POSIX_TIMERS

	/* POSIX.1b Interval Timers */
	int			posix_timer_id;
	struct list_head	posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

#endif
	/* Empty if CONFIG_POSIX_TIMERS=n */
	struct posix_cputimers posix_cputimers;

	/* PID/PID hash table linkage. */
	struct pid *pids[PIDTYPE_MAX];

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	seqlock_t stats_lock;
	u64 utime, stime, cutime, cstime;
	u64 gtime;
	u64 cgtime;
	struct prev_cputime prev_cputime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time of dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif

	/*
	 * Thread is the potential origin of an oom condition; kill first on
	 * oom
	 */
	bool oom_flag_origin;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */
	struct mm_struct *oom_mm;	/* recorded mm when the thread group got
					 * killed by the oom killer */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace).
					 * Deprecated, do not use in new code.
					 * Use exec_update_lock instead.
					 */
	struct rw_semaphore exec_update_lock;	/* Held while task_struct is
						 * being updated during exec,
						 * and may have inconsistent
						 * permissions.
						 */

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
} __randomize_layout;

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
			  SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
					 unsigned int flags)
{
	WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *task,
			  sigset_t *mask, kernel_siginfo_t *info);

static inline int kernel_dequeue_signal(void)
{
	struct task_struct *task = current;
	kernel_siginfo_t __info;
	int ret;

	spin_lock_irq(&task->sighand->siglock);
	ret = dequeue_signal(task, &task->blocked, &__info);
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}
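
/*
 * Illustrative usage sketch (not part of this header): a kernel thread that
 * has enabled a signal with allow_signal() can drain one pending signal at a
 * time and react to its number. The surrounding loop and do_work() are
 * hypothetical.
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		if (signal_pending(current) &&
 *		    kernel_dequeue_signal() == SIGTERM)
 *			break;
 *		do_work();
 *	}
 */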

static inline void kernel_signal_stop(void)
{
	spin_lock_irq(&current->sighand->siglock);
	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
		set_special_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);

	schedule();
}
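
/*
 * Illustrative sketch (an assumed caller pattern, not quoted from a specific
 * call site): dequeue_signal() sets JOBCTL_STOP_DEQUEUED when a stop signal
 * is dequeued, so a task that just drained SIGSTOP can park itself in
 * TASK_STOPPED until SIGCONT arrives:
 *
 *	if (kernel_dequeue_signal() == SIGSTOP)
 *		kernel_signal_stop();
 */
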
#ifdef __ARCH_SI_TRAPNO
# define ___ARCH_SI_TRAPNO(_a1) , _a1
#else
# define ___ARCH_SI_TRAPNO(_a1)
#endif
#ifdef __ia64__
# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
#else
# define ___ARCH_SI_IA64(_a1, _a2, _a3)
#endif

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t);
int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr));
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t);

int force_sig_mceerr(int code, void __user *, short);
int send_sig_mceerr(int code, void __user *, short, struct task_struct *);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);

int force_sig_ptrace_errno_trap(int errno, void __user *addr);

extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig);
extern int force_sig_info(struct kernel_siginfo *);
extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
				const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}
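
/*
 * Illustrative sketch (assumed caller pattern, not from this header): a
 * syscall that must drop a lock and be re-issued from scratch can bail out
 * with restart_syscall(). The names around it are hypothetical.
 *
 *	if (need_to_retry_from_scratch()) {
 *		mutex_unlock(&some_lock);
 *		return restart_syscall();
 *	}
 */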

static inline int task_sigpending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int signal_pending(struct task_struct *p)
{
	/*
	 * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same
	 * behavior in terms of ensuring that we break out of wait loops
	 * so that notify signal callbacks can be processed.
	 */
	if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL)))
		return 1;
	return task_sigpending(p);
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return task_sigpending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

/*
 * This should only be used in fault handlers to decide whether we
 * should stop the current fault routine to handle the signals
 * instead, especially in the case where we were interrupted with
 * VM_FAULT_RETRY.
 */
static inline bool fault_signal_pending(vm_fault_t fault_flags,
					struct pt_regs *regs)
{
	return unlikely((fault_flags & VM_FAULT_RETRY) &&
			(fatal_signal_pending(current) ||
			 (user_mode(regs) && signal_pending(current))));
}
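
/*
 * Illustrative sketch (assumed shape of an arch page-fault handler, not from
 * this header): bail out of the fault path early when a signal should win.
 *
 *	fault = handle_mm_fault(vma, address, flags, regs);
 *	if (fault_signal_pending(fault, regs))
 *		return;		-- the signal is delivered / fault is retried
 */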

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

void task_join_group_stop(struct task_struct *task);

#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors. These are inefficient on
 * SMP architectures because they require atomic operations.
 */

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag. For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce. We set TIF_SIGPENDING here to ensure that the arch
 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
	set_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline void clear_restore_sigmask(void)
{
	clear_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}
static inline bool test_restore_sigmask(void)
{
	return test_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_and_clear_restore_sigmask(void)
{
	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else	/* TIF_RESTORE_SIGMASK */

/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
	current->restore_sigmask = true;
}
static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	task->restore_sigmask = false;
}
static inline void clear_restore_sigmask(void)
{
	current->restore_sigmask = false;
}
static inline bool test_restore_sigmask(void)
{
	return current->restore_sigmask;
}
static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return task->restore_sigmask;
}
static inline bool test_and_clear_restore_sigmask(void)
{
	if (!current->restore_sigmask)
		return false;
	current->restore_sigmask = false;
	return true;
}
#endif

static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}

extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);

static inline void restore_saved_sigmask_unless(bool interrupted)
{
	if (interrupted)
		WARN_ON(!signal_pending(current));
	else
		restore_saved_sigmask();
}
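
/*
 * Illustrative sketch (the usual ppoll/pselect/epoll_pwait-style pattern,
 * shown here with a hypothetical do_the_wait() helper): install the
 * caller-supplied mask for the duration of the wait, then either let signal
 * delivery restore the saved mask (on -EINTR) or restore it here.
 *
 *	ret = set_user_sigmask(umask, sigsetsize);  -- saves current->blocked
 *	if (!ret)
 *		ret = do_the_wait(...);
 *	restore_saved_sigmask_unless(ret == -EINTR);
 */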

static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;
	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV	((struct kernel_siginfo *) 1)

static inline int __on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
	/*
	 * If the signal stack is SS_AUTODISARM then, by construction, we
	 * can't be on the signal stack unless user code deliberately set
	 * SS_AUTODISARM when we were already on it.
	 *
	 * This improves reliability: if user state gets corrupted such that
	 * the stack pointer points very close to the end of the signal stack,
	 * then this check will enable the signal to be handled anyway.
	 */
	if (current->sas_ss_flags & SS_AUTODISARM)
		return 0;

	return __on_sig_stack(sp);
}

static inline int sas_ss_flags(unsigned long sp)
{
	if (!current->sas_ss_size)
		return SS_DISABLE;

	return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
	p->sas_ss_sp = 0;
	p->sas_ss_size = 0;
	p->sas_ss_flags = SS_DISABLE;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}
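
/*
 * Illustrative sketch (assumed arch signal-frame setup, not quoted from a
 * specific architecture): pick the stack to build the signal frame on,
 * honouring SA_ONSTACK/sigaltstack(). get_sigframe() is a hypothetical
 * arch helper; the 16-byte alignment is only an example.
 *
 *	static void __user *get_sigframe(struct ksignal *ksig,
 *					 struct pt_regs *regs, size_t framesize)
 *	{
 *		unsigned long sp = sigsp(user_stack_pointer(regs), ksig);
 *
 *		return (void __user *)round_down(sp - framesize, 16);
 *	}
 */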

extern void __cleanup_sighand(struct sighand_struct *);
extern void flush_itimer_signals(void);

#define tasklist_empty() \
	list_empty(&init_task.tasks)

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 * 'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t) \
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t) \
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t) \
	for_each_process(p) for_each_thread(p, t)
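
/*
 * Illustrative sketch (assumed caller, not from this header): walking every
 * thread in the system. The traversal uses RCU list primitives, so hold
 * rcu_read_lock() (or tasklist_lock) across it; count_threads() is made up.
 *
 *	static int count_threads(void)
 *	{
 *		struct task_struct *p, *t;
 *		int n = 0;
 *
 *		rcu_read_lock();
 *		for_each_process_thread(p, t)
 *			n++;
 *		rcu_read_unlock();
 *		return n;
 *	}
 */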

typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	if (type == PIDTYPE_PID)
		pid = task_pid(task);
	else
		pid = task->signal->pids[type];
	return pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_TGID];
}

/*
 * Without tasklist or RCU lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_PGID];
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_SID];
}

static inline int get_nr_threads(struct task_struct *task)
{
	return task->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

extern bool thread_group_exited(struct pid *pid);

extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
						  unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(task, flags);
	(void)__cond_lock(&task->sighand->siglock, ret);
	return ret;
}
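
/*
 * Illustrative sketch (typical caller pattern, assumed rather than quoted
 * from a specific call site): the return value must be checked, since the
 * target task may be exiting and have no sighand left.
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(task, &flags)) {
 *		-- task->signal and task->sighand are stable here
 *		unlock_task_sighand(task, &flags);
 *	}
 */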

static inline void unlock_task_sighand(struct task_struct *task,
				       unsigned long *flags)
{
	spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}

static inline unsigned long task_rlimit(const struct task_struct *task,
					unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *task,
					    unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}
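
/*
 * Illustrative sketch (assumed caller, not from this header): enforcing the
 * soft limit on locked memory, as mlock-style paths typically do;
 * locked_pages is a hypothetical count held by the caller.
 *
 *	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 *
 *	if (locked_pages > lock_limit && !capable(CAP_IPC_LOCK))
 *		return -ENOMEM;
 */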

#endif /* _LINUX_SCHED_SIGNAL_H */