/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_ENABLED	(0)

static __always_inline int preempt_count(void)
{
        return READ_ONCE(current_thread_info()->preempt_count);
}

static __always_inline volatile int *preempt_count_ptr(void)
{
        return &current_thread_info()->preempt_count;
}

static __always_inline void preempt_count_set(int pc)
{
        *preempt_count_ptr() = pc;
}

/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { \
        task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
        task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)

static __always_inline void set_preempt_need_resched(void)
{
}

static __always_inline void clear_preempt_need_resched(void)
{
}

static __always_inline bool test_preempt_need_resched(void)
{
        return false;
}

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
        *preempt_count_ptr() += val;
}

static __always_inline void __preempt_count_sub(int val)
{
        *preempt_count_ptr() -= val;
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
        /*
         * Because load-store architectures cannot do per-cpu atomic
         * operations, we cannot use PREEMPT_NEED_RESCHED here; it might
         * get lost.
         */
        return !--*preempt_count_ptr() && tif_need_resched();
}
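
/*
 * Architectures that can do per-CPU atomics (x86, for instance) provide
 * their own asm/preempt.h and fold PREEMPT_NEED_RESCHED into the preempt
 * count itself, so the decrement-and-test above collapses into a single
 * per-CPU operation. That folding is also why set_preempt_need_resched(),
 * clear_preempt_need_resched() and test_preempt_need_resched() are no-ops
 * in this generic header: the resched hint lives only in TIF_NEED_RESCHED.
 */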

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
        return unlikely(preempt_count() == preempt_offset &&
                        tif_need_resched());
}

#ifdef CONFIG_PREEMPTION
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPTION */

#endif /* __ASM_PREEMPT_H */
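
/*
 * For context, a minimal sketch of how the preempt_disable()/preempt_enable()
 * API in <linux/preempt.h> layers on top of the primitives above, assuming
 * CONFIG_PREEMPTION so that __preempt_schedule() is available. This is a
 * simplified illustration, not the verbatim kernel macros: the sketch_*
 * names are hypothetical (chosen to avoid clashing with the real ones), and
 * the debug/tracing variants (e.g. CONFIG_DEBUG_PREEMPT) are omitted.
 */
#define sketch_preempt_disable() \
do { \
        __preempt_count_add(1);	/* enter a non-preemptible section */ \
        barrier();		/* keep the section below the increment */ \
} while (0)

#define sketch_preempt_enable() \
do { \
        barrier();		/* keep the section above the decrement */ \
        /* count reached zero with a reschedule pending: preempt now */ \
        if (unlikely(__preempt_count_dec_and_test())) \
                __preempt_schedule(); \
} while (0)

/*
 * Typical use mirrors the real API: bracket a per-CPU critical section with
 * sketch_preempt_disable()/sketch_preempt_enable(); the barrier()s keep the
 * compiler from moving the protected code outside the counted region.
 */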