/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shadow Call Stack support.
 *
 * Copyright (C) 2019 Google LLC
 */

#ifndef _LINUX_SCS_H
#define _LINUX_SCS_H

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sizes.h>

#ifdef CONFIG_SHADOW_CALL_STACK

#define SCS_ORDER		0
#define SCS_SIZE		(PAGE_SIZE << SCS_ORDER)
#define GFP_SCS			(GFP_KERNEL | __GFP_ZERO)

/* An illegal pointer value to mark the end of the shadow stack. */
#define SCS_END_MAGIC		(0x5f6UL + POISON_POINTER_DELTA)

#define task_scs(tsk)		(task_thread_info(tsk)->scs_base)
#define task_scs_sp(tsk)	(task_thread_info(tsk)->scs_sp)

void *scs_alloc(int node);
void scs_free(void *s);
void scs_init(void);
int scs_prepare(struct task_struct *tsk, int node);
void scs_release(struct task_struct *tsk);

static inline void scs_task_reset(struct task_struct *tsk)
{
	/*
	 * Reset the shadow stack to the base address in case the task
	 * is reused.
	 */
	task_scs_sp(tsk) = task_scs(tsk);
}

static inline unsigned long *__scs_magic(void *s)
{
	return (unsigned long *)(s + SCS_SIZE) - 1;
}

static inline bool task_scs_end_corrupted(struct task_struct *tsk)
{
	unsigned long *magic = __scs_magic(task_scs(tsk));
	unsigned long sz = task_scs_sp(tsk) - task_scs(tsk);

	return sz >= SCS_SIZE - 1 || READ_ONCE_NOCHECK(*magic) != SCS_END_MAGIC;
}

#else /* CONFIG_SHADOW_CALL_STACK */

static inline void *scs_alloc(int node) { return NULL; }
static inline void scs_free(void *s) {}
static inline void scs_init(void) {}
static inline void scs_task_reset(struct task_struct *tsk) {}
static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; }
static inline void scs_release(struct task_struct *tsk) {}
static inline bool task_scs_end_corrupted(struct task_struct *tsk) { return false; }

#endif /* CONFIG_SHADOW_CALL_STACK */

#endif /* _LINUX_SCS_H */
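
/*
 * Illustrative sketch, not part of the original header: roughly how the
 * hooks above plug into the task lifecycle. The function names
 * example_dup_task(), example_free_task() and example_schedule_debug()
 * are hypothetical simplifications modeled on the real call sites in
 * kernel/fork.c and the scheduler's debug checks; consult those files
 * for the authoritative wiring.
 *
 *	static int example_dup_task(struct task_struct *tsk, int node)
 *	{
 *		// Allocate a zeroed shadow stack and point scs_sp at its base.
 *		return scs_prepare(tsk, node);
 *	}
 *
 *	static void example_free_task(struct task_struct *tsk)
 *	{
 *		// Free the shadow stack once the task can no longer run.
 *		scs_release(tsk);
 *	}
 *
 *	static void example_schedule_debug(struct task_struct *prev)
 *	{
 *		// Catch an overflowed shadow stack or a clobbered end marker.
 *		if (task_scs_end_corrupted(prev))
 *			panic("corrupted shadow stack detected inside scheduler\n");
 *	}
 *
 * The detection works because __scs_magic() points at the last slot of the
 * SCS_SIZE region, which scs_prepare() fills with SCS_END_MAGIC: any
 * overflow that runs to the end of the region overwrites that slot, so
 * task_scs_end_corrupted() only needs to compare one word.
 */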