/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

/*
 * The kunit_kasan_expectation struct is used in KUnit tests to describe
 * expected KASAN failures.
 */
struct kunit_kasan_expectation {
	bool report_expected;
	bool report_found;
};
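
/*
 * Illustrative sketch (assumed shape, not the actual harness, which
 * lives in lib/test_kasan.c): a KUnit test arms an expectation, triggers
 * the bad access, and then checks that a report was produced:
 *
 *	struct kunit_kasan_expectation fail_data;
 *
 *	fail_data.report_expected = true;
 *	fail_data.report_found = false;
 *	... perform the access that should trip KASAN ...
 *	KUNIT_EXPECT_TRUE(test, fail_data.report_found);
 */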

#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
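
/*
 * Illustrative arithmetic (not an additional API): the generic mode uses
 * KASAN_SHADOW_SCALE_SHIFT == 3, so one shadow byte tracks an 8-byte
 * granule; the software tag-based mode uses a shift of 4 (16-byte
 * granules). For example, with a shift of 3:
 *
 *	shadow = kasan_mem_to_shadow(addr);
 *	       == (void *)(((unsigned long)addr >> 3) + KASAN_SHADOW_OFFSET)
 *
 * so addresses that differ by less than 8 bytes share a shadow byte.
 */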

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
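
/*
 * Sketch of the intended pairing (illustrative, not a formal contract):
 * callers that must touch memory KASAN would otherwise complain about
 * can suppress reports for the current task around the access:
 *
 *	kasan_disable_current();
 *	... access that may hit poisoned memory ...
 *	kasan_enable_current();
 */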

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
void kasan_free_pages(struct page *page, unsigned int order);

#else /* CONFIG_KASAN_HW_TAGS */

static __always_inline void kasan_alloc_pages(struct page *page,
					      unsigned int order, gfp_t flags)
{
	/* Only available for integrated init. */
	BUILD_BUG();
}

static __always_inline void kasan_free_pages(struct page *page,
					     unsigned int order)
{
	/* Only available for integrated init. */
	BUILD_BUG();
}

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}
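
/*
 * Simplified sketch of how the page allocator is expected to branch on
 * this predicate (assumed shape; the real logic lives in mm/page_alloc.c).
 * With integrated init, KASAN initializes the memory while tagging it,
 * so a separate zeroing pass is skipped:
 *
 *	if (kasan_has_integrated_init())
 *		kasan_alloc_pages(page, order, gfp_flags);
 *	else {
 *		kasan_unpoison_pages(page, order, false);
 *		kernel_init_free_pages(page, 1 << order);
 *	}
 */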

#ifdef CONFIG_KASAN

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
	bool is_kmalloc;
};

slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
	if (kasan_enabled())
		return __kasan_never_merge();
	return 0;
}

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
					       unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_unpoison_pages(page, order, init);
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
					       unsigned int *size,
					       slab_flags_t *flags)
{
	if (kasan_enabled())
		__kasan_cache_create(cache, size, flags);
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	if (kasan_enabled())
		__kasan_cache_create_kmalloc(cache);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	if (kasan_enabled())
		return __kasan_metadata_size(cache);
	return 0;
}

void __kasan_poison_slab(struct page *page);
static __always_inline void kasan_poison_slab(struct page *page)
{
	if (kasan_enabled())
		__kasan_poison_slab(page);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
						     void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object,
			unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}
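
/*
 * Simplified sketch of the expected free-path usage (the real call sites
 * are in the slab allocators): a true return value means KASAN has taken
 * ownership of the object (e.g. placed it in the generic mode's
 * quarantine) and the allocator must not reuse it yet:
 *
 *	if (kasan_slab_free(s, object, init))
 *		return;		// object deferred, do not free now
 *	... return object to the freelist ...
 */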

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}
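
/*
 * Allocation-path counterpart (sketch): callers must use the returned
 * pointer rather than the one passed in, because the tag-based modes may
 * hand back the same address with a fresh tag:
 *
 *	object = kasan_slab_alloc(s, object, gfp_flags, init);
 */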

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
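
/*
 * Usage sketch: validate a pointer before an operation that would
 * otherwise bypass instrumentation; a false return means the byte is not
 * accessible and a report has already been printed:
 *
 *	if (!kasan_check_byte(ptr))
 *		return 0;	// invalid object, bail out
 *	... proceed to inspect *ptr ...
 */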

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					      void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					    void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}
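
/*
 * Example (illustrative): the tag-based modes keep a tag in the unused
 * top bits of each pointer, so raw address comparisons or arithmetic
 * should strip it first:
 *
 *	if (kasan_reset_tag(ptr) == kasan_reset_tag(other))
 *		...	// same underlying address, tags ignored
 */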

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);
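
/*
 * Lifetime sketch (simplified; the real call sites are in mm/vmalloc.c):
 * shadow for a new area is populated when the mapping is created, the
 * range is unpoisoned for use, poisoned again on free, and the backing
 * shadow pages are released once the covering region is fully free:
 *
 *	kasan_populate_vmalloc(addr, size);
 *	kasan_unpoison_vmalloc((void *)addr, size);
 *	...
 *	kasan_poison_vmalloc((void *)addr, size);
 *	kasan_release_vmalloc(start, end, free_start, free_end);
 */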

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);
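
/*
 * Sketch of the expected caller (assumed shape; see the arch
 * module_alloc() implementations): the arch allocates the module area
 * and then asks KASAN to back it with real shadow, unwinding on failure:
 *
 *	p = module_area_alloc(size);	// hypothetical arch helper
 *	if (p && kasan_module_alloc(p, size)) {
 *		vfree(p);
 *		return NULL;
 *	}
 */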

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* _LINUX_KASAN_H */