1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef __MM_KASAN_KASAN_H
3*4882a593Smuzhiyun #define __MM_KASAN_KASAN_H
4*4882a593Smuzhiyun
5*4882a593Smuzhiyun #include <linux/kasan.h>
6*4882a593Smuzhiyun #include <linux/kasan-tags.h>
7*4882a593Smuzhiyun #include <linux/kfence.h>
8*4882a593Smuzhiyun #include <linux/stackdepot.h>
9*4882a593Smuzhiyun
#ifdef CONFIG_KASAN_HW_TAGS

#include <linux/static_key.h>
#include "../slab.h"

DECLARE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
extern bool kasan_flag_async __ro_after_init;

/* Whether alloc/free stack traces are collected (runtime-toggled). */
static inline bool kasan_stack_collection_enabled(void)
{
	return static_branch_unlikely(&kasan_flag_stacktrace);
}

/* Whether the hardware tag-based mode reports tag faults asynchronously. */
static inline bool kasan_async_mode_enabled(void)
{
	return kasan_flag_async;
}

#else /* CONFIG_KASAN_HW_TAGS */

/* The software modes always collect stack traces. */
static inline bool kasan_stack_collection_enabled(void)
{
	return true;
}

/* Async fault reporting only exists for the hardware tag-based mode. */
static inline bool kasan_async_mode_enabled(void)
{
	return false;
}

#endif /* CONFIG_KASAN_HW_TAGS */
40*4882a593Smuzhiyun
41*4882a593Smuzhiyun extern bool kasan_flag_panic __ro_after_init;
42*4882a593Smuzhiyun extern bool kasan_flag_async __ro_after_init;
43*4882a593Smuzhiyun
44*4882a593Smuzhiyun #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
45*4882a593Smuzhiyun #define KASAN_GRANULE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
46*4882a593Smuzhiyun #else
47*4882a593Smuzhiyun #include <asm/mte-kasan.h>
48*4882a593Smuzhiyun #define KASAN_GRANULE_SIZE MTE_GRANULE_SIZE
49*4882a593Smuzhiyun #endif
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun #define KASAN_GRANULE_MASK (KASAN_GRANULE_SIZE - 1)
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun #define KASAN_MEMORY_PER_SHADOW_PAGE (KASAN_GRANULE_SIZE << PAGE_SHIFT)
54*4882a593Smuzhiyun
/*
 * Shadow/metadata values describing the state of a memory granule.
 * Generic mode uses distinct byte patterns; the tag-based modes fold
 * them all into the invalid tag.
 */
#ifdef CONFIG_KASAN_GENERIC
#define KASAN_FREE_PAGE		0xFF	/* page was freed */
#define KASAN_PAGE_REDZONE	0xFE	/* redzone for kmalloc_large allocations */
#define KASAN_KMALLOC_REDZONE	0xFC	/* redzone inside slub object */
#define KASAN_KMALLOC_FREE	0xFB	/* object was freed (kmem_cache_free/kfree) */
#define KASAN_KMALLOC_FREETRACK	0xFA	/* object was freed and has free track set */
#else
#define KASAN_FREE_PAGE		KASAN_TAG_INVALID
#define KASAN_PAGE_REDZONE	KASAN_TAG_INVALID
#define KASAN_KMALLOC_REDZONE	KASAN_TAG_INVALID
#define KASAN_KMALLOC_FREE	KASAN_TAG_INVALID
#define KASAN_KMALLOC_FREETRACK	KASAN_TAG_INVALID
#endif

#define KASAN_GLOBAL_REDZONE	0xF9	/* redzone for global variable */
#define KASAN_VMALLOC_INVALID	0xF8	/* unallocated space in vmapped page */

/*
 * Stack redzone shadow values
 * (Those are compiler's ABI, don't change them)
 */
#define KASAN_STACK_LEFT	0xF1
#define KASAN_STACK_MID		0xF2
#define KASAN_STACK_RIGHT	0xF3
#define KASAN_STACK_PARTIAL	0xF4

/*
 * alloca redzone shadow values
 */
#define KASAN_ALLOCA_LEFT	0xCA
#define KASAN_ALLOCA_RIGHT	0xCB

#define KASAN_ALLOCA_REDZONE_SIZE	32

/*
 * Stack frame marker (compiler ABI).
 */
#define KASAN_CURRENT_STACK_FRAME_MAGIC	0x41B58AB3

/* Don't break randconfig/all*config builds */
#ifndef KASAN_ABI_VERSION
#define KASAN_ABI_VERSION 1
#endif

/* Metadata layout customization for the report's memory-state dump. */
#define META_BYTES_PER_BLOCK 1
#define META_BLOCKS_PER_ROW 16
#define META_BYTES_PER_ROW (META_BLOCKS_PER_ROW * META_BYTES_PER_BLOCK)
#define META_MEM_BYTES_PER_ROW (META_BYTES_PER_ROW * KASAN_GRANULE_SIZE)
#define META_ROWS_AROUND_ADDR 2
105*4882a593Smuzhiyun
/* Details of one bad memory access, filled in while building a report. */
struct kasan_access_info {
	const void *access_addr;	/* address the faulting access used */
	const void *first_bad_addr;	/* first byte with bad metadata */
	size_t access_size;
	bool is_write;
	unsigned long ip;		/* return address of the access site */
};
113*4882a593Smuzhiyun
/* Source location of a global variable; layout dictated by the compiler. */
struct kasan_source_location {
	const char *filename;
	int line_no;
	int column_no;
};
120*4882a593Smuzhiyun
/* Compiler-emitted descriptor of an instrumented global; layout is ABI. */
struct kasan_global {
	const void *beg;		/* Address of the beginning of the global variable. */
	size_t size;			/* Size of the global variable. */
	size_t size_with_redzone;	/* Size of the variable + size of the red zone. 32 bytes aligned */
	const void *name;
	const void *module_name;	/* Name of the module where the global variable is declared. */
	unsigned long has_dynamic_init;	/* This needed for C++ */
#if KASAN_ABI_VERSION >= 4
	struct kasan_source_location *location;
#endif
#if KASAN_ABI_VERSION >= 5
	char *odr_indicator;
#endif
};
136*4882a593Smuzhiyun
137*4882a593Smuzhiyun /**
138*4882a593Smuzhiyun * Structures to keep alloc and free tracks *
139*4882a593Smuzhiyun */
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun #define KASAN_STACK_DEPTH 64
142*4882a593Smuzhiyun
143*4882a593Smuzhiyun struct kasan_track {
144*4882a593Smuzhiyun u32 pid;
145*4882a593Smuzhiyun depot_stack_handle_t stack;
146*4882a593Smuzhiyun };
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
149*4882a593Smuzhiyun #define KASAN_NR_FREE_STACKS 5
150*4882a593Smuzhiyun #else
151*4882a593Smuzhiyun #define KASAN_NR_FREE_STACKS 1
152*4882a593Smuzhiyun #endif
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun struct kasan_alloc_meta {
155*4882a593Smuzhiyun struct kasan_track alloc_track;
156*4882a593Smuzhiyun #ifdef CONFIG_KASAN_GENERIC
157*4882a593Smuzhiyun /*
158*4882a593Smuzhiyun * The auxiliary stack is stored into struct kasan_alloc_meta.
159*4882a593Smuzhiyun * The free stack is stored into struct kasan_free_meta.
160*4882a593Smuzhiyun */
161*4882a593Smuzhiyun depot_stack_handle_t aux_stack[2];
162*4882a593Smuzhiyun #else
163*4882a593Smuzhiyun struct kasan_track free_track[KASAN_NR_FREE_STACKS];
164*4882a593Smuzhiyun #endif
165*4882a593Smuzhiyun #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
166*4882a593Smuzhiyun u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
167*4882a593Smuzhiyun u8 free_track_idx;
168*4882a593Smuzhiyun #endif
169*4882a593Smuzhiyun };
170*4882a593Smuzhiyun
/* Singly-linked list node used by the quarantine queues. */
struct qlist_node {
	struct qlist_node *next;
};
174*4882a593Smuzhiyun
/*
 * Generic mode either stores free meta in the object itself or in the redzone
 * after the object. In the former case free meta offset is 0, in the latter
 * case it has some sane value smaller than INT_MAX. Use INT_MAX as free meta
 * offset when free meta isn't present.
 */
#define KASAN_NO_FREE_META INT_MAX

/* Per-object free metadata; overlaps the object itself once it is freed. */
struct kasan_free_meta {
#ifdef CONFIG_KASAN_GENERIC
	/*
	 * Links the object into a quarantine queue while quarantined;
	 * afterwards the allocator may reuse this space for its freelist.
	 */
	struct qlist_node quarantine_link;
	struct kasan_track free_track;
#endif
};
192*4882a593Smuzhiyun
/* Look up the alloc (and, for generic mode, free) metadata of an object. */
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
						const void *object);
#ifdef CONFIG_KASAN_GENERIC
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
						const void *object);
#endif
199*4882a593Smuzhiyun
200*4882a593Smuzhiyun #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
201*4882a593Smuzhiyun
kasan_shadow_to_mem(const void * shadow_addr)202*4882a593Smuzhiyun static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
203*4882a593Smuzhiyun {
204*4882a593Smuzhiyun return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
205*4882a593Smuzhiyun << KASAN_SHADOW_SCALE_SHIFT);
206*4882a593Smuzhiyun }
207*4882a593Smuzhiyun
addr_has_metadata(const void * addr)208*4882a593Smuzhiyun static inline bool addr_has_metadata(const void *addr)
209*4882a593Smuzhiyun {
210*4882a593Smuzhiyun return (addr >= kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
211*4882a593Smuzhiyun }
212*4882a593Smuzhiyun
213*4882a593Smuzhiyun /**
214*4882a593Smuzhiyun * kasan_check_range - Check memory region, and report if invalid access.
215*4882a593Smuzhiyun * @addr: the accessed address
216*4882a593Smuzhiyun * @size: the accessed size
217*4882a593Smuzhiyun * @write: true if access is a write access
218*4882a593Smuzhiyun * @ret_ip: return address
219*4882a593Smuzhiyun * @return: true if access was valid, false if invalid
220*4882a593Smuzhiyun */
221*4882a593Smuzhiyun bool kasan_check_range(unsigned long addr, size_t size, bool write,
222*4882a593Smuzhiyun unsigned long ret_ip);
223*4882a593Smuzhiyun
224*4882a593Smuzhiyun #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
225*4882a593Smuzhiyun
addr_has_metadata(const void * addr)226*4882a593Smuzhiyun static inline bool addr_has_metadata(const void *addr)
227*4882a593Smuzhiyun {
228*4882a593Smuzhiyun return (is_vmalloc_addr(addr) || virt_addr_valid(addr));
229*4882a593Smuzhiyun }
230*4882a593Smuzhiyun
231*4882a593Smuzhiyun #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
232*4882a593Smuzhiyun
233*4882a593Smuzhiyun #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
234*4882a593Smuzhiyun void kasan_print_tags(u8 addr_tag, const void *addr);
235*4882a593Smuzhiyun #else
kasan_print_tags(u8 addr_tag,const void * addr)236*4882a593Smuzhiyun static inline void kasan_print_tags(u8 addr_tag, const void *addr) { }
237*4882a593Smuzhiyun #endif
238*4882a593Smuzhiyun
239*4882a593Smuzhiyun void *kasan_find_first_bad_addr(void *addr, size_t size);
240*4882a593Smuzhiyun const char *kasan_get_bug_type(struct kasan_access_info *info);
241*4882a593Smuzhiyun void kasan_metadata_fetch_row(char *buffer, void *row);
242*4882a593Smuzhiyun
243*4882a593Smuzhiyun #if defined(CONFIG_KASAN_GENERIC) && defined(CONFIG_KASAN_STACK)
244*4882a593Smuzhiyun void kasan_print_address_stack_frame(const void *addr);
245*4882a593Smuzhiyun #else
kasan_print_address_stack_frame(const void * addr)246*4882a593Smuzhiyun static inline void kasan_print_address_stack_frame(const void *addr) { }
247*4882a593Smuzhiyun #endif
248*4882a593Smuzhiyun
249*4882a593Smuzhiyun bool kasan_report(unsigned long addr, size_t size,
250*4882a593Smuzhiyun bool is_write, unsigned long ip);
251*4882a593Smuzhiyun void kasan_report_invalid_free(void *object, unsigned long ip);
252*4882a593Smuzhiyun
253*4882a593Smuzhiyun struct page *kasan_addr_to_page(const void *addr);
254*4882a593Smuzhiyun
255*4882a593Smuzhiyun depot_stack_handle_t kasan_save_stack(gfp_t flags);
256*4882a593Smuzhiyun void kasan_set_track(struct kasan_track *track, gfp_t flags);
257*4882a593Smuzhiyun void kasan_set_free_info(struct kmem_cache *cache, void *object, u8 tag);
258*4882a593Smuzhiyun struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
259*4882a593Smuzhiyun void *object, u8 tag);
260*4882a593Smuzhiyun
#if defined(CONFIG_KASAN_GENERIC) && \
	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
/* Delayed-freeing quarantine; generic mode with SL[AU]B only. */
bool kasan_quarantine_put(struct kmem_cache *cache, void *object);
void kasan_quarantine_reduce(void);
void kasan_quarantine_remove_cache(struct kmem_cache *cache);
#else
static inline bool kasan_quarantine_put(struct kmem_cache *cache, void *object) { return false; }
static inline void kasan_quarantine_reduce(void) { }
static inline void kasan_quarantine_remove_cache(struct kmem_cache *cache) { }
#endif
271*4882a593Smuzhiyun
272*4882a593Smuzhiyun #ifndef arch_kasan_set_tag
arch_kasan_set_tag(const void * addr,u8 tag)273*4882a593Smuzhiyun static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
274*4882a593Smuzhiyun {
275*4882a593Smuzhiyun return addr;
276*4882a593Smuzhiyun }
277*4882a593Smuzhiyun #endif
278*4882a593Smuzhiyun #ifndef arch_kasan_get_tag
279*4882a593Smuzhiyun #define arch_kasan_get_tag(addr) 0
280*4882a593Smuzhiyun #endif
281*4882a593Smuzhiyun
282*4882a593Smuzhiyun #define set_tag(addr, tag) ((void *)arch_kasan_set_tag((addr), (tag)))
283*4882a593Smuzhiyun #define get_tag(addr) arch_kasan_get_tag(addr)
284*4882a593Smuzhiyun
#ifdef CONFIG_KASAN_HW_TAGS

/* Default no-op stubs for arch hooks the architecture does not provide. */
#ifndef arch_enable_tagging_sync
#define arch_enable_tagging_sync()
#endif
#ifndef arch_enable_tagging_async
#define arch_enable_tagging_async()
#endif
#ifndef arch_set_tagging_report_once
#define arch_set_tagging_report_once(state)
#endif
#ifndef arch_force_async_tag_fault
#define arch_force_async_tag_fault()
#endif
#ifndef arch_get_random_tag
#define arch_get_random_tag()	(0xFF)
#endif
#ifndef arch_get_mem_tag
#define arch_get_mem_tag(addr)	(0xFF)
#endif
#ifndef arch_set_mem_tag_range
#define arch_set_mem_tag_range(addr, size, tag, init) ((void *)(addr))
#endif

/* hw_* names are the interface the rest of KASAN uses. */
#define hw_enable_tagging_sync()		arch_enable_tagging_sync()
#define hw_enable_tagging_async()		arch_enable_tagging_async()
#define hw_set_tagging_report_once(state)	arch_set_tagging_report_once(state)
#define hw_force_async_tag_fault()		arch_force_async_tag_fault()
#define hw_get_random_tag()			arch_get_random_tag()
#define hw_get_mem_tag(addr)			arch_get_mem_tag(addr)
#define hw_set_mem_tag_range(addr, size, tag, init) \
			arch_set_mem_tag_range((addr), (size), (tag), (init))

#else /* CONFIG_KASAN_HW_TAGS */

#define hw_enable_tagging_sync()
#define hw_enable_tagging_async()
#define hw_set_tagging_report_once(state)

#endif /* CONFIG_KASAN_HW_TAGS */
325*4882a593Smuzhiyun
326*4882a593Smuzhiyun #if defined(CONFIG_KASAN_HW_TAGS) && IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
327*4882a593Smuzhiyun
328*4882a593Smuzhiyun void kasan_set_tagging_report_once(bool state);
329*4882a593Smuzhiyun void kasan_enable_tagging_sync(void);
330*4882a593Smuzhiyun void kasan_force_async_fault(void);
331*4882a593Smuzhiyun
332*4882a593Smuzhiyun #else /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */
333*4882a593Smuzhiyun
kasan_set_tagging_report_once(bool state)334*4882a593Smuzhiyun static inline void kasan_set_tagging_report_once(bool state) { }
kasan_enable_tagging_sync(void)335*4882a593Smuzhiyun static inline void kasan_enable_tagging_sync(void) { }
kasan_force_async_fault(void)336*4882a593Smuzhiyun static inline void kasan_force_async_fault(void) { }
337*4882a593Smuzhiyun
338*4882a593Smuzhiyun #endif /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */
339*4882a593Smuzhiyun
340*4882a593Smuzhiyun #ifdef CONFIG_KASAN_SW_TAGS
341*4882a593Smuzhiyun u8 kasan_random_tag(void);
342*4882a593Smuzhiyun #elif defined(CONFIG_KASAN_HW_TAGS)
kasan_random_tag(void)343*4882a593Smuzhiyun static inline u8 kasan_random_tag(void) { return hw_get_random_tag(); }
344*4882a593Smuzhiyun #else
kasan_random_tag(void)345*4882a593Smuzhiyun static inline u8 kasan_random_tag(void) { return 0; }
346*4882a593Smuzhiyun #endif
347*4882a593Smuzhiyun
348*4882a593Smuzhiyun #ifdef CONFIG_KASAN_HW_TAGS
349*4882a593Smuzhiyun
/* Hardware mode: poison by retagging every granule in [addr, addr + size). */
static inline void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
	addr = kasan_reset_tag(addr);

	/* Skip KFENCE memory if called explicitly outside of sl*b. */
	if (is_kfence_address(addr))
		return;

	/* Both the start address and the size must be granule-aligned. */
	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	if (WARN_ON(size & KASAN_GRANULE_MASK))
		return;

	hw_set_mem_tag_range((void *)addr, size, value, init);
}
365*4882a593Smuzhiyun
/* Hardware mode: unpoison by retagging the range with the pointer's tag. */
static inline void kasan_unpoison(const void *addr, size_t size, bool init)
{
	u8 tag = get_tag(addr);

	addr = kasan_reset_tag(addr);

	/* Skip KFENCE memory if called explicitly outside of sl*b. */
	if (is_kfence_address(addr))
		return;

	/* The start address must be granule-aligned; the size may not be. */
	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	/*
	 * Explicitly initialize the memory with the precise object size to
	 * avoid overwriting the SLAB redzone. This disables initialization in
	 * the arch code and may thus lead to performance penalty. The penalty
	 * is accepted since SLAB redzones aren't enabled in production builds.
	 */
	if (__slub_debug_enabled() &&
	    init && ((unsigned long)size & KASAN_GRANULE_MASK)) {
		init = false;
		memzero_explicit((void *)addr, size);
	}
	size = round_up(size, KASAN_GRANULE_SIZE);

	hw_set_mem_tag_range((void *)addr, size, tag, init);
}
393*4882a593Smuzhiyun
kasan_byte_accessible(const void * addr)394*4882a593Smuzhiyun static inline bool kasan_byte_accessible(const void *addr)
395*4882a593Smuzhiyun {
396*4882a593Smuzhiyun u8 ptr_tag = get_tag(addr);
397*4882a593Smuzhiyun u8 mem_tag = hw_get_mem_tag((void *)addr);
398*4882a593Smuzhiyun
399*4882a593Smuzhiyun return ptr_tag == KASAN_TAG_KERNEL || ptr_tag == mem_tag;
400*4882a593Smuzhiyun }
401*4882a593Smuzhiyun
402*4882a593Smuzhiyun #else /* CONFIG_KASAN_HW_TAGS */
403*4882a593Smuzhiyun
404*4882a593Smuzhiyun /**
405*4882a593Smuzhiyun * kasan_poison - mark the memory range as unaccessible
406*4882a593Smuzhiyun * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
407*4882a593Smuzhiyun * @size - range size, must be aligned to KASAN_GRANULE_SIZE
408*4882a593Smuzhiyun * @value - value that's written to metadata for the range
409*4882a593Smuzhiyun * @init - whether to initialize the memory range (only for hardware tag-based)
410*4882a593Smuzhiyun *
411*4882a593Smuzhiyun * The size gets aligned to KASAN_GRANULE_SIZE before marking the range.
412*4882a593Smuzhiyun */
413*4882a593Smuzhiyun void kasan_poison(const void *addr, size_t size, u8 value, bool init);
414*4882a593Smuzhiyun
415*4882a593Smuzhiyun /**
416*4882a593Smuzhiyun * kasan_unpoison - mark the memory range as accessible
417*4882a593Smuzhiyun * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
418*4882a593Smuzhiyun * @size - range size, can be unaligned
419*4882a593Smuzhiyun * @init - whether to initialize the memory range (only for hardware tag-based)
420*4882a593Smuzhiyun *
421*4882a593Smuzhiyun * For the tag-based modes, the @size gets aligned to KASAN_GRANULE_SIZE before
422*4882a593Smuzhiyun * marking the range.
423*4882a593Smuzhiyun * For the generic mode, the last granule of the memory range gets partially
424*4882a593Smuzhiyun * unpoisoned based on the @size.
425*4882a593Smuzhiyun */
426*4882a593Smuzhiyun void kasan_unpoison(const void *addr, size_t size, bool init);
427*4882a593Smuzhiyun
428*4882a593Smuzhiyun bool kasan_byte_accessible(const void *addr);
429*4882a593Smuzhiyun
430*4882a593Smuzhiyun #endif /* CONFIG_KASAN_HW_TAGS */
431*4882a593Smuzhiyun
#ifdef CONFIG_KASAN_GENERIC

/**
 * kasan_poison_last_granule - mark the last granule of the memory range as
 * unaccessible
 * @address: range start address, must be aligned to KASAN_GRANULE_SIZE
 * @size: range size
 *
 * This function is only available for the generic mode, as it's the only mode
 * that has partially poisoned memory granules.
 */
void kasan_poison_last_granule(const void *address, size_t size);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_poison_last_granule(const void *address, size_t size) { }

#endif /* CONFIG_KASAN_GENERIC */
450*4882a593Smuzhiyun
451*4882a593Smuzhiyun /*
452*4882a593Smuzhiyun * Exported functions for interfaces called from assembly or from generated
453*4882a593Smuzhiyun * code. Declarations here to avoid warning about missing declarations.
454*4882a593Smuzhiyun */
455*4882a593Smuzhiyun asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
456*4882a593Smuzhiyun void __asan_register_globals(struct kasan_global *globals, size_t size);
457*4882a593Smuzhiyun void __asan_unregister_globals(struct kasan_global *globals, size_t size);
458*4882a593Smuzhiyun void __asan_handle_no_return(void);
459*4882a593Smuzhiyun void __asan_alloca_poison(unsigned long addr, size_t size);
460*4882a593Smuzhiyun void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom);
461*4882a593Smuzhiyun
462*4882a593Smuzhiyun void __asan_load1(unsigned long addr);
463*4882a593Smuzhiyun void __asan_store1(unsigned long addr);
464*4882a593Smuzhiyun void __asan_load2(unsigned long addr);
465*4882a593Smuzhiyun void __asan_store2(unsigned long addr);
466*4882a593Smuzhiyun void __asan_load4(unsigned long addr);
467*4882a593Smuzhiyun void __asan_store4(unsigned long addr);
468*4882a593Smuzhiyun void __asan_load8(unsigned long addr);
469*4882a593Smuzhiyun void __asan_store8(unsigned long addr);
470*4882a593Smuzhiyun void __asan_load16(unsigned long addr);
471*4882a593Smuzhiyun void __asan_store16(unsigned long addr);
472*4882a593Smuzhiyun void __asan_loadN(unsigned long addr, size_t size);
473*4882a593Smuzhiyun void __asan_storeN(unsigned long addr, size_t size);
474*4882a593Smuzhiyun
475*4882a593Smuzhiyun void __asan_load1_noabort(unsigned long addr);
476*4882a593Smuzhiyun void __asan_store1_noabort(unsigned long addr);
477*4882a593Smuzhiyun void __asan_load2_noabort(unsigned long addr);
478*4882a593Smuzhiyun void __asan_store2_noabort(unsigned long addr);
479*4882a593Smuzhiyun void __asan_load4_noabort(unsigned long addr);
480*4882a593Smuzhiyun void __asan_store4_noabort(unsigned long addr);
481*4882a593Smuzhiyun void __asan_load8_noabort(unsigned long addr);
482*4882a593Smuzhiyun void __asan_store8_noabort(unsigned long addr);
483*4882a593Smuzhiyun void __asan_load16_noabort(unsigned long addr);
484*4882a593Smuzhiyun void __asan_store16_noabort(unsigned long addr);
485*4882a593Smuzhiyun void __asan_loadN_noabort(unsigned long addr, size_t size);
486*4882a593Smuzhiyun void __asan_storeN_noabort(unsigned long addr, size_t size);
487*4882a593Smuzhiyun
488*4882a593Smuzhiyun void __asan_report_load1_noabort(unsigned long addr);
489*4882a593Smuzhiyun void __asan_report_store1_noabort(unsigned long addr);
490*4882a593Smuzhiyun void __asan_report_load2_noabort(unsigned long addr);
491*4882a593Smuzhiyun void __asan_report_store2_noabort(unsigned long addr);
492*4882a593Smuzhiyun void __asan_report_load4_noabort(unsigned long addr);
493*4882a593Smuzhiyun void __asan_report_store4_noabort(unsigned long addr);
494*4882a593Smuzhiyun void __asan_report_load8_noabort(unsigned long addr);
495*4882a593Smuzhiyun void __asan_report_store8_noabort(unsigned long addr);
496*4882a593Smuzhiyun void __asan_report_load16_noabort(unsigned long addr);
497*4882a593Smuzhiyun void __asan_report_store16_noabort(unsigned long addr);
498*4882a593Smuzhiyun void __asan_report_load_n_noabort(unsigned long addr, size_t size);
499*4882a593Smuzhiyun void __asan_report_store_n_noabort(unsigned long addr, size_t size);
500*4882a593Smuzhiyun
501*4882a593Smuzhiyun void __asan_set_shadow_00(const void *addr, size_t size);
502*4882a593Smuzhiyun void __asan_set_shadow_f1(const void *addr, size_t size);
503*4882a593Smuzhiyun void __asan_set_shadow_f2(const void *addr, size_t size);
504*4882a593Smuzhiyun void __asan_set_shadow_f3(const void *addr, size_t size);
505*4882a593Smuzhiyun void __asan_set_shadow_f5(const void *addr, size_t size);
506*4882a593Smuzhiyun void __asan_set_shadow_f8(const void *addr, size_t size);
507*4882a593Smuzhiyun
508*4882a593Smuzhiyun void __hwasan_load1_noabort(unsigned long addr);
509*4882a593Smuzhiyun void __hwasan_store1_noabort(unsigned long addr);
510*4882a593Smuzhiyun void __hwasan_load2_noabort(unsigned long addr);
511*4882a593Smuzhiyun void __hwasan_store2_noabort(unsigned long addr);
512*4882a593Smuzhiyun void __hwasan_load4_noabort(unsigned long addr);
513*4882a593Smuzhiyun void __hwasan_store4_noabort(unsigned long addr);
514*4882a593Smuzhiyun void __hwasan_load8_noabort(unsigned long addr);
515*4882a593Smuzhiyun void __hwasan_store8_noabort(unsigned long addr);
516*4882a593Smuzhiyun void __hwasan_load16_noabort(unsigned long addr);
517*4882a593Smuzhiyun void __hwasan_store16_noabort(unsigned long addr);
518*4882a593Smuzhiyun void __hwasan_loadN_noabort(unsigned long addr, size_t size);
519*4882a593Smuzhiyun void __hwasan_storeN_noabort(unsigned long addr, size_t size);
520*4882a593Smuzhiyun
521*4882a593Smuzhiyun void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size);
522*4882a593Smuzhiyun
523*4882a593Smuzhiyun #endif
524