xref: /OK3568_Linux_fs/kernel/mm/kasan/common.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

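/* Save the current stack trace, minus irq frames, into the stack depot. */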
depot_stack_handle_t kasan_save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}

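/* Record the current pid and stack trace in an allocation/free track. */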
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void __kasan_unpoison_range(const void *address, size_t size)
{
	kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */

/*
 * Only allow cache merging when stack collection is disabled and no metadata
 * is present.
 */
slab_flags_t __kasan_never_merge(void)
{
	if (kasan_stack_collection_enabled())
		return SLAB_KASAN;
	return 0;
}

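/* Assign one tag to all pages of the allocation and unpoison the memory. */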
void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = kasan_random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
}

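/* Poison a freed page allocation; highmem pages are skipped. */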
void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_FREE_PAGE, init);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

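/* Lay out KASAN redzones and alloc/free metadata for a newly created cache. */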
void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;

	/*
	 * SLAB_KASAN is used to mark caches as ones that are sanitized by
	 * KASAN. Currently this flag is used in two places:
	 * 1. In slab_ksize() when calculating the size of the accessible
	 *    memory within the object.
	 * 2. In slab_common.c to prevent merging of sanitized caches.
	 */
	*flags |= SLAB_KASAN;

	if (!kasan_stack_collection_enabled())
		return;

	ok_size = *size;

	/* Add alloc meta into redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/*
	 * If alloc meta doesn't fit, don't add it.
	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
	 * larger sizes.
	 */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since free meta might still fit. */
	}

	/* Only the generic mode uses free meta or flexible redzones. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
		return;
	}

	/*
	 * Add free meta into redzone when it's not possible to store
	 * it in the object. This is the case when:
	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    be touched after it was freed, or
	 * 2. Object has a constructor, which means it's expected to
	 *    retain its content until the next allocation, or
	 * 3. Object is too small.
	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		ok_size = *size;

		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);

		/* If free meta doesn't fit, don't add it. */
		if (*size > KMALLOC_MAX_SIZE) {
			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
			*size = ok_size;
		}
	}

	/* Calculate size with optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}

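/*
 * Mark kmalloc caches; for these, alloc info is saved in kasan_kmalloc()
 * rather than in kasan_slab_alloc().
 */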
void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	cache->kasan_info.is_kmalloc = true;
}

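/* Return the size of the KASAN metadata stored in each object's redzone. */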
size_t __kasan_metadata_size(struct kmem_cache *cache)
{
	if (!kasan_stack_collection_enabled())
		return 0;
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}

#ifdef CONFIG_KASAN_GENERIC
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}
#endif

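/* Reset page tags and poison a newly allocated slab page as redzone. */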
void __kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_KMALLOC_REDZONE, false);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_KMALLOC_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on objects indexes, so that objects that are next to each other
 *    get different tags.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
					const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
#endif
}

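/* Zero out alloc metadata and preassign a tag for a new slab object. */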
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	if (kasan_stack_collection_enabled()) {
		alloc_meta = kasan_get_alloc_meta(cache, object);
		if (alloc_meta)
			__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}

	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}

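/*
 * Common free path: detect invalid frees, poison the object, and return
 * true if the slab allocator must skip freeing it (e.g. when the object
 * has been placed into quarantine).
 */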
static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool quarantine, bool init)
{
	u8 tag;
	void *tagged_object;

	tag = get_tag(object);
	tagged_object = object;
	object = kasan_reset_tag(object);

	if (is_kfence_address(object))
		return false;

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_KMALLOC_FREE, init);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
		return false;

	if (kasan_stack_collection_enabled())
		kasan_set_free_info(cache, object, tag);

	return kasan_quarantine_put(cache, object);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool init)
{
	return ____kasan_slab_free(cache, object, ip, true, init);
}

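/*
 * Sanity-check a pointer being freed back to page_alloc; returns true if
 * the free is invalid and has been reported.
 */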
static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip);
		return true;
	}

	if (!kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip);
		return true;
	}

	/*
	 * The object will be poisoned by kasan_free_pages() or
	 * kasan_slab_free_mempool().
	 */

	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	____kasan_kfree_large(ptr, ip);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	/*
	 * Even though this function is only called for kmem_cache_alloc and
	 * kmalloc backed mempool allocations, those allocations can still be
	 * !PageSlab() when the size provided to kmalloc is larger than
	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
	 */
	if (unlikely(!PageSlab(page))) {
		if (____kasan_kfree_large(ptr, ip))
			return;
		kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false);
	} else {
		____kasan_slab_free(page->slab_cache, ptr, ip, false, false);
	}
}

static void set_alloc_info(struct kmem_cache *cache, void *object,
				gfp_t flags, bool is_kmalloc)
{
	struct kasan_alloc_meta *alloc_meta;

	/* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
	if (cache->kasan_info.is_kmalloc && !is_kmalloc)
		return;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		kasan_set_track(&alloc_meta->alloc_track, flags);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
					void *object, gfp_t flags, bool init)
{
	u8 tag;
	void *tagged_object;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Generate and assign random tag for tag-based modes.
	 * Tag is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/*
	 * Unpoison the whole object.
	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
	 */
	kasan_unpoison(tagged_object, cache->object_size, init);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	if (kasan_stack_collection_enabled())
		set_alloc_info(cache, (void *)object, flags, false);

	return tagged_object;
}

static inline void *____kasan_kmalloc(struct kmem_cache *cache,
				const void *object, size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(kasan_reset_tag(object)))
		return (void *)object;

	/*
	 * The object has already been unpoisoned by kasan_slab_alloc() for
	 * kmalloc() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
				KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
			   KASAN_KMALLOC_REDZONE, false);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled())
		set_alloc_info(cache, (void *)object, flags, true);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
					size_t size, gfp_t flags)
{
	return ____kasan_kmalloc(cache, object, size, flags);
}
EXPORT_SYMBOL(__kasan_kmalloc);

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/*
	 * The object has already been unpoisoned by kasan_alloc_pages() for
	 * alloc_pages() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE, false);

	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	/*
	 * Unpoison the object's data.
	 * Part of it might already have been unpoisoned, but it's unknown
	 * how big that part is.
	 */
	kasan_unpoison(object, size, false);

	page = virt_to_head_page(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!PageSlab(page)))
		return __kasan_kmalloc_large(object, size, flags);
	else
		return ____kasan_kmalloc(page->slab_cache, object, size, flags);
}

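/* Check that a single byte is accessible; report and return false otherwise. */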
bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report((unsigned long)address, 1, false, ip);
		return false;
	}
	return true;
}