xref: /OK3568_Linux_fs/kernel/mm/kfence/core.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 *
 * Copyright (C) 2020, Google LLC.
 */

#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"

/* Disables KFENCE on the first warning assuming an irrecoverable error. */
#define KFENCE_WARN_ON(cond)                                                   \
	({                                                                     \
		const bool __cond = WARN_ON(cond);                             \
		if (unlikely(__cond))                                          \
			WRITE_ONCE(kfence_enabled, false);                     \
		__cond;                                                        \
	})

/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;

static unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."

static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
	unsigned long num;
	int ret = kstrtoul(val, 0, &num);

	if (ret < 0)
		return ret;

	if (!num) /* Using 0 to indicate KFENCE is disabled. */
		WRITE_ONCE(kfence_enabled, false);
	else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
		return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. */

	*((unsigned long *)kp->arg) = num;
	return 0;
}

static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
	if (!READ_ONCE(kfence_enabled))
		return sprintf(buffer, "0\n");

	return param_get_ulong(buffer, kp);
}

static const struct kernel_param_ops sample_interval_param_ops = {
	.set = param_set_sample_interval,
	.get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
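/*
 * With MODULE_PARAM_PREFIX "kfence." above, the sample interval can, for
 * example, be set on the kernel command line (e.g. kfence.sample_interval=100
 * for a 100 ms interval, or 0 to disable KFENCE), or adjusted at runtime via
 * /sys/module/kfence/parameters/sample_interval, subject to the restrictions
 * enforced by param_set_sample_interval() above.
 */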

/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __ro_after_init;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */

/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* The static key to set up a KFENCE allocation. */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
#endif

/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/* Statistics counters for debugfs. */
enum kfence_counter_id {
	KFENCE_COUNTER_ALLOCATED,
	KFENCE_COUNTER_ALLOCS,
	KFENCE_COUNTER_FREES,
	KFENCE_COUNTER_ZOMBIES,
	KFENCE_COUNTER_BUGS,
	KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
	[KFENCE_COUNTER_ALLOCATED]	= "currently allocated",
	[KFENCE_COUNTER_ALLOCS]		= "total allocations",
	[KFENCE_COUNTER_FREES]		= "total frees",
	[KFENCE_COUNTER_ZOMBIES]	= "zombie allocations",
	[KFENCE_COUNTER_BUGS]		= "total bugs",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */

static bool kfence_protect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}

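/*
 * Illustrative sketch of the pool layout assumed by the address <-> metadata
 * index arithmetic below (each cell is one PAGE_SIZE page; "G" pages are kept
 * protected as guard pages/redzones, "obj i" pages back kfence_metadata[i]):
 *
 *   __kfence_pool: | G | G | obj 0 | G | obj 1 | G | ... | obj N-1 | G |
 *
 * Hence object i's page starts at __kfence_pool + (i + 1) * 2 * PAGE_SIZE, and
 * an address maps back to index (addr - __kfence_pool) / (2 * PAGE_SIZE) - 1.
 */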
static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
{
	long index;

	/* The checks do not affect performance; only called from slow-paths. */

	if (!is_kfence_address((void *)addr))
		return NULL;

	/*
	 * May be an invalid index if called with an address at the edge of
	 * __kfence_pool, in which case we would report an "invalid access"
	 * error.
	 */
	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
		return NULL;

	return &kfence_metadata[index];
}

static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

	/* The checks do not affect performance; only called from slow-paths. */

	/* Only call with a pointer into kfence_metadata. */
	if (KFENCE_WARN_ON(meta < kfence_metadata ||
			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
		return 0;

	/*
	 * This metadata object only ever maps to 1 page; verify that the stored
	 * address is in the expected range.
	 */
	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
		return 0;

	return pageaddr;
}

/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void metadata_update_state(struct kfence_metadata *meta,
					   enum kfence_object_state next)
{
	struct kfence_track *track =
		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

	lockdep_assert_held(&meta->lock);

	/*
	 * Skip over 1 (this) function; noinline ensures we do not accidentally
	 * skip over the caller by never inlining.
	 */
	track->num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
	track->pid = task_pid_nr(current);

	/*
	 * Pairs with READ_ONCE() in
	 *	kfence_shutdown_cache(),
	 *	kfence_handle_page_fault().
	 */
	WRITE_ONCE(meta->state, next);
}

/* Write canary byte to @addr. */
static inline bool set_canary_byte(u8 *addr)
{
	*addr = KFENCE_CANARY_PATTERN(addr);
	return true;
}

/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
	if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
		return true;

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
	kfence_report_error((unsigned long)addr, false, NULL, addr_to_metadata((unsigned long)addr),
			    KFENCE_ERROR_CORRUPTION);
	return false;
}

/* __always_inline this to ensure we won't do an indirect call to fn. */
static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
{
	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
	unsigned long addr;

	lockdep_assert_held(&meta->lock);

	/*
	 * We'll iterate over each canary byte per-side until fn() returns
	 * false. However, we'll still iterate over the canary bytes to the
	 * right of the object even if there was an error in the canary bytes to
	 * the left of the object. Specifically, if check_canary_byte()
	 * generates an error, showing both sides might give more clues as to
	 * what the error is about when displaying which bytes were corrupted.
	 */

	/* Apply to left of object. */
	for (addr = pageaddr; addr < meta->addr; addr++) {
		if (!fn((u8 *)addr))
			break;
	}

	/* Apply to right of object. */
	for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) {
		if (!fn((u8 *)addr))
			break;
	}
}
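/*
 * Note: for an object at meta->addr with meta->size bytes, for_each_canary()
 * thus covers the in-page canary regions [pageaddr, meta->addr) and
 * [meta->addr + meta->size, pageaddr + PAGE_SIZE).
 */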

static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp)
{
	struct kfence_metadata *meta = NULL;
	unsigned long flags;
	struct page *page;
	void *addr;

	/* Try to obtain a free object. */
	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
	if (!list_empty(&kfence_freelist)) {
		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
		list_del_init(&meta->list);
	}
	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
	if (!meta)
		return NULL;

	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
		/*
		 * This is extremely unlikely -- we are reporting on a
		 * use-after-free, which locked meta->lock, and the reporting
		 * code via printk calls kmalloc() which ends up in
		 * kfence_alloc() and tries to grab the same object that we're
		 * reporting on. While it has never been observed, lockdep does
		 * report that there is a possibility of deadlock. Fix it by
		 * using trylock and bailing out gracefully.
		 */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		/* Put the object back on the freelist. */
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		return NULL;
	}

	meta->addr = metadata_to_pageaddr(meta);
	/* Unprotect if we're reusing this page. */
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_unprotect(meta->addr);

	/*
	 * Note: for allocations made before RNG initialization, prandom_u32_max()
	 * will always return zero. We still benefit from enabling KFENCE as early as
	 * possible, even when the RNG is not yet available, as this will allow
	 * KFENCE to detect bugs due to earlier allocations. The only downside
	 * is that the out-of-bounds accesses detected are deterministic for
	 * such allocations.
	 */
	if (prandom_u32_max(2)) {
		/* Allocate on the "right" side, re-calculate address. */
		meta->addr += PAGE_SIZE - size;
		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
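		/*
		 * Worked example (assuming PAGE_SIZE == 4096): for size == 72
		 * and cache->align == 8, addr becomes pageaddr + 4024, which is
		 * already 8-byte aligned; any gap ALIGN_DOWN() leaves between
		 * the object end and the page end is filled with canary bytes
		 * by for_each_canary() below.
		 */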
	}

	addr = (void *)meta->addr;

	/* Update remaining metadata. */
	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED);
	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
	WRITE_ONCE(meta->cache, cache);
	meta->size = size;
	for_each_canary(meta, set_canary_byte);

	/* Set required struct page fields. */
	page = virt_to_page(meta->addr);
	page->slab_cache = cache;
	if (IS_ENABLED(CONFIG_SLUB))
		page->objects = 1;
	if (IS_ENABLED(CONFIG_SLAB))
		page->s_mem = addr;

	raw_spin_unlock_irqrestore(&meta->lock, flags);

	/* Memory initialization. */

	/*
	 * We check slab_want_init_on_alloc() ourselves, rather than letting
	 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
	 * redzone.
	 */
	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
		memzero_explicit(addr, size);
	if (cache->ctor)
		cache->ctor(addr);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

	return addr;
}

static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
	struct kcsan_scoped_access assert_page_exclusive;
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);

	if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
		/* Invalid or double-free, bail out. */
		atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
		kfence_report_error((unsigned long)addr, false, NULL, meta,
				    KFENCE_ERROR_INVALID_FREE);
		raw_spin_unlock_irqrestore(&meta->lock, flags);
		return;
	}

	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
				  &assert_page_exclusive);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

	/* Restore page protection if there was an OOB access. */
	if (meta->unprotected_page) {
		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
		kfence_protect(meta->unprotected_page);
		meta->unprotected_page = 0;
	}

	/* Check canary bytes for memory corruption. */
	for_each_canary(meta, check_canary_byte);

	/*
	 * Clear memory if init-on-free is set. While we protect the page, the
	 * data is still there, and after a use-after-free is detected, we
	 * unprotect the page, so the data is still accessible.
	 */
	if (!zombie && unlikely(slab_want_init_on_free(meta->cache)))
		memzero_explicit(addr, meta->size);

	/* Mark the object as freed. */
	metadata_update_state(meta, KFENCE_OBJECT_FREED);

	raw_spin_unlock_irqrestore(&meta->lock, flags);

	/* Protect to detect use-after-frees. */
	kfence_protect((unsigned long)addr);

	kcsan_end_scoped_access(&assert_page_exclusive);
	if (!zombie) {
		/* Add it to the tail of the freelist for reuse. */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		KFENCE_WARN_ON(!list_empty(&meta->list));
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
	} else {
		/* See kfence_shutdown_cache(). */
		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
	}
}

static void rcu_guarded_free(struct rcu_head *h)
{
	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

	kfence_guarded_free((void *)meta->addr, meta, false);
}

static bool __init kfence_init_pool(void)
{
	unsigned long addr = (unsigned long)__kfence_pool;
	struct page *pages;
	int i;

	if (!__kfence_pool)
		return false;

	if (!arch_kfence_init_pool())
		goto err;

	pages = virt_to_page(addr);

	/*
	 * Set up object pages: they must have PG_slab set, to avoid freeing
	 * these as real pages.
	 *
	 * We also want to avoid inserting kfence_free() in the kfree()
	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
	 * enters __slab_free() slow-path.
	 */
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		if (!i || (i % 2))
			continue;

		/* Verify we do not have a compound head page. */
		if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
			goto err;

		__SetPageSlab(&pages[i]);
	}

	/*
	 * Protect the first 2 pages. The first page is mostly unnecessary, and
	 * merely serves as an extended guard page. However, adding one
	 * additional page in the beginning gives us an even number of pages,
	 * which simplifies the mapping of address to metadata index.
	 */
	for (i = 0; i < 2; i++) {
		if (unlikely(!kfence_protect(addr)))
			goto err;

		addr += PAGE_SIZE;
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata[i];

		/* Initialize metadata. */
		INIT_LIST_HEAD(&meta->list);
		raw_spin_lock_init(&meta->lock);
		meta->state = KFENCE_OBJECT_UNUSED;
		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
		list_add_tail(&meta->list, &kfence_freelist);

		/* Protect the right redzone. */
		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
			goto err;

		addr += 2 * PAGE_SIZE;
	}

	/*
	 * The pool is live and will never be deallocated from this point on.
	 * Remove the pool object from the kmemleak object tree, as it would
	 * otherwise overlap with allocations returned by kfence_alloc(), which
	 * are registered with kmemleak through the slab post-alloc hook.
	 */
	kmemleak_free(__kfence_pool);

	return true;

err:
	/*
	 * Only release unprotected pages, and do not try to go back and change
	 * page attributes due to risk of failing to do so as well. If changing
	 * page attributes for some pages fails, it is very likely that it also
	 * fails for the first page, and therefore expect addr==__kfence_pool in
	 * most failure cases.
	 */
	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
	__kfence_pool = NULL;
	return false;
}

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
	int i;

	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);

/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is used
 * to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
	struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_print_object(seq, meta);
	raw_spin_unlock_irqrestore(&meta->lock, flags);
	seq_puts(seq, "---------------------------------\n");

	return 0;
}

static const struct seq_operations object_seqops = {
	.start = start_object,
	.next = next_object,
	.stop = stop_object,
	.show = show_object,
};

static int open_objects(struct inode *inode, struct file *file)
{
	return seq_open(file, &object_seqops);
}

static const struct file_operations objects_fops = {
	.open = open_objects,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init kfence_debugfs_init(void)
{
	struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);

	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
	return 0;
}

late_initcall(kfence_debugfs_init);
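/*
 * With debugfs mounted at /sys/kernel/debug (the usual location), the files
 * created above can, for example, be inspected with:
 *
 *   cat /sys/kernel/debug/kfence/stats     # counters from counter_names[]
 *   cat /sys/kernel/debug/kfence/objects   # per-object state via show_object()
 */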

/* === Allocation Gate Timer ================================================ */

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);

static void wake_up_kfence_timer(struct irq_work *work)
{
	wake_up(&allocation_wait);
}
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif

/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
 * more aggressive sampling intervals), we could get away with a variant that
 * avoids IPIs, at the cost of not immediately capturing allocations if the
 * instructions remain cached.
 */
static struct delayed_work kfence_timer;
static void toggle_allocation_gate(struct work_struct *work)
{
	if (!READ_ONCE(kfence_enabled))
		return;

	atomic_set(&kfence_allocation_gate, 0);
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/* Enable static key, and await allocation to happen. */
	static_branch_enable(&kfence_allocation_key);

	if (sysctl_hung_task_timeout_secs) {
		/*
		 * During low activity with no allocations we might wait a
		 * while; let's avoid the hung task warning.
		 */
		wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
					sysctl_hung_task_timeout_secs * HZ / 2);
	} else {
		wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
	}

	/* Disable static key and reset timer. */
	static_branch_disable(&kfence_allocation_key);
#endif
	queue_delayed_work(system_unbound_wq, &kfence_timer,
			   msecs_to_jiffies(kfence_sample_interval));
}
static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);
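/*
 * Summary of one sampling cycle as implemented above: the delayed work resets
 * kfence_allocation_gate to 0 (and, with CONFIG_KFENCE_STATIC_KEYS, enables
 * kfence_allocation_key), the first allocation that passes the gate in
 * __kfence_alloc() increments it and (when the worker is waiting) wakes it via
 * irq_work, and the worker then re-queues itself to run again after
 * kfence_sample_interval milliseconds.
 */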

/* === Public interface ===================================================== */

void __init kfence_alloc_pool(void)
{
	if (!kfence_sample_interval)
		return;

	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

	if (!__kfence_pool)
		pr_err("failed to allocate pool\n");
}

void __init kfence_init(void)
{
	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
	if (!kfence_sample_interval)
		return;

	if (!kfence_init_pool()) {
		pr_err("%s failed\n", __func__);
		return;
	}

	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
}

void kfence_shutdown_cache(struct kmem_cache *s)
{
	unsigned long flags;
	struct kfence_metadata *meta;
	int i;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		bool in_use;

		meta = &kfence_metadata[i];

		/*
		 * If we observe some inconsistent cache and state pair where we
		 * should have returned false here, cache destruction is racing
		 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
		 * the lock will not help, as different critical section
		 * serialization will have the same outcome.
		 */
		if (READ_ONCE(meta->cache) != s ||
		    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
		raw_spin_unlock_irqrestore(&meta->lock, flags);

		if (in_use) {
			/*
			 * This cache still has allocations, and we should not
			 * release them back into the freelist so they can still
			 * safely be used and retain the kernel's default
			 * behaviour of keeping the allocations alive (leak the
			 * cache); however, they effectively become "zombie
			 * allocations" as the KFENCE objects are the only ones
			 * still in use and the owning cache is being destroyed.
			 *
			 * We mark them freed, so that any subsequent use shows
			 * more useful error messages that will include stack
			 * traces of the user of the object, the original
			 * allocation, and caller to shutdown_cache().
			 */
			kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
		}
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		meta = &kfence_metadata[i];

		/* See above. */
		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
			meta->cache = NULL;
		raw_spin_unlock_irqrestore(&meta->lock, flags);
	}
}

void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
	/*
	 * Perform size check before switching kfence_allocation_gate, so that
	 * we don't disable KFENCE without making an allocation.
	 */
	if (size > PAGE_SIZE)
		return NULL;

	/*
	 * Skip allocations from non-default zones, including DMA. We cannot
	 * guarantee that pages in the KFENCE pool will have the requested
	 * properties (e.g. reside in DMAable memory).
	 */
	if ((flags & GFP_ZONEMASK) ||
	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32)))
		return NULL;

	/*
	 * allocation_gate only needs to become non-zero, so it doesn't make
	 * sense to continue writing to it and pay the associated contention
	 * cost, in case we have a large number of concurrent allocations.
	 */
	if (atomic_read(&kfence_allocation_gate) || atomic_inc_return(&kfence_allocation_gate) > 1)
		return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/*
	 * waitqueue_active() is fully ordered after the update of
	 * kfence_allocation_gate per atomic_inc_return().
	 */
	if (waitqueue_active(&allocation_wait)) {
		/*
		 * Calling wake_up() here may deadlock when allocations happen
		 * from within timer code. Use an irq_work to defer it.
		 */
		irq_work_queue(&wake_up_kfence_timer_work);
	}
#endif

	if (!READ_ONCE(kfence_enabled))
		return NULL;

	return kfence_guarded_alloc(s, size, flags);
}

size_t kfence_ksize(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? (void *)meta->addr : NULL;
}

void __kfence_free(void *addr)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
	 * the object, as the object page may be recycled for other-typed
	 * objects once it has been freed. meta->cache may be NULL if the cache
	 * was destroyed.
	 */
	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
		call_rcu(&meta->rcu_head, rcu_guarded_free);
	else
		kfence_guarded_free(addr, meta, false);
}

bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
	struct kfence_metadata *to_report = NULL;
	enum kfence_error_type error_type;
	unsigned long flags;

	if (!is_kfence_address((void *)addr))
		return false;

	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
		return kfence_unprotect(addr); /* ... unprotect and proceed. */

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	if (page_index % 2) {
		/* This is a redzone, report a buffer overflow. */
		struct kfence_metadata *meta;
		int distance = 0;

		meta = addr_to_metadata(addr - PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			to_report = meta;
			/* Data race ok; distance calculation approximate. */
			distance = addr - data_race(meta->addr + meta->size);
		}

		meta = addr_to_metadata(addr + PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			/* Data race ok; distance calculation approximate. */
			if (!to_report || distance > data_race(meta->addr) - addr)
				to_report = meta;
		}

		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		to_report->unprotected_page = addr;
		error_type = KFENCE_ERROR_OOB;

		/*
		 * If the object was freed before we took the lock, we can still
		 * report this as an OOB -- the report will simply show the
		 * stacktrace of the free as well.
		 */
	} else {
		to_report = addr_to_metadata(addr);
		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		error_type = KFENCE_ERROR_UAF;
		/*
		 * We may race with __kfence_alloc(), and it is possible that a
		 * freed object may be reallocated. We simply report this as a
		 * use-after-free, with the stack trace showing the place where
		 * the object was re-allocated.
		 */
	}

out:
	if (to_report) {
		kfence_report_error(addr, is_write, regs, to_report, error_type);
		raw_spin_unlock_irqrestore(&to_report->lock, flags);
	} else {
		/* This may be a UAF or OOB access, but we can't be sure. */
		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
	}

	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}