// SPDX-License-Identifier: GPL-2.0-only
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>

/*
 * Checks whether the object described by a given pointer and length is
 * contained by the current stack frame (when possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: fully on the stack (when can't do frame-checking)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
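 *
 * These return codes come from the usercopy enum in
 * <linux/thread_info.h>; arch_within_stack_frames() reports GOOD_FRAME
 * or BAD_STACK from the same set.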
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

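	/*
	 * The generic arch_within_stack_frames() stub returns 0
	 * (NOT_STACK) when the architecture cannot walk stack frames,
	 * so we fall through to the weaker whole-stack result.
	 */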
	return GOOD_STACK;
}

/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, and kernel text addressing will always trip the check.
 * For cache objects, it is checking that only the whitelisted range of
 * bytes for a given cache is being accessed (via the cache's usersize and
 * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
 * kmem_cache_create_usercopy() function to create the cache (and
 * carefully audit the whitelist range).
 */
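/*
 * usercopy_warn() is the non-fatal variant: with
 * CONFIG_HARDENED_USERCOPY_FALLBACK, a copy that misses a cache's
 * whitelist is reported via WARN_ONCE() instead of killing the caller.
 */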
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len)
{
	WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);
}

void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to call do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}
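/*
 * Half-open semantics worked example: overlaps(0x1000, 0x10, 0x100f,
 * 0x2000) is true, since [0x1000,0x1010) and [0x100f,0x2000) share
 * address 0x100f, while overlaps(0x1000, 0x10, 0x1010, 0x2000) is
 * false, since the two ranges merely touch at 0x1010.
 */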

/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This usually
	 * happens when there is a separate linear physical memory mapping,
	 * in which case __pa() is not simply the inverse of __va(). This
	 * can be detected and checked:
	 */
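	/*
	 * lm_alias() maps a kernel-image symbol address to its
	 * linear-map alias; unless the architecture overrides it,
	 * <linux/mm.h> defines it as __va(__pa_symbol(x)).
	 */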
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}

static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
				       bool to_user)
{
	/* Reject if object wraps past end of memory. */
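	/*
	 * e.g. on a 64-bit kernel, ptr == ULONG_MAX - 7 with n == 16
	 * makes ptr + (n - 1) wrap around, so the sum compares less
	 * than ptr itself.
	 */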
	if (ptr + (n - 1) < ptr)
		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

	/* Reject if NULL or ZERO-allocation. */
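	/*
	 * ZERO_OR_NULL_PTR() also catches ZERO_SIZE_PTR, the non-NULL
	 * sentinel that kmalloc(0) returns.
	 */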
	if (ZERO_OR_NULL_PTR(ptr))
		usercopy_abort("null address", NULL, to_user, ptr, n);
}

/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline void check_page_span(const void *ptr, unsigned long n,
				   struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			usercopy_abort("rodata", NULL, to_user, 0, n);
		return;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return;

	/* Allow if fully inside the same compound (__GFP_COMP) page. */
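	/*
	 * e.g. every base page of an order-2 alloc_pages(__GFP_COMP, 2)
	 * buffer resolves to the same head page, so ptr and end agree
	 * here even though the object crosses page boundaries.
	 */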
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return;

	/*
	 * Reject if range is entirely either Reserved (i.e. special or
	 * device memory), or CMA. Otherwise, reject since the object spans
	 * several independently allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			usercopy_abort("spans Reserved and non-Reserved pages",
				       NULL, to_user, 0, n);
		if (is_cma && !is_migrate_cma_page(page))
			usercopy_abort("spans CMA and non-CMA pages", NULL,
				       to_user, 0, n);
	}
#endif
}

static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	struct page *page;

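	/*
	 * vmalloc, module, and other non-linear-map addresses fail
	 * virt_addr_valid(); they are never slab-backed, so there is
	 * nothing further for this function to check.
	 */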
	if (!virt_addr_valid(ptr))
		return;

	/*
	 * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
	 * highmem page or fall back to virt_to_page(). The following
	 * is effectively a highmem-aware virt_to_head_page().
	 */
	page = compound_head(kmap_to_page((void *)ptr));

	if (PageSlab(page)) {
		/* Check slab allocator for flags and size. */
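		/*
		 * __check_heap_object() is implemented by the active
		 * slab allocator and enforces object bounds and, where
		 * supported, the per-cache useroffset/usersize
		 * whitelist.
		 */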
		__check_heap_object(ptr, n, page, to_user);
	} else {
		/* Verify object does not incorrectly span multiple pages. */
		check_page_span(ptr, n, page, to_user);
	}
}

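/*
 * Checks run by default; booting with "hardened_usercopy=off" turns
 * this bypass key on in set_hardened_usercopy() below, reducing
 * __check_object_size() to a single patched jump.
 */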
static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);

/*
 * Validates that the given object is:
 * - not a bogus address
 * - fully contained by stack (or stack frame, when available)
 * - fully within SLAB object (or object whitelist area, when available)
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	if (static_branch_unlikely(&bypass_usercopy_checks))
		return;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking not available).
		 */
		return;
	default:
		usercopy_abort("process stack", NULL, to_user, 0, n);
	}

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for object in kernel to avoid text exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);

static bool enable_checks __initdata = true;

static int __init parse_hardened_usercopy(char *str)
{
	if (strtobool(str, &enable_checks))
		pr_warn("Invalid option string for hardened_usercopy: '%s'\n",
			str);
	return 1;
}

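/*
 * Accepts the usual strtobool() spellings, e.g. "hardened_usercopy=off"
 * or "hardened_usercopy=0" on the kernel command line to disable the
 * checks at boot.
 */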
__setup("hardened_usercopy=", parse_hardened_usercopy);

static int __init set_hardened_usercopy(void)
{
	if (!enable_checks)
		static_branch_enable(&bypass_usercopy_checks);
	return 1;
}

late_initcall(set_hardened_usercopy);