// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written by and signed off for the Linux
 * kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 * Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

#undef pr_fmt
#define pr_fmt(fmt)	"Kernel/User page tables isolation: " fmt

/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK	0
#endif

/*
 * Define the page-table levels we clone for user-space on 32
 * and 64 bit.
 */
#ifdef CONFIG_X86_64
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PMD
#else
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PTE
#endif
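
/*
 * For illustration (not from the original source): with 4k pages,
 * PTI_CLONE_PMD copies one 2MB mapping per cloned entry, which matches
 * the granularity the 64-bit kernel image is mapped with.  On 32-bit,
 * the image is not guaranteed to be PMD-aligned, so whole-PMD clones
 * could pull in unrelated mappings; individual 4k PTEs are cloned
 * instead.
 */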

static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static enum pti_mode {
	PTI_AUTO = 0,
	PTI_FORCE_OFF,
	PTI_FORCE_ON
} pti_mode;

void __init pti_check_boottime_disable(void)
{
	char arg[5];
	int ret;

	/* Assume mode is auto unless overridden. */
	pti_mode = PTI_AUTO;

	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
	if (ret > 0) {
		if (ret == 3 && !strncmp(arg, "off", 3)) {
			pti_mode = PTI_FORCE_OFF;
			pti_print_if_insecure("disabled on command line.");
			return;
		}
		if (ret == 2 && !strncmp(arg, "on", 2)) {
			pti_mode = PTI_FORCE_ON;
			pti_print_if_secure("force enabled on command line.");
			goto enable;
		}
		if (ret == 4 && !strncmp(arg, "auto", 4)) {
			pti_mode = PTI_AUTO;
			goto autosel;
		}
	}

	if (cmdline_find_option_bool(boot_command_line, "nopti") ||
	    cpu_mitigations_off()) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on command line.");
		return;
	}

autosel:
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;
enable:
	setup_force_cpu_cap(X86_FEATURE_PTI);
}
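
/*
 * Quick reference for the option handling above (derived from the
 * strings parsed there):
 *
 *	pti=on		- force-enable, even on CPUs without the bug
 *	pti=off		- force-disable
 *	pti=auto	- enable only when X86_BUG_CPU_MELTDOWN is set
 *	nopti		- legacy equivalent of pti=off
 *	mitigations=off	- also disables PTI, via cpu_mitigations_off()
 */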

pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set. This could be an executable
	 *    EFI runtime mapping or something similar, and the kernel
	 *    may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}
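
/*
 * Worked example for the NX logic above (hypothetical values): a user
 * PGD entry of 0x0000000012345067 has _PAGE_PRESENT (bit 0) and
 * _PAGE_USER (bit 2) set, so the kernel-side copy becomes
 * 0x8000000012345067 with _PAGE_NX (bit 63) ORed in.  A stray return
 * to usermode on the kernel CR3 then faults on the first instruction
 * fetch instead of running user code.
 */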

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_p4d_page))
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_large(*pgd) != 0);

	return p4d_offset(pgd, address);
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_large(*p4d) != 0);
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pud_page))
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_large(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pmd_page))
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}

/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.  Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables.  It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* We can't do anything sensible if we hit a large mapping. */
	if (pmd_large(*pmd)) {
		WARN_ON(1);
		return NULL;
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}
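
/*
 * The three walkers above compose: pti_user_pagetable_walk_pte() goes
 * user PGD -> P4D -> PUD -> PMD -> PTE, allocating zeroed table pages
 * as needed.  A large mapping or a failed allocation anywhere on the
 * way down surfaces as a NULL return, which callers treat as a WARNed
 * hard failure.
 */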

#ifdef CONFIG_X86_VSYSCALL_EMULATION
static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

enum pti_clone_level {
	PTI_CLONE_PMD,
	PTI_CLONE_PTE,
};

static void
pti_clone_pgtable(unsigned long start, unsigned long end,
		  enum pti_clone_level level)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end;) {
		pte_t *pte, *target_pte;
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		/* Overflow check */
		if (addr < start)
			break;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			WARN_ON_ONCE(addr & ~PUD_MASK);
			addr = round_up(addr + 1, PUD_SIZE);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			WARN_ON_ONCE(addr & ~PMD_MASK);
			addr = round_up(addr + 1, PMD_SIZE);
			continue;
		}

		if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
			target_pmd = pti_user_pagetable_walk_pmd(addr);
			if (WARN_ON(!target_pmd))
				return;

			/*
			 * Only clone present PMDs.  This ensures only setting
			 * _PAGE_GLOBAL on present PMDs.  This should only be
			 * called on well-known addresses anyway, so a non-
			 * present PMD would be a surprise.
			 */
			if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
				return;

			/*
			 * Setting 'target_pmd' below creates a mapping in both
			 * the user and kernel page tables.  It is effectively
			 * global, so set it as global in both copies.  Note:
			 * the X86_FEATURE_PGE check is not _required_ because
			 * the CPU ignores _PAGE_GLOBAL when PGE is not
			 * supported.  The check keeps consistency with
			 * code that only sets this bit when supported.
			 */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

			/*
			 * Copy the PMD.  That is, the kernelmode and usermode
			 * tables will share the last-level page tables of this
			 * address range
			 */
			*target_pmd = *pmd;

			addr += PMD_SIZE;

		} else if (level == PTI_CLONE_PTE) {

			/* Walk the page-table down to the pte level */
			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte)) {
				addr += PAGE_SIZE;
				continue;
			}

			/* Only clone present PTEs */
			if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
				return;

			/* Allocate PTE in the user page-table */
			target_pte = pti_user_pagetable_walk_pte(addr);
			if (WARN_ON(!target_pte))
				return;

			/* Set GLOBAL bit in both PTEs */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pte = pte_set_flags(*pte, _PAGE_GLOBAL);

			/* Clone the PTE */
			*target_pte = *pte;

			addr += PAGE_SIZE;

		} else {
			BUG();
		}
	}
}
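
/*
 * Example of the hole skipping above (hypothetical address, 4k pages):
 * if the PUD covering 0xffffffff81200000 is empty, round_up(addr + 1,
 * PUD_SIZE) jumps straight to the next 1GB boundary at
 * 0xffffffffc0000000 instead of stepping PMD by PMD; empty PMDs are
 * skipped the same way in 2MB strides.
 */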

#ifdef CONFIG_X86_64
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA and associated data into the user space visible
 * page table.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned int cpu;

	pti_clone_p4d(CPU_ENTRY_AREA_BASE);

	for_each_possible_cpu(cpu) {
		/*
		 * The SYSCALL64 entry code needs to be able to find the
		 * thread stack and needs one word of scratch space in which
		 * to spill a register.  All of this lives in the TSS, in
		 * the sp1 and sp2 slots.
		 *
		 * This is done for all possible CPUs during boot to ensure
		 * that it's propagated to all mms.
		 */

		unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
		pte_t *target_pte;

		target_pte = pti_user_pagetable_walk_pte(va);
		if (WARN_ON(!target_pte))
			return;

		*target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
	}
}
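
/*
 * Note on the loop above: the TSS lives in the per-cpu area, whose
 * virtual layout does not match its physical layout, so the PTE is
 * built by hand from per_cpu_ptr_to_phys() rather than cloned from the
 * kernel tables.  Mapping the page with PAGE_KERNEL keeps it present
 * in the user CR3 but still supervisor-only.
 */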

#else /* CONFIG_X86_64 */

/*
 * On 32 bit PAE systems with 1GB of Kernel address space there is only
 * one pgd/p4d for the whole kernel. Cloning that would map the whole
 * address space into the user page-tables, making PTI useless. So clone
 * the page-table on the PMD level to prevent that.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned long start, end;

	start = CPU_ENTRY_AREA_BASE;
	end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
}
#endif /* CONFIG_X86_64 */

/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry text and force it RO.
 */
static void pti_clone_entry_text(void)
{
	pti_clone_pgtable((unsigned long) __entry_text_start,
			  (unsigned long) __entry_text_end,
			  PTI_CLONE_PMD);

	/*
	 * If CFI is enabled, also map jump tables, so the entry code can
	 * make indirect calls.
	 */
	if (IS_ENABLED(CONFIG_CFI_CLANG))
		pti_clone_pgtable((unsigned long) __cfi_jt_start,
				  (unsigned long) __cfi_jt_end,
				  PTI_CLONE_PMD);
}
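
/*
 * The entry text must be visible in the user page-tables because it
 * runs before the switch to the kernel CR3 on the way in, and after
 * the switch back on the way out; without this clone the first
 * instruction at a SYSCALL or interrupt entry would fault with no way
 * to recover.
 */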

/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
	/*
	 * Systems with PCIDs get little benefit from global
	 * kernel text and are not worth the downsides.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PCID))
		return false;

	/*
	 * Only do global kernel image for pti=auto.  Do the most
	 * secure thing (not global) if pti=on is specified.
	 */
	if (pti_mode != PTI_AUTO)
		return false;

	/*
	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
	 * global kernel image pages.  Do the safe thing (disable
	 * global kernel image).  This is unlikely to ever be
	 * noticed because PTI is disabled by default on AMD CPUs.
	 */
	if (boot_cpu_has(X86_FEATURE_K8))
		return false;

	/*
	 * RANDSTRUCT derives its hardening benefits from the
	 * attacker's lack of knowledge about the layout of kernel
	 * data structures.  Keep the kernel image non-global in
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
	if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
		return false;

	return true;
}
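
/*
 * Background for the PCID check above: with PCIDs, user and kernel run
 * under different ASIDs and the PTI CR3 switch can use the no-flush
 * variant, so kernel TLB entries already survive round trips to
 * usermode.  Global kernel text would add little on top of that while
 * still widening the Meltdown target.
 */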

/*
 * For some configurations, map all of kernel text into the user page
 * tables.  This reduces TLB misses, especially on non-PCID systems.
 */
static void pti_clone_kernel_text(void)
{
	/*
	 * rodata is part of the kernel image and is normally
	 * readable on the filesystem or on the web.  But, do not
	 * clone the areas past rodata, they might contain secrets.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
	unsigned long end_global = PFN_ALIGN((unsigned long)_etext);

	if (!pti_kernel_image_global_ok())
		return;

	pr_debug("mapping partial kernel image into user address space\n");

	/*
	 * Note that this will undo _some_ of the work that
	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
	 */
	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);

	/*
	 * pti_clone_pgtable() will set the global bit in any PMDs
	 * that it clones, but we also need to get any PTEs in
	 * the last level for areas that are not huge-page-aligned.
	 */

	/* Set the global bit for normal non-__init kernel text: */
	set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}

static void pti_set_kernel_image_nonglobal(void)
{
	/*
	 * The identity map is created with PMDs, regardless of the
	 * actual length of the kernel.  We need to clear
	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
	 * of the image.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);

	/*
	 * This clears _PAGE_GLOBAL from the entire kernel image.
	 * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
	 * areas that are mapped to userspace.
	 */
	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}
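
/*
 * Alignment example for the round-up above (hypothetical addresses):
 * for an image with _end = 0xffffffff82e51000, _PAGE_GLOBAL is cleared
 * up to the next 2MB boundary at 0xffffffff83000000, covering the tail
 * of the final PMD-sized mapping as well.
 */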

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

#ifdef CONFIG_X86_32
	/*
	 * We check for X86_FEATURE_PCID here. But the init-code will
	 * clear the feature flag on 32 bit because the feature is not
	 * supported on 32 bit anyway. To print the warning we need to
	 * check with cpuid directly again.
	 */
	if (cpuid_ecx(0x1) & BIT(17)) {
		/* Use printk to work around pr_fmt() */
		printk(KERN_WARNING "\n");
		printk(KERN_WARNING "************************************************************\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
		printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
		printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "************************************************************\n");
	}
#endif

	pti_clone_user_shared();

	/* Undo all global bits from the init pagetables in head_64.S: */
	pti_set_kernel_image_nonglobal();
	/* Replace some of the global bits just for shared entry text: */
	pti_clone_entry_text();
	pti_setup_espfix64();
	pti_setup_vsyscall();
}

/*
 * Finalize the kernel mappings in the userspace page-table. Some of the
 * mappings for the kernel image might have changed since pti_init()
 * cloned them. This is because parts of the kernel image have been
 * mapped RO and/or NX.  These changes need to be cloned again to the
 * userspace page-table.
 */
void pti_finalize(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;
	/*
	 * We need to clone everything (again) that maps parts of the
	 * kernel image.
	 */
	pti_clone_entry_text();
	pti_clone_kernel_text();

	debug_checkwx_user();
}