// SPDX-License-Identifier: GPL-2.0-only
/* ----------------------------------------------------------------------- *
 *
 *   Copyright 2014 Intel Corporation; author: H. Peter Anvin
 *
 * ----------------------------------------------------------------------- */

/*
 * The IRET instruction, when returning to a 16-bit segment, only
 * restores the bottom 16 bits of the user space stack pointer.  This
 * causes some 16-bit software to break, but it also leaks kernel state
 * to user space.
 *
 * We work around this by creating percpu "ministacks", each of which
 * is mapped 2^16 times 64K apart.  When we detect that the return SS is
 * on the LDT, we copy the IRET frame to the ministack and use the
 * relevant alias to return to userspace.  The ministacks are mapped
 * readonly, so if the IRET faults we promote #GP to #DF, which is an
 * IST vector and thus has its own stack; we then do the fixup in the
 * #DF handler.
 *
 * This file sets up the ministacks and the related page tables.  The
 * actual ministack invocation is in entry_64.S.
 */

#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/espfix.h>

/*
 * Note: we only need 6*8 = 48 bytes for the espfix stack, but round
 * it up to a cache line to avoid unnecessary sharing.
 */
#define ESPFIX_STACK_SIZE	(8*8UL)
#define ESPFIX_STACKS_PER_PAGE	(PAGE_SIZE/ESPFIX_STACK_SIZE)
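/* For instance, with 4K pages that is 4096/64 = 64 ministacks per page. */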

/* There is address space for how many espfix pages? */
#define ESPFIX_PAGE_SPACE	(1UL << (P4D_SHIFT-PAGE_SHIFT-16))
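/*
 * Rough derivation: the espfix region spans one P4D entry, and each
 * physical espfix page is aliased 2^16 times, so it consumes
 * 2^16 * PAGE_SIZE bytes of that region, leaving room for
 * 2^(P4D_SHIFT - PAGE_SHIFT - 16) distinct pages.
 */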

#define ESPFIX_MAX_CPUS		(ESPFIX_STACKS_PER_PAGE * ESPFIX_PAGE_SPACE)
#if CONFIG_NR_CPUS > ESPFIX_MAX_CPUS
# error "Need more virtual address space for the ESPFIX hack"
#endif

#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)

/* This contains the *bottom* address of the espfix stack */
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);

/* Initialization mutex - should this be a spinlock? */
static DEFINE_MUTEX(espfix_init_mutex);

/* Page allocation bitmap - each page serves ESPFIX_STACKS_PER_PAGE CPUs */
#define ESPFIX_MAX_PAGES	DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
static void *espfix_pages[ESPFIX_MAX_PAGES];

static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
	__aligned(PAGE_SIZE);

static unsigned int page_random, slot_random;

/*
 * This returns the bottom address of the espfix stack for a specific CPU.
 * The math allows for a non-power-of-two ESPFIX_STACK_SIZE, in which case
 * we have to account for some amount of padding at the end of each page.
 */
static inline unsigned long espfix_base_addr(unsigned int cpu)
{
	unsigned long page, slot;
	unsigned long addr;

	page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random;
	slot = (cpu + slot_random) % ESPFIX_STACKS_PER_PAGE;
	addr = (page << PAGE_SHIFT) + (slot * ESPFIX_STACK_SIZE);
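	/*
	 * Keep the low 16 bits of the offset in place and shift the rest
	 * up by 16: e.g. (hypothetical offset) 0x12345 -> 0x100002345.
	 * The low 16 bits thus identify the ministack within the
	 * 64K-aliased mapping set up in init_espfix_ap() below.
	 */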
	addr = (addr & 0xffffUL) | ((addr & ~0xffffUL) << 16);
	addr += ESPFIX_BASE_ADDR;
	return addr;
}

#define PTE_STRIDE		(65536/PAGE_SIZE)
#define ESPFIX_PTE_CLONES	(PTRS_PER_PTE/PTE_STRIDE)
#define ESPFIX_PMD_CLONES	PTRS_PER_PMD
#define ESPFIX_PUD_CLONES	(65536/(ESPFIX_PTE_CLONES*ESPFIX_PMD_CLONES))
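/*
 * The clone counts multiply out to the 2^16 aliases per page: with 4K
 * pages, every 16th PTE within a PTE page (32 clones), all 512 PMD
 * entries pointing at the same PTE page, and 4 PUD entries pointing at
 * the same PMD page give 32 * 512 * 4 = 65536 mappings of each
 * ministack page, 64K apart.
 */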

#define PGTABLE_PROT	((_KERNPG_TABLE & ~_PAGE_RW) | _PAGE_NX)
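/*
 * The intermediate page-table entries are present but read-only and NX;
 * nothing is written through the espfix aliases (the kernel writes the
 * ministack via espfix_waddr, which points into the direct mapping).
 */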

static void init_espfix_random(void)
{
	unsigned long rand;

	/*
	 * This is run before the entropy pools are initialized,
	 * but this is hopefully better than nothing.
	 */
	if (!arch_get_random_long(&rand)) {
		rand = rdtsc();
		/* The constant is an arbitrary large prime */
		rand *= 0xc345c6b72fd16123UL;
	}

	slot_random = rand % ESPFIX_STACKS_PER_PAGE;
	page_random = (rand / ESPFIX_STACKS_PER_PAGE)
		& (ESPFIX_PAGE_SPACE - 1);
}

void __init init_espfix_bsp(void)
{
	pgd_t *pgd;
	p4d_t *p4d;

	/* Install the espfix pud into the kernel page directory */
	pgd = &init_top_pgt[pgd_index(ESPFIX_BASE_ADDR)];
	p4d = p4d_alloc(&init_mm, pgd, ESPFIX_BASE_ADDR);
	p4d_populate(&init_mm, p4d, espfix_pud_page);

	/* Randomize the locations */
	init_espfix_random();

	/* The rest is the same as for any other processor */
	init_espfix_ap(0);
}

void init_espfix_ap(int cpu)
{
	unsigned int page;
	unsigned long addr;
	pud_t pud, *pud_p;
	pmd_t pmd, *pmd_p;
	pte_t pte, *pte_p;
	int n, node;
	void *stack_page;
	pteval_t ptemask;

	/* We only have to do this once... */
	if (likely(per_cpu(espfix_stack, cpu)))
		return;		/* Already initialized */

	addr = espfix_base_addr(cpu);
	page = cpu/ESPFIX_STACKS_PER_PAGE;

	/* Did another CPU already set this up? */
	stack_page = READ_ONCE(espfix_pages[page]);
	if (likely(stack_page))
		goto done;

	mutex_lock(&espfix_init_mutex);

	/* Did we race on the lock? */
	stack_page = READ_ONCE(espfix_pages[page]);
	if (stack_page)
		goto unlock_done;

	node = cpu_to_node(cpu);
	ptemask = __supported_pte_mask;

	pud_p = &espfix_pud_page[pud_index(addr)];
	pud = *pud_p;
	if (!pud_present(pud)) {
		struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);

		pmd_p = (pmd_t *)page_address(page);
		pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
		paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
		for (n = 0; n < ESPFIX_PUD_CLONES; n++)
			set_pud(&pud_p[n], pud);
	}

	pmd_p = pmd_offset(&pud, addr);
	pmd = *pmd_p;
	if (!pmd_present(pmd)) {
		struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);

		pte_p = (pte_t *)page_address(page);
		pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
		paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
		for (n = 0; n < ESPFIX_PMD_CLONES; n++)
			set_pmd(&pmd_p[n], pmd);
	}

	pte_p = pte_offset_kernel(&pmd, addr);
	stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));
	/*
	 * __PAGE_KERNEL_* includes _PAGE_GLOBAL, which we want since
	 * this is mapped to userspace.
	 */
	pte = __pte(__pa(stack_page) | ((__PAGE_KERNEL_RO | _PAGE_ENC) & ptemask));
	for (n = 0; n < ESPFIX_PTE_CLONES; n++)
		set_pte(&pte_p[n*PTE_STRIDE], pte);

	/* Job is done for this CPU and any CPU which shares this page */
	WRITE_ONCE(espfix_pages[page], stack_page);

unlock_done:
	mutex_unlock(&espfix_init_mutex);
done:
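	/*
	 * espfix_stack is this CPU's read-only alias of the ministack;
	 * espfix_waddr is the writable kernel address of the same memory,
	 * which entry_64.S uses when copying the IRET frame.
	 */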
	per_cpu(espfix_stack, cpu) = addr;
	per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page
				     + (addr & ~PAGE_MASK);
}