1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0 2*4882a593Smuzhiyun /* 3*4882a593Smuzhiyun * linux/arch/i386/kernel/head32.c -- prepare to run common code 4*4882a593Smuzhiyun * 5*4882a593Smuzhiyun * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE 6*4882a593Smuzhiyun * Copyright (C) 2007 Eric Biederman <ebiederm@xmission.com> 7*4882a593Smuzhiyun */ 8*4882a593Smuzhiyun 9*4882a593Smuzhiyun #include <linux/init.h> 10*4882a593Smuzhiyun #include <linux/start_kernel.h> 11*4882a593Smuzhiyun #include <linux/mm.h> 12*4882a593Smuzhiyun #include <linux/memblock.h> 13*4882a593Smuzhiyun 14*4882a593Smuzhiyun #include <asm/desc.h> 15*4882a593Smuzhiyun #include <asm/setup.h> 16*4882a593Smuzhiyun #include <asm/sections.h> 17*4882a593Smuzhiyun #include <asm/e820/api.h> 18*4882a593Smuzhiyun #include <asm/page.h> 19*4882a593Smuzhiyun #include <asm/apic.h> 20*4882a593Smuzhiyun #include <asm/io_apic.h> 21*4882a593Smuzhiyun #include <asm/bios_ebda.h> 22*4882a593Smuzhiyun #include <asm/tlbflush.h> 23*4882a593Smuzhiyun #include <asm/bootparam_utils.h> 24*4882a593Smuzhiyun i386_default_early_setup(void)25*4882a593Smuzhiyunstatic void __init i386_default_early_setup(void) 26*4882a593Smuzhiyun { 27*4882a593Smuzhiyun /* Initialize 32bit specific setup functions */ 28*4882a593Smuzhiyun x86_init.resources.reserve_resources = i386_reserve_resources; 29*4882a593Smuzhiyun x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc; 30*4882a593Smuzhiyun } 31*4882a593Smuzhiyun i386_start_kernel(void)32*4882a593Smuzhiyunasmlinkage __visible void __init i386_start_kernel(void) 33*4882a593Smuzhiyun { 34*4882a593Smuzhiyun /* Make sure IDT is set up before any exception happens */ 35*4882a593Smuzhiyun idt_setup_early_handler(); 36*4882a593Smuzhiyun 37*4882a593Smuzhiyun cr4_init_shadow(); 38*4882a593Smuzhiyun 39*4882a593Smuzhiyun sanitize_boot_params(&boot_params); 40*4882a593Smuzhiyun 41*4882a593Smuzhiyun x86_early_init_platform_quirks(); 42*4882a593Smuzhiyun 43*4882a593Smuzhiyun /* 
Call the subarch specific early setup function */ 44*4882a593Smuzhiyun switch (boot_params.hdr.hardware_subarch) { 45*4882a593Smuzhiyun case X86_SUBARCH_INTEL_MID: 46*4882a593Smuzhiyun x86_intel_mid_early_setup(); 47*4882a593Smuzhiyun break; 48*4882a593Smuzhiyun case X86_SUBARCH_CE4100: 49*4882a593Smuzhiyun x86_ce4100_early_setup(); 50*4882a593Smuzhiyun break; 51*4882a593Smuzhiyun default: 52*4882a593Smuzhiyun i386_default_early_setup(); 53*4882a593Smuzhiyun break; 54*4882a593Smuzhiyun } 55*4882a593Smuzhiyun 56*4882a593Smuzhiyun start_kernel(); 57*4882a593Smuzhiyun } 58*4882a593Smuzhiyun 59*4882a593Smuzhiyun /* 60*4882a593Smuzhiyun * Initialize page tables. This creates a PDE and a set of page 61*4882a593Smuzhiyun * tables, which are located immediately beyond __brk_base. The variable 62*4882a593Smuzhiyun * _brk_end is set up to point to the first "safe" location. 63*4882a593Smuzhiyun * Mappings are created both at virtual address 0 (identity mapping) 64*4882a593Smuzhiyun * and PAGE_OFFSET for up to _end. 65*4882a593Smuzhiyun * 66*4882a593Smuzhiyun * In PAE mode initial_page_table is statically defined to contain 67*4882a593Smuzhiyun * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3 68*4882a593Smuzhiyun * entries). The identity mapping is handled by pointing two PGD entries 69*4882a593Smuzhiyun * to the first kernel PMD. Note the upper half of each PMD or PTE are 70*4882a593Smuzhiyun * always zero at this stage. 
 */
void __init mk_early_pgtbl_32(void)
{
/*
 * This function runs before the kernel's page tables are active, so
 * every global object must be written through its physical address.
 * Redefine __pa() as a plain virtual-to-physical offset computation;
 * NOTE(review): presumably the normal __pa() is not usable this early
 * (cf. the pte_pfn()/CONFIG_PARAVIRT remark below) — confirm.
 */
#ifdef __pa
#undef __pa
#endif
#define __pa(x)  ((unsigned long)(x) - PAGE_OFFSET)
	pte_t pte, *ptep;
	int i;
	unsigned long *ptr;
	/* Enough space to fit pagetables for the low memory linear map */
	const unsigned long limit = __pa(_end) +
		(PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
/*
 * pl2/pl2p name the "level 2" table being filled: PMD entries under
 * PAE, PGD entries otherwise. SET_PL2 hides the field-name difference.
 */
#ifdef CONFIG_X86_PAE
	pmd_t pl2, *pl2p = (pmd_t *)__pa(initial_pg_pmd);
#define SET_PL2(pl2, val)    { (pl2).pmd = (val); }
#else
	pgd_t pl2, *pl2p = (pgd_t *)__pa(initial_page_table);
#define SET_PL2(pl2, val)   { (pl2).pgd = (val); }
#endif

	/* Page tables are built in the brk area, starting at __brk_base. */
	ptep = (pte_t *)__pa(__brk_base);
	/* First PTE: pfn 0 with the identity-mapping attributes. */
	pte.pte = PTE_IDENT_ATTR;

	/*
	 * Fill one page table (PTRS_PER_PTE entries) per iteration and
	 * point one level-2 entry at it, until the mapping reaches
	 * 'limit' (= _end plus room for the lowmem page tables).
	 */
	while ((pte.pte & PTE_PFN_MASK) < limit) {

		SET_PL2(pl2, (unsigned long)ptep | PDE_IDENT_ATTR);
		*pl2p = pl2;
#ifndef CONFIG_X86_PAE
		/* Kernel PDE entry */
		*(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2;
#endif
		for (i = 0; i < PTRS_PER_PTE; i++) {
			*ptep = pte;
			/* Advance the pfn by one page for the next entry. */
			pte.pte += PAGE_SIZE;
			ptep++;
		}

		pl2p++;
	}

	/* Publish the highest pfn mapped above (written via its phys addr). */
	ptr = (unsigned long *)__pa(&max_pfn_mapped);
	/* Can't use pte_pfn() since it's a call with CONFIG_PARAVIRT */
	*ptr = (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;

	/*
	 * Record the first free brk location (back in virtual-address
	 * terms) just past the page tables built above.
	 */
	ptr = (unsigned long *)__pa(&_brk_end);
	*ptr = (unsigned long)ptep + PAGE_OFFSET;
}