1 /* 2 * (C) Copyright 2008-2011 3 * Graeme Russ, <graeme.russ@gmail.com> 4 * 5 * (C) Copyright 2002 6 * Daniel Engström, Omicron Ceti AB, <daniel@omicron.se> 7 * 8 * (C) Copyright 2002 9 * Sysgo Real-Time Solutions, GmbH <www.elinos.com> 10 * Marius Groeger <mgroeger@sysgo.de> 11 * 12 * (C) Copyright 2002 13 * Sysgo Real-Time Solutions, GmbH <www.elinos.com> 14 * Alex Zuepke <azu@sysgo.de> 15 * 16 * Part of this file is adapted from coreboot 17 * src/arch/x86/lib/cpu.c 18 * 19 * SPDX-License-Identifier: GPL-2.0+ 20 */ 21 22 #include <common.h> 23 #include <command.h> 24 #include <dm.h> 25 #include <errno.h> 26 #include <malloc.h> 27 #include <asm/control_regs.h> 28 #include <asm/cpu.h> 29 #include <asm/lapic.h> 30 #include <asm/microcode.h> 31 #include <asm/mp.h> 32 #include <asm/mrccache.h> 33 #include <asm/msr.h> 34 #include <asm/mtrr.h> 35 #include <asm/post.h> 36 #include <asm/processor.h> 37 #include <asm/processor-flags.h> 38 #include <asm/interrupt.h> 39 #include <asm/tables.h> 40 #include <linux/compiler.h> 41 42 DECLARE_GLOBAL_DATA_PTR; 43 44 /* 45 * Constructor for a conventional segment GDT (or LDT) entry 46 * This is a macro so it can be used in initialisers 47 */ 48 #define GDT_ENTRY(flags, base, limit) \ 49 ((((base) & 0xff000000ULL) << (56-24)) | \ 50 (((flags) & 0x0000f0ffULL) << 40) | \ 51 (((limit) & 0x000f0000ULL) << (48-16)) | \ 52 (((base) & 0x00ffffffULL) << 16) | \ 53 (((limit) & 0x0000ffffULL))) 54 55 struct gdt_ptr { 56 u16 len; 57 u32 ptr; 58 } __packed; 59 60 struct cpu_device_id { 61 unsigned vendor; 62 unsigned device; 63 }; 64 65 struct cpuinfo_x86 { 66 uint8_t x86; /* CPU family */ 67 uint8_t x86_vendor; /* CPU vendor */ 68 uint8_t x86_model; 69 uint8_t x86_mask; 70 }; 71 72 /* 73 * List of cpu vendor strings along with their normalized 74 * id values. 75 */ 76 static const struct { 77 int vendor; 78 const char *name; 79 } x86_vendors[] = { 80 { X86_VENDOR_INTEL, "GenuineIntel", }, 81 { X86_VENDOR_CYRIX, "CyrixInstead", }, 82 { X86_VENDOR_AMD, "AuthenticAMD", }, 83 { X86_VENDOR_UMC, "UMC UMC UMC ", }, 84 { X86_VENDOR_NEXGEN, "NexGenDriven", }, 85 { X86_VENDOR_CENTAUR, "CentaurHauls", }, 86 { X86_VENDOR_RISE, "RiseRiseRise", }, 87 { X86_VENDOR_TRANSMETA, "GenuineTMx86", }, 88 { X86_VENDOR_TRANSMETA, "TransmetaCPU", }, 89 { X86_VENDOR_NSC, "Geode by NSC", }, 90 { X86_VENDOR_SIS, "SiS SiS SiS ", }, 91 }; 92 93 static const char *const x86_vendor_name[] = { 94 [X86_VENDOR_INTEL] = "Intel", 95 [X86_VENDOR_CYRIX] = "Cyrix", 96 [X86_VENDOR_AMD] = "AMD", 97 [X86_VENDOR_UMC] = "UMC", 98 [X86_VENDOR_NEXGEN] = "NexGen", 99 [X86_VENDOR_CENTAUR] = "Centaur", 100 [X86_VENDOR_RISE] = "Rise", 101 [X86_VENDOR_TRANSMETA] = "Transmeta", 102 [X86_VENDOR_NSC] = "NSC", 103 [X86_VENDOR_SIS] = "SiS", 104 }; 105 106 static void load_ds(u32 segment) 107 { 108 asm volatile("movl %0, %%ds" : : "r" (segment * X86_GDT_ENTRY_SIZE)); 109 } 110 111 static void load_es(u32 segment) 112 { 113 asm volatile("movl %0, %%es" : : "r" (segment * X86_GDT_ENTRY_SIZE)); 114 } 115 116 static void load_fs(u32 segment) 117 { 118 asm volatile("movl %0, %%fs" : : "r" (segment * X86_GDT_ENTRY_SIZE)); 119 } 120 121 static void load_gs(u32 segment) 122 { 123 asm volatile("movl %0, %%gs" : : "r" (segment * X86_GDT_ENTRY_SIZE)); 124 } 125 126 static void load_ss(u32 segment) 127 { 128 asm volatile("movl %0, %%ss" : : "r" (segment * X86_GDT_ENTRY_SIZE)); 129 } 130 131 static void load_gdt(const u64 *boot_gdt, u16 num_entries) 132 { 133 struct gdt_ptr gdt; 134 135 gdt.len = (num_entries * X86_GDT_ENTRY_SIZE) - 1; 136 gdt.ptr = (u32)boot_gdt; 137 138 asm volatile("lgdtl %0\n" : : "m" (gdt)); 139 } 140 141 void arch_setup_gd(gd_t *new_gd) 142 { 143 u64 *gdt_addr; 144 145 gdt_addr = new_gd->arch.gdt; 146 147 /* 148 * CS: code, read/execute, 4 GB, base 0 149 * 150 * Some OS (like VxWorks) requires GDT entry 1 to be the 32-bit CS 151 */ 152 gdt_addr[X86_GDT_ENTRY_UNUSED] = GDT_ENTRY(0xc09b, 0, 0xfffff); 153 gdt_addr[X86_GDT_ENTRY_32BIT_CS] = GDT_ENTRY(0xc09b, 0, 0xfffff); 154 155 /* DS: data, read/write, 4 GB, base 0 */ 156 gdt_addr[X86_GDT_ENTRY_32BIT_DS] = GDT_ENTRY(0xc093, 0, 0xfffff); 157 158 /* FS: data, read/write, 4 GB, base (Global Data Pointer) */ 159 new_gd->arch.gd_addr = new_gd; 160 gdt_addr[X86_GDT_ENTRY_32BIT_FS] = GDT_ENTRY(0xc093, 161 (ulong)&new_gd->arch.gd_addr, 0xfffff); 162 163 /* 16-bit CS: code, read/execute, 64 kB, base 0 */ 164 gdt_addr[X86_GDT_ENTRY_16BIT_CS] = GDT_ENTRY(0x009b, 0, 0x0ffff); 165 166 /* 16-bit DS: data, read/write, 64 kB, base 0 */ 167 gdt_addr[X86_GDT_ENTRY_16BIT_DS] = GDT_ENTRY(0x0093, 0, 0x0ffff); 168 169 gdt_addr[X86_GDT_ENTRY_16BIT_FLAT_CS] = GDT_ENTRY(0x809b, 0, 0xfffff); 170 gdt_addr[X86_GDT_ENTRY_16BIT_FLAT_DS] = GDT_ENTRY(0x8093, 0, 0xfffff); 171 172 load_gdt(gdt_addr, X86_GDT_NUM_ENTRIES); 173 load_ds(X86_GDT_ENTRY_32BIT_DS); 174 load_es(X86_GDT_ENTRY_32BIT_DS); 175 load_gs(X86_GDT_ENTRY_32BIT_DS); 176 load_ss(X86_GDT_ENTRY_32BIT_DS); 177 load_fs(X86_GDT_ENTRY_32BIT_FS); 178 } 179 180 #ifdef CONFIG_HAVE_FSP 181 /* 182 * Setup FSP execution environment GDT 183 * 184 * Per Intel FSP external architecture specification, before calling any FSP 185 * APIs, we need make sure the system is in flat 32-bit mode and both the code 186 * and data selectors should have full 4GB access range. Here we reuse the one 187 * we used in arch/x86/cpu/start16.S, and reload the segement registers. 188 */ 189 void setup_fsp_gdt(void) 190 { 191 load_gdt((const u64 *)(gdt_rom + CONFIG_RESET_SEG_START), 4); 192 load_ds(X86_GDT_ENTRY_32BIT_DS); 193 load_ss(X86_GDT_ENTRY_32BIT_DS); 194 load_es(X86_GDT_ENTRY_32BIT_DS); 195 load_fs(X86_GDT_ENTRY_32BIT_DS); 196 load_gs(X86_GDT_ENTRY_32BIT_DS); 197 } 198 #endif 199 200 int __weak x86_cleanup_before_linux(void) 201 { 202 #ifdef CONFIG_BOOTSTAGE_STASH 203 bootstage_stash((void *)CONFIG_BOOTSTAGE_STASH_ADDR, 204 CONFIG_BOOTSTAGE_STASH_SIZE); 205 #endif 206 207 return 0; 208 } 209 210 /* 211 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected 212 * by the fact that they preserve the flags across the division of 5/2. 213 * PII and PPro exhibit this behavior too, but they have cpuid available. 214 */ 215 216 /* 217 * Perform the Cyrix 5/2 test. A Cyrix won't change 218 * the flags, while other 486 chips will. 219 */ 220 static inline int test_cyrix_52div(void) 221 { 222 unsigned int test; 223 224 __asm__ __volatile__( 225 "sahf\n\t" /* clear flags (%eax = 0x0005) */ 226 "div %b2\n\t" /* divide 5 by 2 */ 227 "lahf" /* store flags into %ah */ 228 : "=a" (test) 229 : "0" (5), "q" (2) 230 : "cc"); 231 232 /* AH is 0x02 on Cyrix after the divide.. */ 233 return (unsigned char) (test >> 8) == 0x02; 234 } 235 236 /* 237 * Detect a NexGen CPU running without BIOS hypercode new enough 238 * to have CPUID. (Thanks to Herbert Oppmann) 239 */ 240 241 static int deep_magic_nexgen_probe(void) 242 { 243 int ret; 244 245 __asm__ __volatile__ ( 246 " movw $0x5555, %%ax\n" 247 " xorw %%dx,%%dx\n" 248 " movw $2, %%cx\n" 249 " divw %%cx\n" 250 " movl $0, %%eax\n" 251 " jnz 1f\n" 252 " movl $1, %%eax\n" 253 "1:\n" 254 : "=a" (ret) : : "cx", "dx"); 255 return ret; 256 } 257 258 static bool has_cpuid(void) 259 { 260 return flag_is_changeable_p(X86_EFLAGS_ID); 261 } 262 263 static bool has_mtrr(void) 264 { 265 return cpuid_edx(0x00000001) & (1 << 12) ? true : false; 266 } 267 268 static int build_vendor_name(char *vendor_name) 269 { 270 struct cpuid_result result; 271 result = cpuid(0x00000000); 272 unsigned int *name_as_ints = (unsigned int *)vendor_name; 273 274 name_as_ints[0] = result.ebx; 275 name_as_ints[1] = result.edx; 276 name_as_ints[2] = result.ecx; 277 278 return result.eax; 279 } 280 281 static void identify_cpu(struct cpu_device_id *cpu) 282 { 283 char vendor_name[16]; 284 int i; 285 286 vendor_name[0] = '\0'; /* Unset */ 287 cpu->device = 0; /* fix gcc 4.4.4 warning */ 288 289 /* Find the id and vendor_name */ 290 if (!has_cpuid()) { 291 /* Its a 486 if we can modify the AC flag */ 292 if (flag_is_changeable_p(X86_EFLAGS_AC)) 293 cpu->device = 0x00000400; /* 486 */ 294 else 295 cpu->device = 0x00000300; /* 386 */ 296 if ((cpu->device == 0x00000400) && test_cyrix_52div()) { 297 memcpy(vendor_name, "CyrixInstead", 13); 298 /* If we ever care we can enable cpuid here */ 299 } 300 /* Detect NexGen with old hypercode */ 301 else if (deep_magic_nexgen_probe()) 302 memcpy(vendor_name, "NexGenDriven", 13); 303 } 304 if (has_cpuid()) { 305 int cpuid_level; 306 307 cpuid_level = build_vendor_name(vendor_name); 308 vendor_name[12] = '\0'; 309 310 /* Intel-defined flags: level 0x00000001 */ 311 if (cpuid_level >= 0x00000001) { 312 cpu->device = cpuid_eax(0x00000001); 313 } else { 314 /* Have CPUID level 0 only unheard of */ 315 cpu->device = 0x00000400; 316 } 317 } 318 cpu->vendor = X86_VENDOR_UNKNOWN; 319 for (i = 0; i < ARRAY_SIZE(x86_vendors); i++) { 320 if (memcmp(vendor_name, x86_vendors[i].name, 12) == 0) { 321 cpu->vendor = x86_vendors[i].vendor; 322 break; 323 } 324 } 325 } 326 327 static inline void get_fms(struct cpuinfo_x86 *c, uint32_t tfms) 328 { 329 c->x86 = (tfms >> 8) & 0xf; 330 c->x86_model = (tfms >> 4) & 0xf; 331 c->x86_mask = tfms & 0xf; 332 if (c->x86 == 0xf) 333 c->x86 += (tfms >> 20) & 0xff; 334 if (c->x86 >= 0x6) 335 c->x86_model += ((tfms >> 16) & 0xF) << 4; 336 } 337 338 u32 cpu_get_family_model(void) 339 { 340 return gd->arch.x86_device & 0x0fff0ff0; 341 } 342 343 u32 cpu_get_stepping(void) 344 { 345 return gd->arch.x86_mask; 346 } 347 348 int x86_cpu_init_f(void) 349 { 350 const u32 em_rst = ~X86_CR0_EM; 351 const u32 mp_ne_set = X86_CR0_MP | X86_CR0_NE; 352 353 if (ll_boot_init()) { 354 /* initialize FPU, reset EM, set MP and NE */ 355 asm ("fninit\n" \ 356 "movl %%cr0, %%eax\n" \ 357 "andl %0, %%eax\n" \ 358 "orl %1, %%eax\n" \ 359 "movl %%eax, %%cr0\n" \ 360 : : "i" (em_rst), "i" (mp_ne_set) : "eax"); 361 } 362 363 /* identify CPU via cpuid and store the decoded info into gd->arch */ 364 if (has_cpuid()) { 365 struct cpu_device_id cpu; 366 struct cpuinfo_x86 c; 367 368 identify_cpu(&cpu); 369 get_fms(&c, cpu.device); 370 gd->arch.x86 = c.x86; 371 gd->arch.x86_vendor = cpu.vendor; 372 gd->arch.x86_model = c.x86_model; 373 gd->arch.x86_mask = c.x86_mask; 374 gd->arch.x86_device = cpu.device; 375 376 gd->arch.has_mtrr = has_mtrr(); 377 } 378 /* Don't allow PCI region 3 to use memory in the 2-4GB memory hole */ 379 gd->pci_ram_top = 0x80000000U; 380 381 /* Configure fixed range MTRRs for some legacy regions */ 382 if (gd->arch.has_mtrr) { 383 u64 mtrr_cap; 384 385 mtrr_cap = native_read_msr(MTRR_CAP_MSR); 386 if (mtrr_cap & MTRR_CAP_FIX) { 387 /* Mark the VGA RAM area as uncacheable */ 388 native_write_msr(MTRR_FIX_16K_A0000_MSR, 389 MTRR_FIX_TYPE(MTRR_TYPE_UNCACHEABLE), 390 MTRR_FIX_TYPE(MTRR_TYPE_UNCACHEABLE)); 391 392 /* 393 * Mark the PCI ROM area as cacheable to improve ROM 394 * execution performance. 395 */ 396 native_write_msr(MTRR_FIX_4K_C0000_MSR, 397 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK), 398 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK)); 399 native_write_msr(MTRR_FIX_4K_C8000_MSR, 400 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK), 401 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK)); 402 native_write_msr(MTRR_FIX_4K_D0000_MSR, 403 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK), 404 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK)); 405 native_write_msr(MTRR_FIX_4K_D8000_MSR, 406 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK), 407 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK)); 408 409 /* Enable the fixed range MTRRs */ 410 msr_setbits_64(MTRR_DEF_TYPE_MSR, MTRR_DEF_TYPE_FIX_EN); 411 } 412 } 413 414 #ifdef CONFIG_I8254_TIMER 415 /* Set up the i8254 timer if required */ 416 i8254_init(); 417 #endif 418 419 return 0; 420 } 421 422 void x86_enable_caches(void) 423 { 424 unsigned long cr0; 425 426 cr0 = read_cr0(); 427 cr0 &= ~(X86_CR0_NW | X86_CR0_CD); 428 write_cr0(cr0); 429 wbinvd(); 430 } 431 void enable_caches(void) __attribute__((weak, alias("x86_enable_caches"))); 432 433 void x86_disable_caches(void) 434 { 435 unsigned long cr0; 436 437 cr0 = read_cr0(); 438 cr0 |= X86_CR0_NW | X86_CR0_CD; 439 wbinvd(); 440 write_cr0(cr0); 441 wbinvd(); 442 } 443 void disable_caches(void) __attribute__((weak, alias("x86_disable_caches"))); 444 445 int x86_init_cache(void) 446 { 447 enable_caches(); 448 449 return 0; 450 } 451 int init_cache(void) __attribute__((weak, alias("x86_init_cache"))); 452 453 int do_reset(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[]) 454 { 455 printf("resetting ...\n"); 456 457 /* wait 50 ms */ 458 udelay(50000); 459 disable_interrupts(); 460 reset_cpu(0); 461 462 /*NOTREACHED*/ 463 return 0; 464 } 465 466 void flush_cache(unsigned long dummy1, unsigned long dummy2) 467 { 468 asm("wbinvd\n"); 469 } 470 471 __weak void reset_cpu(ulong addr) 472 { 473 /* Do a hard reset through the chipset's reset control register */ 474 outb(SYS_RST | RST_CPU, IO_PORT_RESET); 475 for (;;) 476 cpu_hlt(); 477 } 478 479 void x86_full_reset(void) 480 { 481 outb(FULL_RST | SYS_RST | RST_CPU, IO_PORT_RESET); 482 } 483 484 int dcache_status(void) 485 { 486 return !(read_cr0() & X86_CR0_CD); 487 } 488 489 /* Define these functions to allow ehch-hcd to function */ 490 void flush_dcache_range(unsigned long start, unsigned long stop) 491 { 492 } 493 494 void invalidate_dcache_range(unsigned long start, unsigned long stop) 495 { 496 } 497 498 void dcache_enable(void) 499 { 500 enable_caches(); 501 } 502 503 void dcache_disable(void) 504 { 505 disable_caches(); 506 } 507 508 void icache_enable(void) 509 { 510 } 511 512 void icache_disable(void) 513 { 514 } 515 516 int icache_status(void) 517 { 518 return 1; 519 } 520 521 void cpu_enable_paging_pae(ulong cr3) 522 { 523 __asm__ __volatile__( 524 /* Load the page table address */ 525 "movl %0, %%cr3\n" 526 /* Enable pae */ 527 "movl %%cr4, %%eax\n" 528 "orl $0x00000020, %%eax\n" 529 "movl %%eax, %%cr4\n" 530 /* Enable paging */ 531 "movl %%cr0, %%eax\n" 532 "orl $0x80000000, %%eax\n" 533 "movl %%eax, %%cr0\n" 534 : 535 : "r" (cr3) 536 : "eax"); 537 } 538 539 void cpu_disable_paging_pae(void) 540 { 541 /* Turn off paging */ 542 __asm__ __volatile__ ( 543 /* Disable paging */ 544 "movl %%cr0, %%eax\n" 545 "andl $0x7fffffff, %%eax\n" 546 "movl %%eax, %%cr0\n" 547 /* Disable pae */ 548 "movl %%cr4, %%eax\n" 549 "andl $0xffffffdf, %%eax\n" 550 "movl %%eax, %%cr4\n" 551 : 552 : 553 : "eax"); 554 } 555 556 static bool can_detect_long_mode(void) 557 { 558 return cpuid_eax(0x80000000) > 0x80000000UL; 559 } 560 561 static bool has_long_mode(void) 562 { 563 return cpuid_edx(0x80000001) & (1 << 29) ? true : false; 564 } 565 566 int cpu_has_64bit(void) 567 { 568 return has_cpuid() && can_detect_long_mode() && 569 has_long_mode(); 570 } 571 572 const char *cpu_vendor_name(int vendor) 573 { 574 const char *name; 575 name = "<invalid cpu vendor>"; 576 if ((vendor < (ARRAY_SIZE(x86_vendor_name))) && 577 (x86_vendor_name[vendor] != 0)) 578 name = x86_vendor_name[vendor]; 579 580 return name; 581 } 582 583 char *cpu_get_name(char *name) 584 { 585 unsigned int *name_as_ints = (unsigned int *)name; 586 struct cpuid_result regs; 587 char *ptr; 588 int i; 589 590 /* This bit adds up to 48 bytes */ 591 for (i = 0; i < 3; i++) { 592 regs = cpuid(0x80000002 + i); 593 name_as_ints[i * 4 + 0] = regs.eax; 594 name_as_ints[i * 4 + 1] = regs.ebx; 595 name_as_ints[i * 4 + 2] = regs.ecx; 596 name_as_ints[i * 4 + 3] = regs.edx; 597 } 598 name[CPU_MAX_NAME_LEN - 1] = '\0'; 599 600 /* Skip leading spaces. */ 601 ptr = name; 602 while (*ptr == ' ') 603 ptr++; 604 605 return ptr; 606 } 607 608 int default_print_cpuinfo(void) 609 { 610 printf("CPU: %s, vendor %s, device %xh\n", 611 cpu_has_64bit() ? "x86_64" : "x86", 612 cpu_vendor_name(gd->arch.x86_vendor), gd->arch.x86_device); 613 614 return 0; 615 } 616 617 #define PAGETABLE_SIZE (6 * 4096) 618 619 /** 620 * build_pagetable() - build a flat 4GiB page table structure for 64-bti mode 621 * 622 * @pgtable: Pointer to a 24iKB block of memory 623 */ 624 static void build_pagetable(uint32_t *pgtable) 625 { 626 uint i; 627 628 memset(pgtable, '\0', PAGETABLE_SIZE); 629 630 /* Level 4 needs a single entry */ 631 pgtable[0] = (uint32_t)&pgtable[1024] + 7; 632 633 /* Level 3 has one 64-bit entry for each GiB of memory */ 634 for (i = 0; i < 4; i++) { 635 pgtable[1024 + i * 2] = (uint32_t)&pgtable[2048] + 636 0x1000 * i + 7; 637 } 638 639 /* Level 2 has 2048 64-bit entries, each repesenting 2MiB */ 640 for (i = 0; i < 2048; i++) 641 pgtable[2048 + i * 2] = 0x183 + (i << 21UL); 642 } 643 644 int cpu_jump_to_64bit(ulong setup_base, ulong target) 645 { 646 uint32_t *pgtable; 647 648 pgtable = memalign(4096, PAGETABLE_SIZE); 649 if (!pgtable) 650 return -ENOMEM; 651 652 build_pagetable(pgtable); 653 cpu_call64((ulong)pgtable, setup_base, target); 654 free(pgtable); 655 656 return -EFAULT; 657 } 658 659 void show_boot_progress(int val) 660 { 661 outb(val, POST_PORT); 662 } 663 664 #ifndef CONFIG_SYS_COREBOOT 665 /* 666 * Implement a weak default function for boards that optionally 667 * need to clean up the system before jumping to the kernel. 668 */ 669 __weak void board_final_cleanup(void) 670 { 671 } 672 673 int last_stage_init(void) 674 { 675 write_tables(); 676 677 board_final_cleanup(); 678 679 return 0; 680 } 681 #endif 682 683 #ifdef CONFIG_SMP 684 static int enable_smis(struct udevice *cpu, void *unused) 685 { 686 return 0; 687 } 688 689 static struct mp_flight_record mp_steps[] = { 690 MP_FR_BLOCK_APS(mp_init_cpu, NULL, mp_init_cpu, NULL), 691 /* Wait for APs to finish initialization before proceeding */ 692 MP_FR_BLOCK_APS(NULL, NULL, enable_smis, NULL), 693 }; 694 695 static int x86_mp_init(void) 696 { 697 struct mp_params mp_params; 698 699 mp_params.parallel_microcode_load = 0, 700 mp_params.flight_plan = &mp_steps[0]; 701 mp_params.num_records = ARRAY_SIZE(mp_steps); 702 mp_params.microcode_pointer = 0; 703 704 if (mp_init(&mp_params)) { 705 printf("Warning: MP init failure\n"); 706 return -EIO; 707 } 708 709 return 0; 710 } 711 #endif 712 713 static int x86_init_cpus(void) 714 { 715 #ifdef CONFIG_SMP 716 debug("Init additional CPUs\n"); 717 x86_mp_init(); 718 #else 719 struct udevice *dev; 720 721 /* 722 * This causes the cpu-x86 driver to be probed. 723 * We don't check return value here as we want to allow boards 724 * which have not been converted to use cpu uclass driver to boot. 725 */ 726 uclass_first_device(UCLASS_CPU, &dev); 727 #endif 728 729 return 0; 730 } 731 732 int cpu_init_r(void) 733 { 734 struct udevice *dev; 735 int ret; 736 737 if (!ll_boot_init()) 738 return 0; 739 740 ret = x86_init_cpus(); 741 if (ret) 742 return ret; 743 744 /* 745 * Set up the northbridge, PCH and LPC if available. Note that these 746 * may have had some limited pre-relocation init if they were probed 747 * before relocation, but this is post relocation. 748 */ 749 uclass_first_device(UCLASS_NORTHBRIDGE, &dev); 750 uclass_first_device(UCLASS_PCH, &dev); 751 uclass_first_device(UCLASS_LPC, &dev); 752 753 return 0; 754 } 755 756 #ifndef CONFIG_EFI_STUB 757 int reserve_arch(void) 758 { 759 #ifdef CONFIG_ENABLE_MRC_CACHE 760 return mrccache_reserve(); 761 #else 762 return 0; 763 #endif 764 } 765 #endif 766