/* SPDX-License-Identifier: GPL-2.0 */
/* hvtramp.S: Hypervisor start-cpu trampoline code.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */


#include <asm/thread_info.h>
#include <asm/hypervisor.h>
#include <asm/scratchpad.h>
#include <asm/spitfire.h>
#include <asm/hvtramp.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/pil.h>

	.align		8
	.globl		hv_cpu_startup, hv_cpu_startup_end

	/* This code executes directly out of the hypervisor
	 * with physical addressing (va==pa).  %o0 contains
	 * our client argument which for Linux points to
	 * a descriptor data structure which defines the
	 * MMU entries we need to load up.
	 *
	 * After we set things up we enable the MMU and call
	 * into the kernel.
	 *
	 * First setup basic privileged cpu state.
	 */
hv_cpu_startup:
	SET_GL(0)
	wrpr		%g0, PIL_NORMAL_MAX, %pil
	wrpr		%g0, 0, %canrestore
	wrpr		%g0, 0, %otherwin
	wrpr		%g0, 6, %cansave
	wrpr		%g0, 6, %cleanwin
	wrpr		%g0, 0, %cwp
	wrpr		%g0, 0, %wstate
	wrpr		%g0, 0, %tl

	/* Point the trap table at the kernel's TL=0 trap table.  */
	sethi		%hi(sparc64_ttable_tl0), %g1
	wrpr		%g1, %tba

	/* %l0 holds the descriptor pointer for the rest of startup.  */
	mov		%o0, %l0

	/* Record our cpu id in the CPUID scratchpad slot.  */
	lduw		[%l0 + HVTRAMP_DESCR_CPU], %g1
	mov		SCRATCHPAD_CPUID, %g2
	stxa		%g1, [%g2] ASI_SCRATCHPAD

	/* Stash the fault status area's virtual address in scratchpad
	 * slot zero.
	 */
	ldx		[%l0 + HVTRAMP_DESCR_FAULT_INFO_VA], %g2
	stxa		%g2, [%g0] ASI_SCRATCHPAD

	/* Install each descriptor entry as a permanent I+D TLB mapping
	 * via the mmu_map_perm_addr hypervisor call, bailing out to the
	 * error loop at 80 on any failure.
	 */
	mov		0, %l1
	lduw		[%l0 + HVTRAMP_DESCR_NUM_MAPPINGS], %l2
	add		%l0, HVTRAMP_DESCR_MAPS, %l3

1:	ldx		[%l3 + HVTRAMP_MAPPING_VADDR], %o0
	clr		%o1
	ldx		[%l3 + HVTRAMP_MAPPING_TTE], %o2
	mov		HV_MMU_IMMU | HV_MMU_DMMU, %o3
	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o5
	ta		HV_FAST_TRAP

	brnz,pn		%o0, 80f
	 nop

	add		%l1, 1, %l1
	cmp		%l1, %l2
	blt,a,pt	%xcc, 1b
	 add		%l3, HVTRAMP_MAPPING_SIZE, %l3

	/* Tell the hypervisor where our MMU fault status area lives.  */
	ldx		[%l0 + HVTRAMP_DESCR_FAULT_INFO_PA], %o0
	mov		HV_FAST_MMU_FAULT_AREA_CONF, %o5
	ta		HV_FAST_TRAP

	brnz,pn		%o0, 80f
	 nop

	wrpr		%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate

	ldx		[%l0 + HVTRAMP_DESCR_THREAD_REG], %l6

	/* Enable address translation, resuming at local label 1 below.  */
	mov		1, %o0
	set		1f, %o1
	mov		HV_FAST_MMU_ENABLE, %o5
	ta		HV_FAST_TRAP

	ba,pt		%xcc, 80f
	 nop

1:
	wr		%g0, 0, %fprs
	wr		%g0, ASI_P, %asi

	/* Clear the primary and secondary MMU context registers.  */
	mov		PRIMARY_CONTEXT, %g7
	stxa		%g0, [%g7] ASI_MMU
	membar		#Sync

	mov		SECONDARY_CONTEXT, %g7
	stxa		%g0, [%g7] ASI_MMU
	membar		#Sync

	/* Load the thread_info pointer into %g6 and the task pointer
	 * into %g4, then point %sp at the initial stack frame at the
	 * top of the thread_info block.
	 */
	mov		%l6, %g6
	ldx		[%g6 + TI_TASK], %g4

	mov		1, %g5
	sllx		%g5, THREAD_SHIFT, %g5
	sub		%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
	add		%g6, %g5, %sp

	call		init_irqwork_curcpu
	 nop
	call		hard_smp_processor_id
	 nop

	call		sun4v_register_mondo_queues
	 nop

	call		init_cur_cpu_trap
	 mov		%g6, %o0

	/* Finally, enable interrupts and report in; smp_callin should
	 * never return.
	 */
	wrpr		%g0, (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE), %pstate

	call		smp_callin
	 nop

	call		cpu_panic
	 nop

80:	ba,pt		%xcc, 80b
	 nop

	.align		8
hv_cpu_startup_end:
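
/* A sketch of the descriptor layout consumed above, reconstructed here
 * from the HVTRAMP_DESCR_* and HVTRAMP_MAPPING_* offsets and the load
 * widths this file uses; the authoritative C definition lives in
 * asm/hvtramp.h, which is included at the top:
 *
 *	struct hvtramp_mapping {
 *		__u64			vaddr;
 *		__u64			tte;
 *	};
 *
 *	struct hvtramp_descr {
 *		__u32			cpu;
 *		__u32			num_mappings;
 *		__u64			fault_info_va;
 *		__u64			fault_info_pa;
 *		__u64			thread_reg;
 *		struct hvtramp_mapping	maps[];
 *	};
 *
 * The caller builds one of these with a permanent mapping entry for
 * each kernel range the new cpu must be able to reach, points
 * thread_reg at the new cpu's thread_info, and passes the descriptor's
 * physical address as the client argument (%o0) of the hypervisor
 * cpu-start call, with hv_cpu_startup as the entry point.
 */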