// SPDX-License-Identifier: GPL-2.0
#include <linux/thread_info.h>
#include <asm/smp.h>

#include <xen/events.h>

#include "xen-ops.h"
#include "smp.h"

static void __init xen_hvm_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/*
	 * Set up vcpu_info for the boot CPU. Secondary CPUs get their
	 * vcpu_info in xen_cpu_up_prepare_hvm().
	 */
	xen_vcpu_setup(0);

	/*
	 * Called again in case the kernel boots on vcpu >= MAX_VIRT_CPUS.
	 * Refer to comments in xen_hvm_init_time_ops().
	 */
	xen_hvm_init_time_ops();

	/*
	 * The alternative logic (which patches the unlock/lock) runs before
	 * the SMP bootup code is activated, so this must be set up while the
	 * core kernel is being patched. Otherwise only modules would be
	 * patched, not core code.
	 */
	xen_init_spinlocks();
}

static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu;

	native_smp_prepare_cpus(max_cpus);

	if (xen_have_vector_callback) {
		WARN_ON(xen_smp_intr_init(0));
		xen_init_lock_cpu(0);
	}

	for_each_possible_cpu(cpu) {
		if (cpu == 0)
			continue;

		/* Set a default vcpu_id so we never reuse CPU 0's. */
		per_cpu(xen_vcpu_id, cpu) = XEN_VCPU_ID_INVALID;
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static void xen_hvm_cpu_die(unsigned int cpu)
{
	if (common_cpu_die(cpu) == 0) {
		if (xen_have_vector_callback) {
			xen_smp_intr_free(cpu);
			xen_uninit_lock_cpu(cpu);
			xen_teardown_timer(cpu);
		}
	}
}
#else
static void xen_hvm_cpu_die(unsigned int cpu)
{
	BUG();
}
#endif

void __init xen_hvm_smp_init(void)
{
	smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
	smp_ops.smp_cpus_done = xen_smp_cpus_done;
	smp_ops.cpu_die = xen_hvm_cpu_die;

	/*
	 * Without the vector callback there are no per-CPU event channel
	 * interrupts, so the Xen IPI hooks and PV spinlocks cannot be used;
	 * fall back to the native implementations.
	 */
	if (!xen_have_vector_callback) {
#ifdef CONFIG_PARAVIRT_SPINLOCKS
		nopvspin = true;
#endif
		return;
	}

	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
}
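
/*
 * Usage sketch (not part of this file; a hedged illustration): these HVM
 * smp_ops are installed early in HVM guest bringup, before the boot CPU
 * is prepared. Assuming the xen_hvm_guest_init() layout in
 * arch/x86/xen/enlighten_hvm.c, the caller looks roughly like:
 *
 *	static void __init xen_hvm_guest_init(void)
 *	{
 *		...
 *	#ifdef CONFIG_SMP
 *		xen_hvm_smp_init();
 *	#endif
 *		...
 *	}
 */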