/*
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
 * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002 Ralf Baechle
 * Copyright (C) 2000, 2001 Broadcom Corporation
 */
#ifndef __ASM_SMP_H
#define __ASM_SMP_H

#include <linux/bitops.h>
#include <linux/linkage.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/cpumask.h>

#include <linux/atomic.h>
#include <asm/smp-ops.h>

extern int smp_num_siblings;
extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
extern cpumask_t cpu_foreign_map[];

static inline int raw_smp_processor_id(void)
{
#if defined(__VDSO__)
	extern int vdso_smp_processor_id(void)
		__compiletime_error("VDSO should not call smp_processor_id()");
	return vdso_smp_processor_id();
#else
	return current_thread_info()->cpu;
#endif
}
#define raw_smp_processor_id raw_smp_processor_id

/*
 * Map from cpu id to sequential logical cpu number. This mapping is
 * only non-idempotent when CPUs fail to come online.
 */
extern int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];
#define cpu_number_map(cpu)  __cpu_number_map[cpu]

/* The reverse map from sequential logical cpu number to cpu id. */
extern int __cpu_logical_map[NR_CPUS];
#define cpu_logical_map(cpu)  __cpu_logical_map[cpu]

#define NO_PROC_ID	(-1)

/* IPI message types, passed to mp_ops->send_ipi_{single,mask}() */
#define SMP_RESCHEDULE_YOURSELF	0x1	/* XXX braindead */
#define SMP_CALL_FUNCTION	0x2
/* Octeon - Tell another core to flush its icache */
#define SMP_ICACHE_FLUSH	0x4
/* Loongson - ask another core for its c0 count */
#define SMP_ASK_C0COUNT		0x8

/* Mask of CPUs which are currently definitely operating coherently */
extern cpumask_t cpu_coherent_mask;

extern asmlinkage void smp_bootstrap(void);

extern void calculate_cpu_foreign_map(void);

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static inline void smp_send_reschedule(int cpu)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF);
}

#ifdef CONFIG_HOTPLUG_CPU
static inline int __cpu_disable(void)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	return mp_ops->cpu_disable();
}

static inline void __cpu_die(unsigned int cpu)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	mp_ops->cpu_die(cpu);
}

extern void play_dead(void);
#endif

#ifdef CONFIG_KEXEC
static inline void kexec_nonboot_cpu(void)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	return mp_ops->kexec_nonboot_cpu();
}

static inline void *kexec_nonboot_cpu_func(void)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	return mp_ops->kexec_nonboot_cpu;
}
#endif

/*
 * This function will set up the necessary IPIs for Linux to communicate
 * with the CPUs in mask.
 * Return 0 on success.
 */
int mips_smp_ipi_allocate(const struct cpumask *mask);

/*
 * This function will free the IPIs allocated with mips_smp_ipi_allocate()
 * for the CPUs in mask, which must be a subset of the CPUs for which IPIs
 * have been configured.
 * Return 0 on success.
 */
int mips_smp_ipi_free(const struct cpumask *mask);

static inline void arch_send_call_function_single_ipi(int cpu)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	mp_ops->send_ipi_single(cpu, SMP_CALL_FUNCTION);
}

static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
}

#endif /* __ASM_SMP_H */
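
/*
 * Usage sketch (illustration only, not part of this header): how code
 * might reserve IPIs for a set of CPUs with mips_smp_ipi_allocate(),
 * cross-call those CPUs, and then release the IPIs again with
 * mips_smp_ipi_free(). The names example_do_work() and
 * example_cross_call() are hypothetical; only the mips_smp_ipi_*
 * declarations above and the generic on_each_cpu_mask() helper are
 * real interfaces. Guarded with #if 0 so it is never compiled.
 */
#if 0
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/printk.h>
#include <asm/smp.h>

/* Runs on every CPU in the target mask, in IPI (interrupt) context. */
static void example_do_work(void *info)
{
	pr_info("CPU %d handled example IPI\n", smp_processor_id());
}

static int example_cross_call(const struct cpumask *mask)
{
	int err;

	/* Make sure IPIs are wired up for every CPU in mask. */
	err = mips_smp_ipi_allocate(mask);
	if (err)
		return err;

	/*
	 * Cross-call the CPUs in mask; this eventually reaches
	 * mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION) via the
	 * arch_send_call_function_ipi_mask() helper above.
	 */
	on_each_cpu_mask(mask, example_do_work, NULL, true);

	/* Drop the IPIs once they are no longer needed. */
	return mips_smp_ipi_free(mask);
}
#endif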