/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SPARC64_TOPOLOGY_H
#define _ASM_SPARC64_TOPOLOGY_H

#ifdef CONFIG_NUMA

#include <asm/mmzone.h>

/* Map a CPU number to the NUMA node it belongs to. */
static inline int cpu_to_node(int cpu)
{
	return numa_cpu_lookup_table[cpu];
}

/* A node of -1 means "unknown"; fall back to all CPUs. */
#define cpumask_of_node(node) ((node) == -1 ?	\
			       cpu_all_mask :	\
			       &numa_cpumask_lookup_table[node])

struct pci_bus;
#ifdef CONFIG_PCI
int pcibus_to_node(struct pci_bus *pbus);
#else
static inline int pcibus_to_node(struct pci_bus *pbus)
{
	return -1;
}
#endif

#define cpumask_of_pcibus(bus)	\
	(pcibus_to_node(bus) == -1 ?	\
	 cpu_all_mask :			\
	 cpumask_of_node(pcibus_to_node(bus)))

int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)

#else /* CONFIG_NUMA */

#include <asm-generic/topology.h>

#endif /* !(CONFIG_NUMA) */

#ifdef CONFIG_SMP

#include <asm/cpudata.h>

#define topology_physical_package_id(cpu)	(cpu_data(cpu).proc_id)
#define topology_core_id(cpu)			(cpu_data(cpu).core_id)
#define topology_core_cpumask(cpu)		(&cpu_core_sib_map[cpu])
#define topology_core_cache_cpumask(cpu)	(&cpu_core_sib_cache_map[cpu])
#define topology_sibling_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
#endif /* CONFIG_SMP */

extern cpumask_t cpu_core_map[NR_CPUS];
extern cpumask_t cpu_core_sib_map[NR_CPUS];
extern cpumask_t cpu_core_sib_cache_map[NR_CPUS];

/**
 * Return the cores that share the last level cache.
 */
static inline const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_core_sib_cache_map[cpu];
}

#endif /* _ASM_SPARC64_TOPOLOGY_H */