/*
 * Written by: Matthew Dobson, IBM Corporation
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
25*4882a593Smuzhiyun #ifndef _ASM_X86_TOPOLOGY_H
26*4882a593Smuzhiyun #define _ASM_X86_TOPOLOGY_H
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun /*
29*4882a593Smuzhiyun * to preserve the visibility of NUMA_NO_NODE definition,
30*4882a593Smuzhiyun * moved to there from here. May be used independent of
31*4882a593Smuzhiyun * CONFIG_NUMA.
32*4882a593Smuzhiyun */
33*4882a593Smuzhiyun #include <linux/numa.h>
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun #ifdef CONFIG_NUMA
36*4882a593Smuzhiyun #include <linux/cpumask.h>
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun #include <asm/mpspec.h>
39*4882a593Smuzhiyun #include <asm/percpu.h>
40*4882a593Smuzhiyun
41*4882a593Smuzhiyun /* Mappings between logical cpu number and node number */
42*4882a593Smuzhiyun DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
43*4882a593Smuzhiyun
44*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_PER_CPU_MAPS
45*4882a593Smuzhiyun /*
46*4882a593Smuzhiyun * override generic percpu implementation of cpu_to_node
47*4882a593Smuzhiyun */
48*4882a593Smuzhiyun extern int __cpu_to_node(int cpu);
49*4882a593Smuzhiyun #define cpu_to_node __cpu_to_node
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun extern int early_cpu_to_node(int cpu);
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun #else /* !CONFIG_DEBUG_PER_CPU_MAPS */
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun /* Same function but used if called before per_cpu areas are setup */
early_cpu_to_node(int cpu)56*4882a593Smuzhiyun static inline int early_cpu_to_node(int cpu)
57*4882a593Smuzhiyun {
58*4882a593Smuzhiyun return early_per_cpu(x86_cpu_to_node_map, cpu);
59*4882a593Smuzhiyun }
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
62*4882a593Smuzhiyun
63*4882a593Smuzhiyun /* Mappings between node number and cpus on that node. */
64*4882a593Smuzhiyun extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
65*4882a593Smuzhiyun
66*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_PER_CPU_MAPS
67*4882a593Smuzhiyun extern const struct cpumask *cpumask_of_node(int node);
68*4882a593Smuzhiyun #else
69*4882a593Smuzhiyun /* Returns a pointer to the cpumask of CPUs on Node 'node'. */
cpumask_of_node(int node)70*4882a593Smuzhiyun static inline const struct cpumask *cpumask_of_node(int node)
71*4882a593Smuzhiyun {
72*4882a593Smuzhiyun return node_to_cpumask_map[node];
73*4882a593Smuzhiyun }
74*4882a593Smuzhiyun #endif
75*4882a593Smuzhiyun
76*4882a593Smuzhiyun extern void setup_node_to_cpumask_map(void);
77*4882a593Smuzhiyun
78*4882a593Smuzhiyun #define pcibus_to_node(bus) __pcibus_to_node(bus)
79*4882a593Smuzhiyun
80*4882a593Smuzhiyun extern int __node_distance(int, int);
81*4882a593Smuzhiyun #define node_distance(a, b) __node_distance(a, b)
82*4882a593Smuzhiyun
83*4882a593Smuzhiyun #else /* !CONFIG_NUMA */
84*4882a593Smuzhiyun
/* Without CONFIG_NUMA there is exactly one node: node 0. */
static inline int numa_node_id(void)
{
	return 0;
}
/*
 * indicate override:
 */
#define numa_node_id numa_node_id

/* !CONFIG_NUMA stub: every CPU belongs to node 0. */
static inline int early_cpu_to_node(int cpu)
{
	return 0;
}

/* !CONFIG_NUMA stub: no node-to-cpumask map exists, nothing to set up. */
static inline void setup_node_to_cpumask_map(void) { }
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun #endif
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun #include <asm-generic/topology.h>
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun extern const struct cpumask *cpu_coregroup_mask(int cpu);
106*4882a593Smuzhiyun
107*4882a593Smuzhiyun #define topology_logical_package_id(cpu) (cpu_data(cpu).logical_proc_id)
108*4882a593Smuzhiyun #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
109*4882a593Smuzhiyun #define topology_logical_die_id(cpu) (cpu_data(cpu).logical_die_id)
110*4882a593Smuzhiyun #define topology_die_id(cpu) (cpu_data(cpu).cpu_die_id)
111*4882a593Smuzhiyun #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun extern unsigned int __max_die_per_package;
114*4882a593Smuzhiyun
115*4882a593Smuzhiyun #ifdef CONFIG_SMP
116*4882a593Smuzhiyun #define topology_die_cpumask(cpu) (per_cpu(cpu_die_map, cpu))
117*4882a593Smuzhiyun #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
118*4882a593Smuzhiyun #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun extern unsigned int __max_logical_packages;
121*4882a593Smuzhiyun #define topology_max_packages() (__max_logical_packages)
122*4882a593Smuzhiyun
topology_max_die_per_package(void)123*4882a593Smuzhiyun static inline int topology_max_die_per_package(void)
124*4882a593Smuzhiyun {
125*4882a593Smuzhiyun return __max_die_per_package;
126*4882a593Smuzhiyun }

extern int __max_smt_threads;

/* Maximum SMT threads per core, as tracked in __max_smt_threads. */
static inline int topology_max_smt_threads(void)
{
	return __max_smt_threads;
}
134*4882a593Smuzhiyun
135*4882a593Smuzhiyun int topology_update_package_map(unsigned int apicid, unsigned int cpu);
136*4882a593Smuzhiyun int topology_update_die_map(unsigned int dieid, unsigned int cpu);
137*4882a593Smuzhiyun int topology_phys_to_logical_pkg(unsigned int pkg);
138*4882a593Smuzhiyun int topology_phys_to_logical_die(unsigned int die, unsigned int cpu);
139*4882a593Smuzhiyun bool topology_is_primary_thread(unsigned int cpu);
140*4882a593Smuzhiyun bool topology_smt_supported(void);
141*4882a593Smuzhiyun #else
142*4882a593Smuzhiyun #define topology_max_packages() (1)
143*4882a593Smuzhiyun static inline int
topology_update_package_map(unsigned int apicid,unsigned int cpu)144*4882a593Smuzhiyun topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; }
145*4882a593Smuzhiyun static inline int
topology_update_die_map(unsigned int dieid,unsigned int cpu)146*4882a593Smuzhiyun topology_update_die_map(unsigned int dieid, unsigned int cpu) { return 0; }
topology_phys_to_logical_pkg(unsigned int pkg)147*4882a593Smuzhiyun static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
topology_phys_to_logical_die(unsigned int die,unsigned int cpu)148*4882a593Smuzhiyun static inline int topology_phys_to_logical_die(unsigned int die,
149*4882a593Smuzhiyun unsigned int cpu) { return 0; }
topology_max_die_per_package(void)150*4882a593Smuzhiyun static inline int topology_max_die_per_package(void) { return 1; }
topology_max_smt_threads(void)151*4882a593Smuzhiyun static inline int topology_max_smt_threads(void) { return 1; }
topology_is_primary_thread(unsigned int cpu)152*4882a593Smuzhiyun static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
topology_smt_supported(void)153*4882a593Smuzhiyun static inline bool topology_smt_supported(void) { return false; }
154*4882a593Smuzhiyun #endif
155*4882a593Smuzhiyun
/* No-op on x86: the physical package id needs no firmware-driven fixup. */
static inline void arch_fix_phys_package_id(int num, u32 slot)
{
}
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun struct pci_bus;
161*4882a593Smuzhiyun int x86_pci_root_bus_node(int bus);
162*4882a593Smuzhiyun void x86_pci_root_bus_resources(int bus, struct list_head *resources);
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun extern bool x86_topology_update;
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun #ifdef CONFIG_SCHED_MC_PRIO
167*4882a593Smuzhiyun #include <asm/percpu.h>
168*4882a593Smuzhiyun
169*4882a593Smuzhiyun DECLARE_PER_CPU_READ_MOSTLY(int, sched_core_priority);
170*4882a593Smuzhiyun extern unsigned int __read_mostly sysctl_sched_itmt_enabled;
171*4882a593Smuzhiyun
172*4882a593Smuzhiyun /* Interface to set priority of a cpu */
173*4882a593Smuzhiyun void sched_set_itmt_core_prio(int prio, int core_cpu);
174*4882a593Smuzhiyun
175*4882a593Smuzhiyun /* Interface to notify scheduler that system supports ITMT */
176*4882a593Smuzhiyun int sched_set_itmt_support(void);
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun /* Interface to notify scheduler that system revokes ITMT support */
179*4882a593Smuzhiyun void sched_clear_itmt_support(void);
180*4882a593Smuzhiyun
181*4882a593Smuzhiyun #else /* CONFIG_SCHED_MC_PRIO */
182*4882a593Smuzhiyun
183*4882a593Smuzhiyun #define sysctl_sched_itmt_enabled 0
/* ITMT stubs when CONFIG_SCHED_MC_PRIO is disabled: no priorities, no support. */
static inline void sched_set_itmt_core_prio(int prio, int core_cpu)
{
}
static inline int sched_set_itmt_support(void)
{
	return 0;
}
static inline void sched_clear_itmt_support(void)
{
}
194*4882a593Smuzhiyun #endif /* CONFIG_SCHED_MC_PRIO */
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun #if defined(CONFIG_SMP) && defined(CONFIG_X86_64)
197*4882a593Smuzhiyun #include <asm/cpufeature.h>
198*4882a593Smuzhiyun
199*4882a593Smuzhiyun DECLARE_STATIC_KEY_FALSE(arch_scale_freq_key);
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun #define arch_scale_freq_invariant() static_branch_likely(&arch_scale_freq_key)
202*4882a593Smuzhiyun
203*4882a593Smuzhiyun DECLARE_PER_CPU(unsigned long, arch_freq_scale);
204*4882a593Smuzhiyun
arch_scale_freq_capacity(int cpu)205*4882a593Smuzhiyun static inline long arch_scale_freq_capacity(int cpu)
206*4882a593Smuzhiyun {
207*4882a593Smuzhiyun return per_cpu(arch_freq_scale, cpu);
208*4882a593Smuzhiyun }
209*4882a593Smuzhiyun #define arch_scale_freq_capacity arch_scale_freq_capacity
210*4882a593Smuzhiyun
211*4882a593Smuzhiyun extern void arch_scale_freq_tick(void);
212*4882a593Smuzhiyun #define arch_scale_freq_tick arch_scale_freq_tick
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun extern void arch_set_max_freq_ratio(bool turbo_disabled);
215*4882a593Smuzhiyun #else
/* Stub: frequency invariance is unavailable here, nothing to configure. */
static inline void arch_set_max_freq_ratio(bool turbo_disabled)
{
}
219*4882a593Smuzhiyun #endif
220*4882a593Smuzhiyun
221*4882a593Smuzhiyun #endif /* _ASM_X86_TOPOLOGY_H */
222