1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun #include "cpumap.h"
3*4882a593Smuzhiyun #include "debug.h"
4*4882a593Smuzhiyun #include "env.h"
5*4882a593Smuzhiyun #include "util/header.h"
6*4882a593Smuzhiyun #include <linux/ctype.h>
7*4882a593Smuzhiyun #include <linux/zalloc.h>
8*4882a593Smuzhiyun #include "bpf-event.h"
9*4882a593Smuzhiyun #include "cgroup.h"
10*4882a593Smuzhiyun #include <errno.h>
11*4882a593Smuzhiyun #include <sys/utsname.h>
12*4882a593Smuzhiyun #include <bpf/libbpf.h>
13*4882a593Smuzhiyun #include <stdlib.h>
14*4882a593Smuzhiyun #include <string.h>
15*4882a593Smuzhiyun
16*4882a593Smuzhiyun struct perf_env perf_env;
17*4882a593Smuzhiyun
perf_env__insert_bpf_prog_info(struct perf_env * env,struct bpf_prog_info_node * info_node)18*4882a593Smuzhiyun void perf_env__insert_bpf_prog_info(struct perf_env *env,
19*4882a593Smuzhiyun struct bpf_prog_info_node *info_node)
20*4882a593Smuzhiyun {
21*4882a593Smuzhiyun __u32 prog_id = info_node->info_linear->info.id;
22*4882a593Smuzhiyun struct bpf_prog_info_node *node;
23*4882a593Smuzhiyun struct rb_node *parent = NULL;
24*4882a593Smuzhiyun struct rb_node **p;
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun down_write(&env->bpf_progs.lock);
27*4882a593Smuzhiyun p = &env->bpf_progs.infos.rb_node;
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun while (*p != NULL) {
30*4882a593Smuzhiyun parent = *p;
31*4882a593Smuzhiyun node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
32*4882a593Smuzhiyun if (prog_id < node->info_linear->info.id) {
33*4882a593Smuzhiyun p = &(*p)->rb_left;
34*4882a593Smuzhiyun } else if (prog_id > node->info_linear->info.id) {
35*4882a593Smuzhiyun p = &(*p)->rb_right;
36*4882a593Smuzhiyun } else {
37*4882a593Smuzhiyun pr_debug("duplicated bpf prog info %u\n", prog_id);
38*4882a593Smuzhiyun goto out;
39*4882a593Smuzhiyun }
40*4882a593Smuzhiyun }
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun rb_link_node(&info_node->rb_node, parent, p);
43*4882a593Smuzhiyun rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
44*4882a593Smuzhiyun env->bpf_progs.infos_cnt++;
45*4882a593Smuzhiyun out:
46*4882a593Smuzhiyun up_write(&env->bpf_progs.lock);
47*4882a593Smuzhiyun }
48*4882a593Smuzhiyun
/*
 * Look up the bpf prog-info node with id @prog_id.
 * Returns the node still linked in the tree, or NULL if not found.
 */
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id)
{
	struct bpf_prog_info_node *found = NULL;
	struct rb_node *cur;

	down_read(&env->bpf_progs.lock);

	cur = env->bpf_progs.infos.rb_node;
	while (cur != NULL) {
		struct bpf_prog_info_node *pos;

		pos = rb_entry(cur, struct bpf_prog_info_node, rb_node);
		if (prog_id < pos->info_linear->info.id) {
			cur = cur->rb_left;
		} else if (prog_id > pos->info_linear->info.id) {
			cur = cur->rb_right;
		} else {
			found = pos;
			break;
		}
	}

	up_read(&env->bpf_progs.lock);
	return found;
}
73*4882a593Smuzhiyun
perf_env__insert_btf(struct perf_env * env,struct btf_node * btf_node)74*4882a593Smuzhiyun bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
75*4882a593Smuzhiyun {
76*4882a593Smuzhiyun struct rb_node *parent = NULL;
77*4882a593Smuzhiyun __u32 btf_id = btf_node->id;
78*4882a593Smuzhiyun struct btf_node *node;
79*4882a593Smuzhiyun struct rb_node **p;
80*4882a593Smuzhiyun bool ret = true;
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun down_write(&env->bpf_progs.lock);
83*4882a593Smuzhiyun p = &env->bpf_progs.btfs.rb_node;
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun while (*p != NULL) {
86*4882a593Smuzhiyun parent = *p;
87*4882a593Smuzhiyun node = rb_entry(parent, struct btf_node, rb_node);
88*4882a593Smuzhiyun if (btf_id < node->id) {
89*4882a593Smuzhiyun p = &(*p)->rb_left;
90*4882a593Smuzhiyun } else if (btf_id > node->id) {
91*4882a593Smuzhiyun p = &(*p)->rb_right;
92*4882a593Smuzhiyun } else {
93*4882a593Smuzhiyun pr_debug("duplicated btf %u\n", btf_id);
94*4882a593Smuzhiyun ret = false;
95*4882a593Smuzhiyun goto out;
96*4882a593Smuzhiyun }
97*4882a593Smuzhiyun }
98*4882a593Smuzhiyun
99*4882a593Smuzhiyun rb_link_node(&btf_node->rb_node, parent, p);
100*4882a593Smuzhiyun rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
101*4882a593Smuzhiyun env->bpf_progs.btfs_cnt++;
102*4882a593Smuzhiyun out:
103*4882a593Smuzhiyun up_write(&env->bpf_progs.lock);
104*4882a593Smuzhiyun return ret;
105*4882a593Smuzhiyun }
106*4882a593Smuzhiyun
/*
 * Look up the BTF node with id @btf_id.
 * Returns the node still linked in the tree, or NULL if not found.
 */
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *found = NULL;
	struct rb_node *cur;

	down_read(&env->bpf_progs.lock);

	cur = env->bpf_progs.btfs.rb_node;
	while (cur != NULL) {
		struct btf_node *pos;

		pos = rb_entry(cur, struct btf_node, rb_node);
		if (btf_id < pos->id) {
			cur = cur->rb_left;
		} else if (btf_id > pos->id) {
			cur = cur->rb_right;
		} else {
			found = pos;
			break;
		}
	}

	up_read(&env->bpf_progs.lock);
	return found;
}
130*4882a593Smuzhiyun
131*4882a593Smuzhiyun /* purge data in bpf_progs.infos tree */
perf_env__purge_bpf(struct perf_env * env)132*4882a593Smuzhiyun static void perf_env__purge_bpf(struct perf_env *env)
133*4882a593Smuzhiyun {
134*4882a593Smuzhiyun struct rb_root *root;
135*4882a593Smuzhiyun struct rb_node *next;
136*4882a593Smuzhiyun
137*4882a593Smuzhiyun down_write(&env->bpf_progs.lock);
138*4882a593Smuzhiyun
139*4882a593Smuzhiyun root = &env->bpf_progs.infos;
140*4882a593Smuzhiyun next = rb_first(root);
141*4882a593Smuzhiyun
142*4882a593Smuzhiyun while (next) {
143*4882a593Smuzhiyun struct bpf_prog_info_node *node;
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun node = rb_entry(next, struct bpf_prog_info_node, rb_node);
146*4882a593Smuzhiyun next = rb_next(&node->rb_node);
147*4882a593Smuzhiyun rb_erase(&node->rb_node, root);
148*4882a593Smuzhiyun free(node->info_linear);
149*4882a593Smuzhiyun free(node);
150*4882a593Smuzhiyun }
151*4882a593Smuzhiyun
152*4882a593Smuzhiyun env->bpf_progs.infos_cnt = 0;
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun root = &env->bpf_progs.btfs;
155*4882a593Smuzhiyun next = rb_first(root);
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun while (next) {
158*4882a593Smuzhiyun struct btf_node *node;
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun node = rb_entry(next, struct btf_node, rb_node);
161*4882a593Smuzhiyun next = rb_next(&node->rb_node);
162*4882a593Smuzhiyun rb_erase(&node->rb_node, root);
163*4882a593Smuzhiyun free(node);
164*4882a593Smuzhiyun }
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun env->bpf_progs.btfs_cnt = 0;
167*4882a593Smuzhiyun
168*4882a593Smuzhiyun up_write(&env->bpf_progs.lock);
169*4882a593Smuzhiyun }
170*4882a593Smuzhiyun
/*
 * Release everything owned by @env.  The perf_env struct itself is not
 * freed: it is typically embedded (e.g. the global 'perf_env' above or a
 * session header), so only its members are torn down here.
 */
void perf_env__exit(struct perf_env *env)
{
	int i;

	perf_env__purge_bpf(env);
	perf_env__purge_cgroups(env);
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);
	zfree(&env->cmdline);
	/*
	 * NOTE(review): only the cmdline_argv array itself is freed here;
	 * confirm element ownership against perf_env__set_cmdline() to make
	 * sure the individual strings are not leaked.
	 */
	zfree(&env->cmdline_argv);
	zfree(&env->sibling_dies);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->pmu_mappings);
	zfree(&env->cpu);
	zfree(&env->cpu_pmu_caps);
	zfree(&env->numa_map);

	/* Drop the cpu-map reference held by each NUMA node description. */
	for (i = 0; i < env->nr_numa_nodes; i++)
		perf_cpu_map__put(env->numa_nodes[i].map);
	zfree(&env->numa_nodes);

	/* Each cache level owns several strings; free them before the array. */
	for (i = 0; i < env->caches_cnt; i++)
		cpu_cache_level__free(&env->caches[i]);
	zfree(&env->caches);

	for (i = 0; i < env->nr_memory_nodes; i++)
		zfree(&env->memory_nodes[i].set);
	zfree(&env->memory_nodes);
}
205*4882a593Smuzhiyun
perf_env__init(struct perf_env * env)206*4882a593Smuzhiyun void perf_env__init(struct perf_env *env)
207*4882a593Smuzhiyun {
208*4882a593Smuzhiyun env->bpf_progs.infos = RB_ROOT;
209*4882a593Smuzhiyun env->bpf_progs.btfs = RB_ROOT;
210*4882a593Smuzhiyun init_rwsem(&env->bpf_progs.lock);
211*4882a593Smuzhiyun }
212*4882a593Smuzhiyun
perf_env__set_cmdline(struct perf_env * env,int argc,const char * argv[])213*4882a593Smuzhiyun int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
214*4882a593Smuzhiyun {
215*4882a593Smuzhiyun int i;
216*4882a593Smuzhiyun
217*4882a593Smuzhiyun /* do not include NULL termination */
218*4882a593Smuzhiyun env->cmdline_argv = calloc(argc, sizeof(char *));
219*4882a593Smuzhiyun if (env->cmdline_argv == NULL)
220*4882a593Smuzhiyun goto out_enomem;
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun /*
223*4882a593Smuzhiyun * Must copy argv contents because it gets moved around during option
224*4882a593Smuzhiyun * parsing:
225*4882a593Smuzhiyun */
226*4882a593Smuzhiyun for (i = 0; i < argc ; i++) {
227*4882a593Smuzhiyun env->cmdline_argv[i] = argv[i];
228*4882a593Smuzhiyun if (env->cmdline_argv[i] == NULL)
229*4882a593Smuzhiyun goto out_free;
230*4882a593Smuzhiyun }
231*4882a593Smuzhiyun
232*4882a593Smuzhiyun env->nr_cmdline = argc;
233*4882a593Smuzhiyun
234*4882a593Smuzhiyun return 0;
235*4882a593Smuzhiyun out_free:
236*4882a593Smuzhiyun zfree(&env->cmdline_argv);
237*4882a593Smuzhiyun out_enomem:
238*4882a593Smuzhiyun return -ENOMEM;
239*4882a593Smuzhiyun }
240*4882a593Smuzhiyun
perf_env__read_cpu_topology_map(struct perf_env * env)241*4882a593Smuzhiyun int perf_env__read_cpu_topology_map(struct perf_env *env)
242*4882a593Smuzhiyun {
243*4882a593Smuzhiyun int cpu, nr_cpus;
244*4882a593Smuzhiyun
245*4882a593Smuzhiyun if (env->cpu != NULL)
246*4882a593Smuzhiyun return 0;
247*4882a593Smuzhiyun
248*4882a593Smuzhiyun if (env->nr_cpus_avail == 0)
249*4882a593Smuzhiyun env->nr_cpus_avail = cpu__max_present_cpu();
250*4882a593Smuzhiyun
251*4882a593Smuzhiyun nr_cpus = env->nr_cpus_avail;
252*4882a593Smuzhiyun if (nr_cpus == -1)
253*4882a593Smuzhiyun return -EINVAL;
254*4882a593Smuzhiyun
255*4882a593Smuzhiyun env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
256*4882a593Smuzhiyun if (env->cpu == NULL)
257*4882a593Smuzhiyun return -ENOMEM;
258*4882a593Smuzhiyun
259*4882a593Smuzhiyun for (cpu = 0; cpu < nr_cpus; ++cpu) {
260*4882a593Smuzhiyun env->cpu[cpu].core_id = cpu_map__get_core_id(cpu);
261*4882a593Smuzhiyun env->cpu[cpu].socket_id = cpu_map__get_socket_id(cpu);
262*4882a593Smuzhiyun env->cpu[cpu].die_id = cpu_map__get_die_id(cpu);
263*4882a593Smuzhiyun }
264*4882a593Smuzhiyun
265*4882a593Smuzhiyun env->nr_cpus_avail = nr_cpus;
266*4882a593Smuzhiyun return 0;
267*4882a593Smuzhiyun }
268*4882a593Smuzhiyun
perf_env__read_cpuid(struct perf_env * env)269*4882a593Smuzhiyun int perf_env__read_cpuid(struct perf_env *env)
270*4882a593Smuzhiyun {
271*4882a593Smuzhiyun char cpuid[128];
272*4882a593Smuzhiyun int err = get_cpuid(cpuid, sizeof(cpuid));
273*4882a593Smuzhiyun
274*4882a593Smuzhiyun if (err)
275*4882a593Smuzhiyun return err;
276*4882a593Smuzhiyun
277*4882a593Smuzhiyun free(env->cpuid);
278*4882a593Smuzhiyun env->cpuid = strdup(cpuid);
279*4882a593Smuzhiyun if (env->cpuid == NULL)
280*4882a593Smuzhiyun return ENOMEM;
281*4882a593Smuzhiyun return 0;
282*4882a593Smuzhiyun }
283*4882a593Smuzhiyun
perf_env__read_arch(struct perf_env * env)284*4882a593Smuzhiyun static int perf_env__read_arch(struct perf_env *env)
285*4882a593Smuzhiyun {
286*4882a593Smuzhiyun struct utsname uts;
287*4882a593Smuzhiyun
288*4882a593Smuzhiyun if (env->arch)
289*4882a593Smuzhiyun return 0;
290*4882a593Smuzhiyun
291*4882a593Smuzhiyun if (!uname(&uts))
292*4882a593Smuzhiyun env->arch = strdup(uts.machine);
293*4882a593Smuzhiyun
294*4882a593Smuzhiyun return env->arch ? 0 : -ENOMEM;
295*4882a593Smuzhiyun }
296*4882a593Smuzhiyun
perf_env__read_nr_cpus_avail(struct perf_env * env)297*4882a593Smuzhiyun static int perf_env__read_nr_cpus_avail(struct perf_env *env)
298*4882a593Smuzhiyun {
299*4882a593Smuzhiyun if (env->nr_cpus_avail == 0)
300*4882a593Smuzhiyun env->nr_cpus_avail = cpu__max_present_cpu();
301*4882a593Smuzhiyun
302*4882a593Smuzhiyun return env->nr_cpus_avail ? 0 : -ENOENT;
303*4882a593Smuzhiyun }
304*4882a593Smuzhiyun
perf_env__raw_arch(struct perf_env * env)305*4882a593Smuzhiyun const char *perf_env__raw_arch(struct perf_env *env)
306*4882a593Smuzhiyun {
307*4882a593Smuzhiyun return env && !perf_env__read_arch(env) ? env->arch : "unknown";
308*4882a593Smuzhiyun }
309*4882a593Smuzhiyun
perf_env__nr_cpus_avail(struct perf_env * env)310*4882a593Smuzhiyun int perf_env__nr_cpus_avail(struct perf_env *env)
311*4882a593Smuzhiyun {
312*4882a593Smuzhiyun return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
313*4882a593Smuzhiyun }
314*4882a593Smuzhiyun
cpu_cache_level__free(struct cpu_cache_level * cache)315*4882a593Smuzhiyun void cpu_cache_level__free(struct cpu_cache_level *cache)
316*4882a593Smuzhiyun {
317*4882a593Smuzhiyun zfree(&cache->type);
318*4882a593Smuzhiyun zfree(&cache->map);
319*4882a593Smuzhiyun zfree(&cache->size);
320*4882a593Smuzhiyun }
321*4882a593Smuzhiyun
/*
 * Return architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
	if (!strcmp(arch, "x86_64"))
		return "x86";
	/*
	 * i386/i486/i586/i686: guard arch[1] first so a 1-char string
	 * ("i") cannot make us read past its NUL terminator.
	 */
	if (arch[0] == 'i' && arch[1] != '\0' && arch[2] == '8' && arch[3] == '6')
		return "x86";
	if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
		return "sparc";
	if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
		return "arm64";
	if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
		return "arm";
	if (!strncmp(arch, "s390", 4))
		return "s390";
	if (!strncmp(arch, "parisc", 6))
		return "parisc";
	if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
		return "powerpc";
	if (!strncmp(arch, "mips", 4))
		return "mips";
	/* "sh2".."sh5" etc.; cast keeps isdigit() well-defined for any char */
	if (!strncmp(arch, "sh", 2) && isdigit((unsigned char)arch[2]))
		return "sh";

	/* No known mapping: hand back the caller's string unchanged. */
	return arch;
}
351*4882a593Smuzhiyun
perf_env__arch(struct perf_env * env)352*4882a593Smuzhiyun const char *perf_env__arch(struct perf_env *env)
353*4882a593Smuzhiyun {
354*4882a593Smuzhiyun char *arch_name;
355*4882a593Smuzhiyun
356*4882a593Smuzhiyun if (!env || !env->arch) { /* Assume local operation */
357*4882a593Smuzhiyun static struct utsname uts = { .machine[0] = '\0', };
358*4882a593Smuzhiyun if (uts.machine[0] == '\0' && uname(&uts) < 0)
359*4882a593Smuzhiyun return NULL;
360*4882a593Smuzhiyun arch_name = uts.machine;
361*4882a593Smuzhiyun } else
362*4882a593Smuzhiyun arch_name = env->arch;
363*4882a593Smuzhiyun
364*4882a593Smuzhiyun return normalize_arch(arch_name);
365*4882a593Smuzhiyun }
366*4882a593Smuzhiyun
367*4882a593Smuzhiyun
/*
 * Map @cpu to its NUMA node index, or -1 when the cpu is out of range or
 * belongs to no recorded node.
 *
 * The cpu -> node lookup table (env->numa_map) is built lazily on first
 * call from env->numa_nodes.  NOTE(review): the build is not protected by
 * any lock; presumably callers are single-threaded here - confirm.
 */
int perf_env__numa_node(struct perf_env *env, int cpu)
{
	if (!env->nr_numa_map) {
		struct numa_node *nn;
		int i, nr = 0;

		/* Find the highest cpu id covered by any node's cpu map. */
		for (i = 0; i < env->nr_numa_nodes; i++) {
			nn = &env->numa_nodes[i];
			nr = max(nr, perf_cpu_map__max(nn->map));
		}

		nr++; /* max cpu id -> array length */

		/*
		 * We initialize the numa_map array to prepare
		 * it for missing cpus, which return node -1
		 */
		env->numa_map = malloc(nr * sizeof(int));
		if (!env->numa_map)
			return -1;

		for (i = 0; i < nr; i++)
			env->numa_map[i] = -1;

		env->nr_numa_map = nr;

		/* Second pass: stamp each node's cpus with the node index. */
		for (i = 0; i < env->nr_numa_nodes; i++) {
			int tmp, j;

			nn = &env->numa_nodes[i];
			perf_cpu_map__for_each_cpu(j, tmp, nn->map)
				env->numa_map[j] = i;
		}
	}

	return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
}
405