// SPDX-License-Identifier: GPL-2.0
#include <api/fs/fs.h>
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include <assert.h>
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include "asm/bug.h"

#include <linux/ctype.h>
#include <linux/zalloc.h>

static int max_cpu_num;
static int max_present_cpu_num;
static int max_node_num;
static int *cpunode_map;

static struct perf_cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
{
	struct perf_cpu_map *map;

	map = perf_cpu_map__empty_new(cpus->nr);
	if (map) {
		unsigned i;

		for (i = 0; i < cpus->nr; i++) {
			/*
			 * Special treatment for -1, which is not a real cpu
			 * number, and we need to use (int) -1 to initialize
			 * map[i], otherwise it would become 65535.
			 */
			if (cpus->cpu[i] == (u16) -1)
				map->map[i] = -1;
			else
				map->map[i] = (int) cpus->cpu[i];
		}
	}

	return map;
}

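/*
 * Worked example (illustrative): a mask built from one 8-byte long
 * (mask->nr == 1, mask->long_size == 8) spans nbits == 1 * 8 * 8 == 64
 * bits; if bits 0 and 2 are set, bitmap_weight() reports 2 and the
 * decoded map holds cpus {0, 2}.
 */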
static struct perf_cpu_map *cpu_map__from_mask(struct perf_record_record_cpu_map *mask)
{
	struct perf_cpu_map *map;
	int nr, nbits = mask->nr * mask->long_size * BITS_PER_BYTE;

	nr = bitmap_weight(mask->mask, nbits);

	map = perf_cpu_map__empty_new(nr);
	if (map) {
		int cpu, i = 0;

		for_each_set_bit(cpu, mask->mask, nbits)
			map->map[i++] = cpu;
	}
	return map;
}

struct perf_cpu_map *cpu_map__new_data(struct perf_record_cpu_map_data *data)
{
	if (data->type == PERF_CPU_MAP__CPUS)
		return cpu_map__from_entries((struct cpu_map_entries *)data->data);
	else
		return cpu_map__from_mask((struct perf_record_record_cpu_map *)data->data);
}

size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp)
{
#define BUFSIZE 1024
	char buf[BUFSIZE];

	cpu_map__snprint(map, buf, sizeof(buf));
	return fprintf(fp, "%s\n", buf);
#undef BUFSIZE
}

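/*
 * A minimal usage sketch (hypothetical): perf_cpu_map__empty_new(4)
 * returns a map with nr == 4, every entry initialized to -1 and the
 * refcount set to 1; callers then fill map->map[] with real cpu ids.
 */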
struct perf_cpu_map *perf_cpu_map__empty_new(int nr)
{
	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);

	if (cpus != NULL) {
		int i;

		cpus->nr = nr;
		for (i = 0; i < nr; i++)
			cpus->map[i] = -1;

		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

static int cpu__get_topology_int(int cpu, const char *name, int *value)
{
	char path[PATH_MAX];

	snprintf(path, PATH_MAX,
		"devices/system/cpu/cpu%d/topology/%s", cpu, name);

	return sysfs__read_int(path, value);
}

int cpu_map__get_socket_id(int cpu)
{
	int value, ret = cpu__get_topology_int(cpu, "physical_package_id", &value);
	return ret ?: value;
}

int cpu_map__get_socket(struct perf_cpu_map *map, int idx, void *data __maybe_unused)
{
	int cpu;

	if (idx < 0 || idx >= map->nr)
		return -1;

	cpu = map->map[idx];

	return cpu_map__get_socket_id(cpu);
}

static int cmp_ids(const void *a, const void *b)
{
	return *(int *)a - *(int *)b;
}

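/*
 * Worked example (illustrative): for cpus {0, 1, 2, 3} where f() maps
 * cpus 0-1 to socket 0 and cpus 2-3 to socket 1, cpu_map__build_map()
 * deduplicates the returned ids and *res ends up as the sorted map
 * {0, 1}.
 */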
int cpu_map__build_map(struct perf_cpu_map *cpus, struct perf_cpu_map **res,
		       int (*f)(struct perf_cpu_map *map, int cpu, void *data),
		       void *data)
{
	struct perf_cpu_map *c;
	int nr = cpus->nr;
	int cpu, s1, s2;

	/* allocate for the worst case: every cpu yields a distinct id */
	c = calloc(1, sizeof(*c) + nr * sizeof(int));
	if (!c)
		return -1;

	for (cpu = 0; cpu < nr; cpu++) {
		s1 = f(cpus, cpu, data);
		for (s2 = 0; s2 < c->nr; s2++) {
			if (s1 == c->map[s2])
				break;
		}
		if (s2 == c->nr) {
			c->map[c->nr] = s1;
			c->nr++;
		}
	}
	/* ensure ids are processed in increasing order */
	qsort(c->map, c->nr, sizeof(int), cmp_ids);

	refcount_set(&c->refcnt, 1);
	*res = c;
	return 0;
}

int cpu_map__get_die_id(int cpu)
{
	int value, ret = cpu__get_topology_int(cpu, "die_id", &value);

	return ret ?: value;
}

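/*
 * Worked example (illustrative): socket id 1 and die id 2 encode to
 * (1 << 8) | 2 == 0x102, so each die keeps a unique id across sockets.
 */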
int cpu_map__get_die(struct perf_cpu_map *map, int idx, void *data)
{
	int cpu, die_id, s;

	if (idx < 0 || idx >= map->nr)
		return -1;

	cpu = map->map[idx];

	die_id = cpu_map__get_die_id(cpu);
	/* There is no die_id on legacy systems. */
	if (die_id == -1)
		die_id = 0;

	s = cpu_map__get_socket(map, idx, data);
	if (s == -1)
		return -1;

	/*
	 * Encode the socket in bit range 15:8.  die_id is relative to
	 * the socket and we need a global id, so we combine
	 * socket + die id.
	 */
	if (WARN_ONCE(die_id >> 8, "The die id number is too big.\n"))
		return -1;

	if (WARN_ONCE(s >> 8, "The socket id number is too big.\n"))
		return -1;

	return (s << 8) | (die_id & 0xff);
}

int cpu_map__get_core_id(int cpu)
{
	int value, ret = cpu__get_topology_int(cpu, "core_id", &value);
	return ret ?: value;
}

int cpu_map__get_node_id(int cpu)
{
	return cpu__get_node(cpu);
}

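/*
 * Worked example (illustrative): with s_die == 0x102 (socket 1, die 2)
 * and core id 3, the combined id is (0x102 << 16) | 3 == 0x1020003:
 * socket in bits 31:24, die in bits 23:16, core in bits 15:0.
 */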
int cpu_map__get_core(struct perf_cpu_map *map, int idx, void *data)
{
	int cpu, s_die;

	if (idx < 0 || idx >= map->nr)
		return -1;

	cpu = map->map[idx];

	cpu = cpu_map__get_core_id(cpu);

	/* s_die is the combination of socket + die id */
	s_die = cpu_map__get_die(map, idx, data);
	if (s_die == -1)
		return -1;

	/*
	 * Encode the socket in bit range 31:24 and the die id in bit
	 * range 23:16.  core_id is relative to the socket and die, and
	 * we need a global id, so we combine socket + die id + core id.
	 */
	if (WARN_ONCE(cpu >> 16, "The core id number is too big.\n"))
		return -1;

	return (s_die << 16) | (cpu & 0xffff);
}

int cpu_map__get_node(struct perf_cpu_map *map, int idx, void *data __maybe_unused)
{
	if (idx < 0 || idx >= map->nr)
		return -1;

	return cpu_map__get_node_id(map->map[idx]);
}

int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct perf_cpu_map **sockp)
{
	return cpu_map__build_map(cpus, sockp, cpu_map__get_socket, NULL);
}

int cpu_map__build_die_map(struct perf_cpu_map *cpus, struct perf_cpu_map **diep)
{
	return cpu_map__build_map(cpus, diep, cpu_map__get_die, NULL);
}

int cpu_map__build_core_map(struct perf_cpu_map *cpus, struct perf_cpu_map **corep)
{
	return cpu_map__build_map(cpus, corep, cpu_map__get_core, NULL);
}

int cpu_map__build_node_map(struct perf_cpu_map *cpus, struct perf_cpu_map **numap)
{
	return cpu_map__build_map(cpus, numap, cpu_map__get_node, NULL);
}

/* setup simple routines to easily access node numbers given a cpu number */
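/*
 * Worked example (illustrative): for a sysfs range file containing
 * "0-7", the scan from the right stops just past the '-', sscanf()
 * reads 7, and the final increment turns that highest id into the
 * count 8.
 */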
static int get_max_num(char *path, int *max)
{
	size_t num;
	char *buf;
	int err = 0;

	if (filename__read_str(path, &buf, &num))
		return -1;

	buf[num] = '\0';

	/* start on the right, to find the highest id */
	while (--num) {
		if ((buf[num] == ',') || (buf[num] == '-')) {
			num++;
			break;
		}
	}
	if (sscanf(&buf[num], "%d", max) < 1) {
		err = -1;
		goto out;
	}

	/* convert the highest 0-based id into a count */
	(*max)++;

out:
	free(buf);
	return err;
}

/* Determine the highest possible cpu in the system for sparse allocation */
static void set_max_cpu_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up defaults */
	max_cpu_num = 4096;
	max_present_cpu_num = 4096;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_cpu_num);
	if (ret)
		goto out;

	/* get the highest present cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_present_cpu_num);

out:
	if (ret)
		pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num);
}

/* Determine the highest possible node in the system for sparse allocation */
static void set_max_node_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_node_num = 8;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible node number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_node_num);

out:
	if (ret)
		pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
}

int cpu__max_node(void)
{
	if (unlikely(!max_node_num))
		set_max_node_num();

	return max_node_num;
}

int cpu__max_cpu(void)
{
	if (unlikely(!max_cpu_num))
		set_max_cpu_num();

	return max_cpu_num;
}

int cpu__max_present_cpu(void)
{
	if (unlikely(!max_present_cpu_num))
		set_max_cpu_num();

	return max_present_cpu_num;
}

int cpu__get_node(int cpu)
{
	if (unlikely(cpunode_map == NULL)) {
		pr_debug("cpu_map not initialized\n");
		return -1;
	}

	return cpunode_map[cpu];
}

static int init_cpunode_map(void)
{
	int i;

	set_max_cpu_num();
	set_max_node_num();

	cpunode_map = calloc(max_cpu_num, sizeof(int));
	if (!cpunode_map) {
		pr_err("%s: calloc failed\n", __func__);
		return -1;
	}

	for (i = 0; i < max_cpu_num; i++)
		cpunode_map[i] = -1;

	return 0;
}

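/*
 * Worked example (illustrative): on a system where
 * /sys/devices/system/node/node0 contains symlinks cpu0..cpu3, the
 * walk below sets cpunode_map[0..3] = 0; cpus without a node symlink
 * keep the -1 set by init_cpunode_map().
 */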
int cpu__setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];
	char path[PATH_MAX];
	const char *mnt;
	int n;

	/* initialize globals */
	if (init_cpunode_map())
		return -1;

	mnt = sysfs__mountpoint();
	if (!mnt)
		return 0;

	n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
	if (n >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		return -1;
	}

	dir1 = opendir(path);
	if (!dir1)
		return 0;

	/* walk the tree and set up the map */
	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
		if (n >= PATH_MAX) {
			pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
			continue;
		}

		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
	return 0;
}

bool cpu_map__has(struct perf_cpu_map *cpus, int cpu)
{
	return perf_cpu_map__idx(cpus, cpu) != -1;
}

int cpu_map__cpu(struct perf_cpu_map *cpus, int idx)
{
	return cpus->map[idx];
}

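/*
 * Worked example (illustrative): a map holding cpus {0, 1, 2, 4, 5}
 * is compacted into the list string "0-2,4-5"; consecutive cpus fold
 * into a range, and a gap starts a new comma-separated entry.
 */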
size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size)
{
	int i, cpu, start = -1;
	bool first = true;
	size_t ret = 0;

#define COMMA first ? "" : ","

	for (i = 0; i < map->nr + 1; i++) {
		bool last = i == map->nr;

		cpu = last ? INT_MAX : map->map[i];

		if (start == -1) {
			start = i;
			if (last) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						map->map[i]);
			}
		} else if (((i - start) != (cpu - map->map[start])) || last) {
			int end = i - 1;

			if (start == end) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						map->map[start]);
			} else {
				ret += snprintf(buf + ret, size - ret,
						"%s%d-%d", COMMA,
						map->map[start], map->map[end]);
			}
			first = false;
			start = i;
		}
	}

#undef COMMA

	pr_debug2("cpumask list: %s\n", buf);
	return ret;
}

static char hex_char(unsigned char val)
{
	if (val < 10)
		return val + '0';
	if (val < 16)
		return val - 10 + 'a';
	return '?';
}

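/*
 * Worked example (illustrative): a map holding cpus {0..7} sets the
 * low byte of the bitmap, which prints as the hex mask "ff"; a comma
 * is emitted between every 32-bit group, matching the kernel's
 * cpumask format.
 */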
size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
{
	int i, cpu;
	char *ptr = buf;
	unsigned char *bitmap;
	int last_cpu = cpu_map__cpu(map, map->nr - 1);

	if (buf == NULL)
		return 0;

	bitmap = zalloc(last_cpu / 8 + 1);
	if (bitmap == NULL) {
		buf[0] = '\0';
		return 0;
	}

	for (i = 0; i < map->nr; i++) {
		cpu = cpu_map__cpu(map, i);
		bitmap[cpu / 8] |= 1 << (cpu % 8);
	}

	for (cpu = last_cpu / 4 * 4; cpu >= 0; cpu -= 4) {
		unsigned char bits = bitmap[cpu / 8];

		if (cpu % 8)
			bits >>= 4;
		else
			bits &= 0xf;

		*ptr++ = hex_char(bits);
		if ((cpu % 32) == 0 && cpu > 0)
			*ptr++ = ',';
	}
	*ptr = '\0';
	free(bitmap);

	buf[size - 1] = '\0';
	return ptr - buf;
}

const struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */
{
	static const struct perf_cpu_map *online = NULL;

	if (!online)
		online = perf_cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */

	return online;
}