xref: /OK3568_Linux_fs/kernel/tools/lib/perf/cpumap.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun #include <perf/cpumap.h>
3*4882a593Smuzhiyun #include <stdlib.h>
4*4882a593Smuzhiyun #include <linux/refcount.h>
5*4882a593Smuzhiyun #include <internal/cpumap.h>
6*4882a593Smuzhiyun #include <asm/bug.h>
7*4882a593Smuzhiyun #include <stdio.h>
8*4882a593Smuzhiyun #include <string.h>
9*4882a593Smuzhiyun #include <unistd.h>
10*4882a593Smuzhiyun #include <ctype.h>
11*4882a593Smuzhiyun #include <limits.h>
12*4882a593Smuzhiyun 
perf_cpu_map__dummy_new(void)13*4882a593Smuzhiyun struct perf_cpu_map *perf_cpu_map__dummy_new(void)
14*4882a593Smuzhiyun {
15*4882a593Smuzhiyun 	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun 	if (cpus != NULL) {
18*4882a593Smuzhiyun 		cpus->nr = 1;
19*4882a593Smuzhiyun 		cpus->map[0] = -1;
20*4882a593Smuzhiyun 		refcount_set(&cpus->refcnt, 1);
21*4882a593Smuzhiyun 	}
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun 	return cpus;
24*4882a593Smuzhiyun }
25*4882a593Smuzhiyun 
cpu_map__delete(struct perf_cpu_map * map)26*4882a593Smuzhiyun static void cpu_map__delete(struct perf_cpu_map *map)
27*4882a593Smuzhiyun {
28*4882a593Smuzhiyun 	if (map) {
29*4882a593Smuzhiyun 		WARN_ONCE(refcount_read(&map->refcnt) != 0,
30*4882a593Smuzhiyun 			  "cpu_map refcnt unbalanced\n");
31*4882a593Smuzhiyun 		free(map);
32*4882a593Smuzhiyun 	}
33*4882a593Smuzhiyun }
34*4882a593Smuzhiyun 
perf_cpu_map__get(struct perf_cpu_map * map)35*4882a593Smuzhiyun struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
36*4882a593Smuzhiyun {
37*4882a593Smuzhiyun 	if (map)
38*4882a593Smuzhiyun 		refcount_inc(&map->refcnt);
39*4882a593Smuzhiyun 	return map;
40*4882a593Smuzhiyun }
41*4882a593Smuzhiyun 
perf_cpu_map__put(struct perf_cpu_map * map)42*4882a593Smuzhiyun void perf_cpu_map__put(struct perf_cpu_map *map)
43*4882a593Smuzhiyun {
44*4882a593Smuzhiyun 	if (map && refcount_dec_and_test(&map->refcnt))
45*4882a593Smuzhiyun 		cpu_map__delete(map);
46*4882a593Smuzhiyun }
47*4882a593Smuzhiyun 
cpu_map__default_new(void)48*4882a593Smuzhiyun static struct perf_cpu_map *cpu_map__default_new(void)
49*4882a593Smuzhiyun {
50*4882a593Smuzhiyun 	struct perf_cpu_map *cpus;
51*4882a593Smuzhiyun 	int nr_cpus;
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun 	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
54*4882a593Smuzhiyun 	if (nr_cpus < 0)
55*4882a593Smuzhiyun 		return NULL;
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun 	cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
58*4882a593Smuzhiyun 	if (cpus != NULL) {
59*4882a593Smuzhiyun 		int i;
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun 		for (i = 0; i < nr_cpus; ++i)
62*4882a593Smuzhiyun 			cpus->map[i] = i;
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun 		cpus->nr = nr_cpus;
65*4882a593Smuzhiyun 		refcount_set(&cpus->refcnt, 1);
66*4882a593Smuzhiyun 	}
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun 	return cpus;
69*4882a593Smuzhiyun }
70*4882a593Smuzhiyun 
/*
 * qsort() comparator for ints.
 *
 * Uses the (a > b) - (a < b) idiom instead of "a - b": the subtraction
 * form overflows (undefined behavior) when the operands have large
 * opposite signs, while the comparison form is correct over the whole
 * int range and still returns the required <0 / 0 / >0 result.
 */
static int cmp_int(const void *a, const void *b)
{
	int lhs = *(const int *)a;
	int rhs = *(const int *)b;

	return (lhs > rhs) - (lhs < rhs);
}
75*4882a593Smuzhiyun 
cpu_map__trim_new(int nr_cpus,int * tmp_cpus)76*4882a593Smuzhiyun static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
77*4882a593Smuzhiyun {
78*4882a593Smuzhiyun 	size_t payload_size = nr_cpus * sizeof(int);
79*4882a593Smuzhiyun 	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
80*4882a593Smuzhiyun 	int i, j;
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun 	if (cpus != NULL) {
83*4882a593Smuzhiyun 		memcpy(cpus->map, tmp_cpus, payload_size);
84*4882a593Smuzhiyun 		qsort(cpus->map, nr_cpus, sizeof(int), cmp_int);
85*4882a593Smuzhiyun 		/* Remove dups */
86*4882a593Smuzhiyun 		j = 0;
87*4882a593Smuzhiyun 		for (i = 0; i < nr_cpus; i++) {
88*4882a593Smuzhiyun 			if (i == 0 || cpus->map[i] != cpus->map[i - 1])
89*4882a593Smuzhiyun 				cpus->map[j++] = cpus->map[i];
90*4882a593Smuzhiyun 		}
91*4882a593Smuzhiyun 		cpus->nr = j;
92*4882a593Smuzhiyun 		assert(j <= nr_cpus);
93*4882a593Smuzhiyun 		refcount_set(&cpus->refcnt, 1);
94*4882a593Smuzhiyun 	}
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun 	return cpus;
97*4882a593Smuzhiyun }
98*4882a593Smuzhiyun 
/*
 * Parse a CPU list (e.g. "0-3,7\n" as found in sysfs "online" files)
 * from @file into a sorted, de-duplicated map.  An empty/unparsable
 * stream falls back to the default 0..N-1 map.  Returns NULL on
 * allocation failure; the caller owns the returned reference.
 */
struct perf_cpu_map *perf_cpu_map__read(FILE *file)
{
	struct perf_cpu_map *cpus = NULL;
	int nr_cpus = 0;
	/* Growable scratch array of CPU numbers collected so far. */
	int *tmp_cpus = NULL, *tmp;
	int max_entries = 0;
	int n, cpu, prev;
	char sep;

	sep = 0;
	/* prev >= 0 means the previous token ended with '-', i.e. we are
	 * completing a "prev-cpu" range. */
	prev = -1;
	for (;;) {
		/* Read one number plus the character following it, if any. */
		n = fscanf(file, "%u%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0) {
			/* Finish a range: everything in (prev, cpu) plus the
			 * already-stored endpoints must fit. */
			int new_max = nr_cpus + cpu - prev - 1;

			WARN_ONCE(new_max >= MAX_NR_CPUS, "Perf can support %d CPUs. "
							  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

			if (new_max >= max_entries) {
				/* Grow with headroom to avoid reallocating on
				 * every range. */
				max_entries = new_max + MAX_NR_CPUS / 2;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto out_free_tmp;
				tmp_cpus = tmp;
			}

			/* Emit the interior of the range; the upper endpoint
			 * (cpu) is appended below like a standalone value. */
			while (++prev < cpu)
				tmp_cpus[nr_cpus++] = prev;
		}
		if (nr_cpus == max_entries) {
			max_entries += MAX_NR_CPUS;
			tmp = realloc(tmp_cpus, max_entries * sizeof(int));
			if (tmp == NULL)
				goto out_free_tmp;
			tmp_cpus = tmp;
		}

		tmp_cpus[nr_cpus++] = cpu;
		/* A trailing '-' opens a range ending at the next number. */
		if (n == 2 && sep == '-')
			prev = cpu;
		else
			prev = -1;
		/* Stop at end of input (n == 1: no separator) or newline. */
		if (n == 1 || sep == '\n')
			break;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else
		cpus = cpu_map__default_new();
out_free_tmp:
	free(tmp_cpus);
	return cpus;
}
156*4882a593Smuzhiyun 
/*
 * Build a map of the online CPUs from sysfs; if the sysfs file cannot
 * be opened, fall back to the default 0..N-1 map from sysconf().
 */
static struct perf_cpu_map *cpu_map__read_all_cpu_map(void)
{
	struct perf_cpu_map *cpus;
	FILE *onlnf = fopen("/sys/devices/system/cpu/online", "r");

	if (onlnf == NULL)
		return cpu_map__default_new();

	cpus = perf_cpu_map__read(onlnf);
	fclose(onlnf);

	return cpus;
}
170*4882a593Smuzhiyun 
/*
 * Build a map from a user-supplied CPU list string such as "0-3,7".
 * A NULL @cpu_list means "all online CPUs" (read from sysfs).  An empty
 * string yields the dummy map (covers NUMA topology headers for nodes
 * with no CPUs).  Returns NULL on invalid input or allocation failure;
 * the caller owns the returned reference.
 */
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
	struct perf_cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	/* Growable scratch array of CPU numbers collected so far. */
	int *tmp_cpus = NULL, *tmp;
	int max_entries = 0;

	if (!cpu_list)
		return cpu_map__read_all_cpu_map();

	/*
	 * must handle the case of empty cpumap to cover
	 * TOPOLOGY header for NUMA nodes with no CPU
	 * ( e.g., because of CPU hotplug)
	 */
	if (!isdigit(*cpu_list) && *cpu_list != '\0')
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		/* Only '\0', ',' or '-' may legally follow a number. */
		if (start_cpu >= INT_MAX
		    || (*p != '\0' && *p != ',' && *p != '-'))
			goto invalid;

		if (*p == '-') {
			/* Range "a-b": parse the upper endpoint. */
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
				goto invalid;

			/* Reject inverted ranges such as "5-2". */
			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			/* Single value: degenerate range of one CPU. */
			end_cpu = start_cpu;
		}

		WARN_ONCE(end_cpu >= MAX_NR_CPUS, "Perf can support %d CPUs. "
						  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i] == (int)start_cpu)
					goto invalid;

			if (nr_cpus == max_entries) {
				max_entries += MAX_NR_CPUS;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++] = (int)start_cpu;
		}
		/* Skip the ',' separator (if not at end of string). */
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else if (*cpu_list != '\0')
		cpus = cpu_map__default_new();
	else
		cpus = perf_cpu_map__dummy_new();
invalid:
	free(tmp_cpus);
out:
	return cpus;
}
247*4882a593Smuzhiyun 
perf_cpu_map__cpu(const struct perf_cpu_map * cpus,int idx)248*4882a593Smuzhiyun int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
249*4882a593Smuzhiyun {
250*4882a593Smuzhiyun 	if (cpus && idx < cpus->nr)
251*4882a593Smuzhiyun 		return cpus->map[idx];
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun 	return -1;
254*4882a593Smuzhiyun }
255*4882a593Smuzhiyun 
perf_cpu_map__nr(const struct perf_cpu_map * cpus)256*4882a593Smuzhiyun int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
257*4882a593Smuzhiyun {
258*4882a593Smuzhiyun 	return cpus ? cpus->nr : 1;
259*4882a593Smuzhiyun }
260*4882a593Smuzhiyun 
perf_cpu_map__empty(const struct perf_cpu_map * map)261*4882a593Smuzhiyun bool perf_cpu_map__empty(const struct perf_cpu_map *map)
262*4882a593Smuzhiyun {
263*4882a593Smuzhiyun 	return map ? map->map[0] == -1 : true;
264*4882a593Smuzhiyun }
265*4882a593Smuzhiyun 
perf_cpu_map__idx(struct perf_cpu_map * cpus,int cpu)266*4882a593Smuzhiyun int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu)
267*4882a593Smuzhiyun {
268*4882a593Smuzhiyun 	int i;
269*4882a593Smuzhiyun 
270*4882a593Smuzhiyun 	for (i = 0; i < cpus->nr; ++i) {
271*4882a593Smuzhiyun 		if (cpus->map[i] == cpu)
272*4882a593Smuzhiyun 			return i;
273*4882a593Smuzhiyun 	}
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun 	return -1;
276*4882a593Smuzhiyun }
277*4882a593Smuzhiyun 
perf_cpu_map__max(struct perf_cpu_map * map)278*4882a593Smuzhiyun int perf_cpu_map__max(struct perf_cpu_map *map)
279*4882a593Smuzhiyun {
280*4882a593Smuzhiyun 	int i, max = -1;
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun 	for (i = 0; i < map->nr; i++) {
283*4882a593Smuzhiyun 		if (map->map[i] > max)
284*4882a593Smuzhiyun 			max = map->map[i];
285*4882a593Smuzhiyun 	}
286*4882a593Smuzhiyun 
287*4882a593Smuzhiyun 	return max;
288*4882a593Smuzhiyun }
289*4882a593Smuzhiyun 
290*4882a593Smuzhiyun /*
291*4882a593Smuzhiyun  * Merge two cpumaps
292*4882a593Smuzhiyun  *
293*4882a593Smuzhiyun  * orig either gets freed and replaced with a new map, or reused
294*4882a593Smuzhiyun  * with no reference count change (similar to "realloc")
295*4882a593Smuzhiyun  * other has its reference count increased.
296*4882a593Smuzhiyun  */
297*4882a593Smuzhiyun 
/*
 * Compute the sorted union of @orig and @other.  Ownership follows the
 * block comment above: @orig is consumed (its reference is dropped or
 * it is returned as-is), @other gains a reference only in the !orig
 * case.  NOTE(review): assumes both maps are sorted ascending — maps
 * built via cpu_map__trim_new() are; confirm for other producers.
 */
struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
					 struct perf_cpu_map *other)
{
	int *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged;

	/* Trivial cases: a missing side, or two identical maps. */
	if (!orig && !other)
		return NULL;
	if (!orig) {
		perf_cpu_map__get(other);
		return other;
	}
	if (!other)
		return orig;
	if (orig->nr == other->nr &&
	    !memcmp(orig->map, other->map, orig->nr * sizeof(int)))
		return orig;

	/* Worst case (no overlap): the union needs nr + nr slots. */
	tmp_len = orig->nr + other->nr;
	tmp_cpus = malloc(tmp_len * sizeof(int));
	if (!tmp_cpus)
		return NULL;

	/* Standard merge algorithm from wikipedia */
	i = j = k = 0;
	while (i < orig->nr && j < other->nr) {
		if (orig->map[i] <= other->map[j]) {
			/* Equal heads: advance both, emit the value once. */
			if (orig->map[i] == other->map[j])
				j++;
			tmp_cpus[k++] = orig->map[i++];
		} else
			tmp_cpus[k++] = other->map[j++];
	}

	/* Drain whichever input still has entries left. */
	while (i < orig->nr)
		tmp_cpus[k++] = orig->map[i++];

	while (j < other->nr)
		tmp_cpus[k++] = other->map[j++];
	assert(k <= tmp_len);

	/* trim_new re-sorts and de-dups, and takes its own copy. */
	merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	perf_cpu_map__put(orig);
	return merged;
}
346