// SPDX-License-Identifier: GPL-2.0+

/*
 * Copyright 2018-2019 IBM Corporation.
 */

#define __SANE_USERSPACE_TYPES__

#include <sys/types.h>
#include <stdint.h>
#include <malloc.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <sys/prctl.h>
#include "utils.h"

#include "../pmu/event.h"

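/*
 * Branch loop workloads, implemented in assembly elsewhere in this selftest:
 * one loop stresses the indirect branch (count cache) predictor, the other
 * the Power9 pattern cache.
 */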
extern void pattern_cache_loop(void);
extern void indirect_branch_loop(void);

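/*
 * Run the appropriate branch loop with the PMU events enabled, then work out
 * what percentage of the counted branches were mispredicted.
 */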
static int do_count_loop(struct event *events, bool is_p9, s64 *miss_percent)
{
	u64 pred, mpred;

	prctl(PR_TASK_PERF_EVENTS_ENABLE);

	if (is_p9)
		pattern_cache_loop();
	else
		indirect_branch_loop();

	prctl(PR_TASK_PERF_EVENTS_DISABLE);

	event_read(&events[0]);
	event_read(&events[1]);

	// We could scale all the events by running/enabled but we're lazy
	// As long as the PMU is uncontended they should all run
	FAIL_IF(events[0].result.running != events[0].result.enabled);
	FAIL_IF(events[1].result.running != events[1].result.enabled);

	pred =  events[0].result.value;
	mpred = events[1].result.value;

	if (is_p9) {
		event_read(&events[2]);
		event_read(&events[3]);
		FAIL_IF(events[2].result.running != events[2].result.enabled);
		FAIL_IF(events[3].result.running != events[3].result.enabled);

		pred  += events[2].result.value;
		mpred += events[3].result.value;
	}

	*miss_percent = 100 * mpred / pred;

	return 0;
}

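/*
 * Initialise a raw PMU event, left disabled and restricted to counting
 * user-space activity only (kernel, hypervisor and idle are excluded).
 */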
static void setup_event(struct event *e, u64 config, char *name)
{
	event_init_named(e, config, name);

	e->attr.disabled = 1;
	e->attr.exclude_kernel = 1;
	e->attr.exclude_hv = 1;
	e->attr.exclude_idle = 1;
}

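// Mirrors the mitigation strings exposed via sysfs, see get_sysfs_state()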
enum spectre_v2_state {
	VULNERABLE = 0,
	UNKNOWN = 1,		// Works with FAIL_IF()
	NOT_AFFECTED,
	BRANCH_SERIALISATION,
	COUNT_CACHE_DISABLED,
	COUNT_CACHE_FLUSH_SW,
	COUNT_CACHE_FLUSH_HW,
	BTB_FLUSH,
};

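/*
 * Parse /sys/devices/system/cpu/vulnerabilities/spectre_v2 and map the
 * reported mitigation string onto a spectre_v2_state value.
 */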
static enum spectre_v2_state get_sysfs_state(void)
{
	enum spectre_v2_state state = UNKNOWN;
	char buf[256];
	int len;

	memset(buf, 0, sizeof(buf));
	FAIL_IF(read_sysfs_file("devices/system/cpu/vulnerabilities/spectre_v2", buf, sizeof(buf)));

	// Make sure it's NULL terminated
	buf[sizeof(buf) - 1] = '\0';

	// Trim the trailing newline
	len = strlen(buf);
	FAIL_IF(len < 1);
	buf[len - 1] = '\0';

	printf("sysfs reports: '%s'\n", buf);

	// Order matters
	if (strstr(buf, "Vulnerable"))
		state = VULNERABLE;
	else if (strstr(buf, "Not affected"))
		state = NOT_AFFECTED;
	else if (strstr(buf, "Indirect branch serialisation (kernel only)"))
		state = BRANCH_SERIALISATION;
	else if (strstr(buf, "Indirect branch cache disabled"))
		state = COUNT_CACHE_DISABLED;
	else if (strstr(buf, "Software count cache flush (hardware accelerated)"))
		state = COUNT_CACHE_FLUSH_HW;
	else if (strstr(buf, "Software count cache flush"))
		state = COUNT_CACHE_FLUSH_SW;
	else if (strstr(buf, "Branch predictor state flush"))
		state = BTB_FLUSH;

	return state;
}

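// Raw PMU event codes for the branch count cache and pattern cache
// predicted/mispredicted counters.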
#define PM_BR_PRED_CCACHE	0x040a4	// P8 + P9
#define PM_BR_MPRED_CCACHE	0x040ac	// P8 + P9
#define PM_BR_PRED_PCACHE	0x048a0	// P9 only
#define PM_BR_MPRED_PCACHE	0x048b0	// P9 only

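// SPR number of the Processor Version Register, used below to detect Power9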
#define SPRN_PVR 287

int spectre_v2_test(void)
{
	enum spectre_v2_state state;
	struct event events[4];
	s64 miss_percent;
	bool is_p9;

	// The PMU events we use only work on Power8 or later
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));

	state = get_sysfs_state();
	if (state == UNKNOWN) {
		printf("Error: couldn't determine spectre_v2 mitigation state?\n");
		return -1;
	}

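	// Open the two count cache events as a single group so the PMU
	// schedules (and enables/disables) them together.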
	memset(events, 0, sizeof(events));

	setup_event(&events[0], PM_BR_PRED_CCACHE,  "PM_BR_PRED_CCACHE");
	setup_event(&events[1], PM_BR_MPRED_CCACHE, "PM_BR_MPRED_CCACHE");
	FAIL_IF(event_open(&events[0]));
	FAIL_IF(event_open_with_group(&events[1], events[0].fd) == -1);

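	// PVR version 0x004e identifies Power9, which also has a pattern cache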
	is_p9 = ((mfspr(SPRN_PVR) >> 16) & 0xFFFF) == 0x4e;

	if (is_p9) {
		// Count pattern cache too
		setup_event(&events[2], PM_BR_PRED_PCACHE,  "PM_BR_PRED_PCACHE");
		setup_event(&events[3], PM_BR_MPRED_PCACHE, "PM_BR_MPRED_PCACHE");

		FAIL_IF(event_open_with_group(&events[2], events[0].fd) == -1);
		FAIL_IF(event_open_with_group(&events[3], events[0].fd) == -1);
	}

	FAIL_IF(do_count_loop(events, is_p9, &miss_percent));

	event_report_justified(&events[0], 18, 10);
	event_report_justified(&events[1], 18, 10);
	event_close(&events[0]);
	event_close(&events[1]);

	if (is_p9) {
		event_report_justified(&events[2], 18, 10);
		event_report_justified(&events[3], 18, 10);
		event_close(&events[2]);
		event_close(&events[3]);
	}

	printf("Miss percent %lld %%\n", miss_percent);

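	// Check that the measured misprediction rate is consistent with the
	// mitigation the kernel says is in effect.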
	switch (state) {
	case VULNERABLE:
	case NOT_AFFECTED:
	case COUNT_CACHE_FLUSH_SW:
	case COUNT_CACHE_FLUSH_HW:
		// These should all not affect userspace branch prediction
		if (miss_percent > 15) {
			printf("Branch misses > 15%% unexpected in this configuration!\n");
			printf("Possible mis-match between reported & actual mitigation\n");
			/*
			 * Such a mismatch may be caused by a guest system
			 * reporting as vulnerable when the host is mitigated.
			 * Return skip code to avoid detecting this as an error.
			 * We are not vulnerable and reporting otherwise, so
			 * missing such a mismatch is safe.
			 */
			if (miss_percent > 95)
				return 4;

			return 1;
		}
		break;
	case BRANCH_SERIALISATION:
		// This seems to affect userspace branch prediction a bit?
		if (miss_percent > 25) {
			printf("Branch misses > 25%% unexpected in this configuration!\n");
			printf("Possible mis-match between reported & actual mitigation\n");
			return 1;
		}
		break;
	case COUNT_CACHE_DISABLED:
		if (miss_percent < 95) {
			printf("Branch misses < 95%% unexpected in this configuration!\n");
			printf("Possible mis-match between reported & actual mitigation\n");
			return 1;
		}
		break;
	case UNKNOWN:
	case BTB_FLUSH:
		printf("Not sure!\n");
		return 1;
	}

	printf("OK - Measured branch prediction rates match reported spectre v2 mitigation.\n");

	return 0;
}

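/*
 * Hand the test off to the powerpc selftest harness, which runs it and
 * reports the result based on the return value of spectre_v2_test().
 */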
int main(int argc, char *argv[])
{
	return test_harness(spectre_v2_test, "spectre_v2");
}