xref: /OK3568_Linux_fs/u-boot/drivers/timer/tsc_timer.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright (c) 2012 The Chromium OS Authors.
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * TSC calibration codes are adapted from Linux kernel
5*4882a593Smuzhiyun  * arch/x86/kernel/tsc_msr.c and arch/x86/kernel/tsc.c
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * SPDX-License-Identifier:	GPL-2.0+
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <common.h>
11*4882a593Smuzhiyun #include <dm.h>
12*4882a593Smuzhiyun #include <malloc.h>
13*4882a593Smuzhiyun #include <timer.h>
14*4882a593Smuzhiyun #include <asm/cpu.h>
15*4882a593Smuzhiyun #include <asm/io.h>
16*4882a593Smuzhiyun #include <asm/i8254.h>
17*4882a593Smuzhiyun #include <asm/ibmpc.h>
18*4882a593Smuzhiyun #include <asm/msr.h>
19*4882a593Smuzhiyun #include <asm/u-boot-x86.h>
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun #define MAX_NUM_FREQS	8
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun DECLARE_GLOBAL_DATA_PTR;
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun /*
26*4882a593Smuzhiyun  * According to Intel 64 and IA-32 System Programming Guide,
27*4882a593Smuzhiyun  * if MSR_PERF_STAT[31] is set, the maximum resolved bus ratio can be
28*4882a593Smuzhiyun  * read in MSR_PLATFORM_ID[12:8], otherwise in MSR_PERF_STAT[44:40].
29*4882a593Smuzhiyun  * Unfortunately some Intel Atom SoCs aren't quite compliant to this,
30*4882a593Smuzhiyun  * so we need manually differentiate SoC families. This is what the
31*4882a593Smuzhiyun  * field msr_plat does.
32*4882a593Smuzhiyun  */
/* Per-CPU-model TSC calibration descriptor, matched by family/model */
struct freq_desc {
	u8 x86_family;	/* CPU family */
	u8 x86_model;	/* model */
	/* 2: use 100MHz, 1: use MSR_PLATFORM_INFO, 0: MSR_IA32_PERF_STATUS */
	u8 msr_plat;
	u32 freqs[MAX_NUM_FREQS];	/* bus clock in kHz, indexed by FSB freq ID */
};
40*4882a593Smuzhiyun 
/*
 * Known Intel SoCs and their possible bus reference clocks (kHz), indexed
 * by the MSR_FSB_FREQ ID field. A zero entry means that freq ID is not
 * used by the part.
 */
static struct freq_desc freq_desc_tables[] = {
	/* PNW */
	{ 6, 0x27, 0, { 0, 0, 0, 0, 0, 99840, 0, 83200 } },
	/* CLV+ */
	{ 6, 0x35, 0, { 0, 133200, 0, 0, 0, 99840, 0, 83200 } },
	/* TNG - Intel Atom processor Z3400 series */
	{ 6, 0x4a, 1, { 0, 100000, 133300, 0, 0, 0, 0, 0 } },
	/* VLV2 - Intel Atom processor E3000, Z3600, Z3700 series */
	{ 6, 0x37, 1, { 83300, 100000, 133300, 116700, 80000, 0, 0, 0 } },
	/* ANN - Intel Atom processor Z3500 series */
	{ 6, 0x5a, 1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0 } },
	/* Ivybridge */
	{ 6, 0x3a, 2, { 0, 0, 0, 0, 0, 0, 0, 0 } },
};
55*4882a593Smuzhiyun 
/*
 * match_cpu() - find the calibration table entry for a given CPU
 *
 * @family:	CPU family from CPUID
 * @model:	CPU model from CPUID
 * @return index into freq_desc_tables[], or -1 if the CPU is not listed
 */
static int match_cpu(u8 family, u8 model)
{
	const struct freq_desc *entry;

	for (entry = freq_desc_tables;
	     entry < freq_desc_tables + ARRAY_SIZE(freq_desc_tables);
	     entry++) {
		if (entry->x86_family == family && entry->x86_model == model)
			return entry - freq_desc_tables;
	}

	return -1;
}
68*4882a593Smuzhiyun 
/*
 * Map CPU reference clock freq ID(0-7) to CPU reference clock freq(KHz).
 * Evaluates to 0 when the table entry does not use that freq ID.
 */
#define id_to_freq(cpu_index, freq_id) \
	(freq_desc_tables[cpu_index].freqs[freq_id])
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun /*
74*4882a593Smuzhiyun  * TSC on Intel Atom SoCs capable of determining TSC frequency by MSR is
75*4882a593Smuzhiyun  * reliable and the frequency is known (provided by HW).
76*4882a593Smuzhiyun  *
77*4882a593Smuzhiyun  * On these platforms PIT/HPET is generally not available so calibration won't
78*4882a593Smuzhiyun  * work at all and there is no other clocksource to act as a watchdog for the
79*4882a593Smuzhiyun  * TSC, so we have no other choice than to trust it.
80*4882a593Smuzhiyun  *
81*4882a593Smuzhiyun  * Returns the TSC frequency in MHz or 0 if HW does not provide it.
82*4882a593Smuzhiyun  */
/*
 * cpu_mhz_from_msr() - determine the TSC frequency from model-specific regs
 *
 * TSC on Intel Atom SoCs capable of determining TSC frequency by MSR is
 * reliable and the frequency is known (provided by HW).
 *
 * On these platforms PIT/HPET is generally not available so calibration won't
 * work at all and there is no other clocksource to act as a watchdog for the
 * TSC, so we have no other choice than to trust it.
 *
 * Returns the TSC frequency in MHz or 0 if HW does not provide it.
 */
static unsigned long __maybe_unused cpu_mhz_from_msr(void)
{
	u32 lo, hi, ratio, freq_id, freq;
	unsigned long res;
	int cpu_index;

	/* The MSR layouts used below are Intel-specific */
	if (gd->arch.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	/* Only CPUs present in freq_desc_tables can be handled */
	cpu_index = match_cpu(gd->arch.x86, gd->arch.x86_model);
	if (cpu_index < 0)
		return 0;

	if (freq_desc_tables[cpu_index].msr_plat) {
		/* Bus ratio is in MSR_PLATFORM_INFO bits [15:8] */
		rdmsr(MSR_PLATFORM_INFO, lo, hi);
		ratio = (lo >> 8) & 0xff;
	} else {
		/* Older parts: bus ratio is in MSR_IA32_PERF_STATUS [44:40] */
		rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
		ratio = (hi >> 8) & 0x1f;
	}
	debug("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio);

	if (freq_desc_tables[cpu_index].msr_plat == 2) {
		/* msr_plat == 2: assume a fixed 100 MHz bus clock */
		/* TODO: Figure out how best to deal with this */
		freq = 100000;
		debug("Using frequency: %u KHz\n", freq);
	} else {
		/* Get FSB FREQ ID */
		rdmsr(MSR_FSB_FREQ, lo, hi);
		freq_id = lo & 0x7;
		freq = id_to_freq(cpu_index, freq_id);
		debug("Resolved frequency ID: %u, frequency: %u KHz\n",
		      freq_id, freq);
	}

	/* TSC frequency = maximum resolved freq * maximum resolved bus ratio */
	res = freq * ratio / 1000;
	debug("TSC runs at %lu MHz\n", res);

	return res;
}
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun /*
126*4882a593Smuzhiyun  * This reads the current MSB of the PIT counter, and
127*4882a593Smuzhiyun  * checks if we are running on sufficiently fast and
128*4882a593Smuzhiyun  * non-virtualized hardware.
129*4882a593Smuzhiyun  *
130*4882a593Smuzhiyun  * Our expectations are:
131*4882a593Smuzhiyun  *
132*4882a593Smuzhiyun  *  - the PIT is running at roughly 1.19MHz
133*4882a593Smuzhiyun  *
134*4882a593Smuzhiyun  *  - each IO is going to take about 1us on real hardware,
135*4882a593Smuzhiyun  *    but we allow it to be much faster (by a factor of 10) or
136*4882a593Smuzhiyun  *    _slightly_ slower (ie we allow up to a 2us read+counter
137*4882a593Smuzhiyun  *    update - anything else implies a unacceptably slow CPU
138*4882a593Smuzhiyun  *    or PIT for the fast calibration to work.
139*4882a593Smuzhiyun  *
140*4882a593Smuzhiyun  *  - with 256 PIT ticks to read the value, we have 214us to
141*4882a593Smuzhiyun  *    see the same MSB (and overhead like doing a single TSC
142*4882a593Smuzhiyun  *    read per MSB value etc).
143*4882a593Smuzhiyun  *
144*4882a593Smuzhiyun  *  - We're doing 2 reads per loop (LSB, MSB), and we expect
145*4882a593Smuzhiyun  *    them each to take about a microsecond on real hardware.
146*4882a593Smuzhiyun  *    So we expect a count value of around 100. But we'll be
147*4882a593Smuzhiyun  *    generous, and accept anything over 50.
148*4882a593Smuzhiyun  *
149*4882a593Smuzhiyun  *  - if the PIT is stuck, and we see *many* more reads, we
150*4882a593Smuzhiyun  *    return early (and the next caller of pit_expect_msb()
151*4882a593Smuzhiyun  *    then consider it a failure when they don't see the
152*4882a593Smuzhiyun  *    next expected value).
153*4882a593Smuzhiyun  *
154*4882a593Smuzhiyun  * These expectations mean that we know that we have seen the
155*4882a593Smuzhiyun  * transition from one expected value to another with a fairly
156*4882a593Smuzhiyun  * high accuracy, and we didn't miss any events. We can thus
157*4882a593Smuzhiyun  * use the TSC value at the transitions to calculate a pretty
158*4882a593Smuzhiyun  * good value for the TSC frequencty.
159*4882a593Smuzhiyun  */
/*
 * pit_verify_msb() - check whether the PIT counter 2 MSB still equals @val
 *
 * Reads the full 16-bit latch (LSB then MSB) from port 0x42; the LSB is
 * discarded and only the MSB is compared.
 */
static inline int pit_verify_msb(unsigned char val)
{
	unsigned char msb;

	/* Throw away the LSB... */
	inb(0x42);
	/* ...and compare the MSB against what the caller expects */
	msb = inb(0x42);

	return msb == val;
}
166*4882a593Smuzhiyun 
/*
 * pit_expect_msb() - spin while the PIT MSB reads @val, timestamping with TSC
 *
 * @val:	MSB value expected on each read of counter 2
 * @tscp:	returns the TSC value sampled on the last read that still
 *		matched @val
 * @deltap:	returns the TSC delta spanning the final verify+read cycle,
 *		i.e. the uncertainty on *tscp relative to the MSB transition
 * @return non-zero when @val was observed for more than 5 iterations
 *	   (enough successes to consider the sample usable)
 */
static inline int pit_expect_msb(unsigned char val, u64 *tscp,
				 unsigned long *deltap)
{
	int count;
	u64 tsc = 0, prev_tsc = 0;

	/* Bounded loop so a stuck PIT cannot hang us forever */
	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		prev_tsc = tsc;
		tsc = rdtsc();
	}
	/* Error window: from the second-to-last matching sample until now */
	*deltap = rdtsc() - prev_tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}
188*4882a593Smuzhiyun 
189*4882a593Smuzhiyun /*
190*4882a593Smuzhiyun  * How many MSB values do we want to see? We aim for
191*4882a593Smuzhiyun  * a maximum error rate of 500ppm (in practice the
192*4882a593Smuzhiyun  * real error is much smaller), but refuse to spend
193*4882a593Smuzhiyun  * more than 50ms on it.
194*4882a593Smuzhiyun  */
195*4882a593Smuzhiyun #define MAX_QUICK_PIT_MS 50
196*4882a593Smuzhiyun #define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
197*4882a593Smuzhiyun 
/*
 * quick_pit_calibrate() - measure the TSC frequency against the 8254 PIT
 *
 * Programs PIT counter 2 as a one-shot down-counter from 0xffff and watches
 * its MSB decrement, pairing each observed transition with a TSC read, until
 * the accumulated error drops below ~500 ppm (see pit_expect_msb()).
 *
 * @return TSC frequency in MHz, or 0 if calibration failed
 */
static unsigned long __maybe_unused quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count
	 *
	 * NOTE! Mode 2 decrements by two (and then the
	 * output is flipped each time, giving the same
	 * final output frequency as a decrement-by-one),
	 * so mode 0 is much better when looking at the
	 * individual counts.
	 */
	outb(0xb0, 0x43);

	/* Start at 0xffff */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge, so we
	 * need to delay for a microsecond. The easiest way
	 * to do that is to just read back the 16-bit counter
	 * once from the PIT.
	 */
	pit_verify_msb(0);

	/* d1: TSC uncertainty on the first MSB; d2: on the i'th MSB */
	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff-i, &delta, &d2))
				break;

			/*
			 * Iterate until the error is less than 500 ppm
			 */
			delta -= tsc;
			if (d1+d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT.
			 *
			 * This also guarantees serialization of the
			 * last cycle read ('d2') in pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	debug("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * Ok, if we get here, then we've seen the
	 * MSB of the PIT decrement 'i' times, and the
	 * error has shrunk to less than 500 ppm.
	 *
	 * As a result, we can depend on there not being
	 * any odd delays anywhere, and the TSC reads are
	 * reliable (within the error).
	 *
	 * kHz = ticks / time-in-seconds / 1000;
	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
	 */
	delta *= PIT_TICK_RATE;
	delta /= (i*256*1000);
	debug("Fast TSC calibration using PIT\n");
	/* delta is now in kHz; convert to MHz for the caller */
	return delta / 1000;
}
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun /* Get the speed of the TSC timer in MHz */
get_tbclk_mhz(void)278*4882a593Smuzhiyun unsigned notrace long get_tbclk_mhz(void)
279*4882a593Smuzhiyun {
280*4882a593Smuzhiyun 	return get_tbclk() / 1000000;
281*4882a593Smuzhiyun }
282*4882a593Smuzhiyun 
get_ms_timer(void)283*4882a593Smuzhiyun static ulong get_ms_timer(void)
284*4882a593Smuzhiyun {
285*4882a593Smuzhiyun 	return (get_ticks() * 1000) / get_tbclk();
286*4882a593Smuzhiyun }
287*4882a593Smuzhiyun 
/* Return milliseconds elapsed since @base (a value from a prior call) */
ulong get_timer(ulong base)
{
	ulong now = get_ms_timer();

	return now - base;
}
292*4882a593Smuzhiyun 
timer_get_us(void)293*4882a593Smuzhiyun ulong notrace timer_get_us(void)
294*4882a593Smuzhiyun {
295*4882a593Smuzhiyun 	return get_ticks() / get_tbclk_mhz();
296*4882a593Smuzhiyun }
297*4882a593Smuzhiyun 
timer_get_boot_us(void)298*4882a593Smuzhiyun ulong timer_get_boot_us(void)
299*4882a593Smuzhiyun {
300*4882a593Smuzhiyun 	return timer_get_us();
301*4882a593Smuzhiyun }
302*4882a593Smuzhiyun 
/*
 * __udelay() - busy-wait for at least @usec microseconds using the TSC
 *
 * @usec: number of microseconds to delay
 */
void __udelay(unsigned long usec)
{
	u64 now = get_ticks();
	u64 stop;

	/* Convert the requested delay into an absolute TSC deadline */
	stop = now + usec * get_tbclk_mhz();

	/* Signed difference so an already-passed deadline exits immediately */
	while ((int64_t)(stop - get_ticks()) > 0)
#if defined(CONFIG_QEMU) && defined(CONFIG_SMP)
		/*
		 * Add a 'pause' instruction on qemu target,
		 * to give other VCPUs a chance to run.
		 */
		asm volatile("pause");
#else
		;
#endif
}
321*4882a593Smuzhiyun 
/*
 * tsc_timer_get_count() - timer uclass .get_count: ticks since probe
 *
 * @dev:	timer device (unused; the TSC is global state)
 * @count:	returns the TSC delta since tsc_base was latched in probe
 * @return 0 always
 */
static int tsc_timer_get_count(struct udevice *dev, u64 *count)
{
	*count = rdtsc() - gd->arch.tsc_base;

	return 0;
}
330*4882a593Smuzhiyun 
tsc_timer_probe(struct udevice * dev)331*4882a593Smuzhiyun static int tsc_timer_probe(struct udevice *dev)
332*4882a593Smuzhiyun {
333*4882a593Smuzhiyun 	struct timer_dev_priv *uc_priv = dev_get_uclass_priv(dev);
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun 	gd->arch.tsc_base = rdtsc();
336*4882a593Smuzhiyun 
337*4882a593Smuzhiyun 	/*
338*4882a593Smuzhiyun 	 * If there is no clock frequency specified in the device tree,
339*4882a593Smuzhiyun 	 * calibrate it by ourselves.
340*4882a593Smuzhiyun 	 */
341*4882a593Smuzhiyun 	if (!uc_priv->clock_rate) {
342*4882a593Smuzhiyun 		unsigned long fast_calibrate;
343*4882a593Smuzhiyun 
344*4882a593Smuzhiyun 		fast_calibrate = cpu_mhz_from_msr();
345*4882a593Smuzhiyun 		if (!fast_calibrate) {
346*4882a593Smuzhiyun 			fast_calibrate = quick_pit_calibrate();
347*4882a593Smuzhiyun 			if (!fast_calibrate)
348*4882a593Smuzhiyun 				panic("TSC frequency is ZERO");
349*4882a593Smuzhiyun 		}
350*4882a593Smuzhiyun 
351*4882a593Smuzhiyun 		uc_priv->clock_rate = fast_calibrate * 1000000;
352*4882a593Smuzhiyun 	}
353*4882a593Smuzhiyun 
354*4882a593Smuzhiyun 	return 0;
355*4882a593Smuzhiyun }
356*4882a593Smuzhiyun 
/* Timer uclass operations: only counter reads are needed for the TSC */
static const struct timer_ops tsc_timer_ops = {
	.get_count = tsc_timer_get_count,
};

/* Device tree match table */
static const struct udevice_id tsc_timer_ids[] = {
	{ .compatible = "x86,tsc-timer", },
	{ }
};

U_BOOT_DRIVER(tsc_timer) = {
	.name	= "tsc_timer",
	.id	= UCLASS_TIMER,
	.of_match = tsc_timer_ids,
	.probe = tsc_timer_probe,
	.ops	= &tsc_timer_ops,
	/* Probe before relocation so early timekeeping works */
	.flags = DM_FLAG_PRE_RELOC,
};
374