// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 2002 - 2003  Dominik Brodowski <linux@brodo.de>
 *
 *  BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/timex.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpu_device_id.h>

static struct cpufreq_driver	longrun_driver;

/**
 * longrun_{low,high}_freq is needed for the conversion of cpufreq kHz
 * values into per cent values. In TMTA microcode, the following is valid:
 * performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
 */
static unsigned int longrun_low_freq, longrun_high_freq;
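
/*
 * Illustrative example (hypothetical numbers): with longrun_low_freq =
 * 300000 kHz and longrun_high_freq = 1000000 kHz, a performance setting
 * of 50 corresponds to 300000 + 50 * ((1000000 - 300000) / 100) =
 * 650000 kHz.
 */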


/**
 * longrun_get_policy - get the current LongRun policy
 * @policy: struct cpufreq_policy into which the current policy is written
 *
 * Reads the current LongRun policy by reading MSR_TMTA_LONGRUN_FLAGS
 * and MSR_TMTA_LONGRUN_CTRL.
 */
static void longrun_get_policy(struct cpufreq_policy *policy)
{
	u32 msr_lo, msr_hi;

	rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
	pr_debug("longrun flags are %x - %x\n", msr_lo, msr_hi);
	if (msr_lo & 0x01)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

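	/*
	 * Bits 0..6 of the low and high words of MSR_TMTA_LONGRUN_CTRL hold
	 * the lower and upper bound of the LongRun performance window as a
	 * percentage; convert them back into kHz for the cpufreq core.
	 */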
	rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
	pr_debug("longrun ctrl is %x - %x\n", msr_lo, msr_hi);
	msr_lo &= 0x0000007F;
	msr_hi &= 0x0000007F;

	if (longrun_high_freq <= longrun_low_freq) {
		/* Assume degenerate Longrun table */
		policy->min = policy->max = longrun_high_freq;
	} else {
		policy->min = longrun_low_freq + msr_lo *
			((longrun_high_freq - longrun_low_freq) / 100);
		policy->max = longrun_low_freq + msr_hi *
			((longrun_high_freq - longrun_low_freq) / 100);
	}
	policy->cpu = 0;
}


/**
 * longrun_set_policy - sets a new CPUFreq policy
 * @policy: new policy
 *
 * Sets a new CPUFreq policy on LongRun-capable processors. This function
 * has to be called with cpufreq_driver locked.
 */
static int longrun_set_policy(struct cpufreq_policy *policy)
{
	u32 msr_lo, msr_hi;
	u32 pctg_lo, pctg_hi;

	if (!policy)
		return -EINVAL;

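	/*
	 * Translate the policy's kHz bounds into the 0..100 performance
	 * percentage window that the TMTA microcode expects; this is the
	 * inverse of the conversion done in longrun_get_policy().
	 */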
	if (longrun_high_freq <= longrun_low_freq) {
		/* Assume degenerate Longrun table */
		pctg_lo = pctg_hi = 100;
	} else {
		pctg_lo = (policy->min - longrun_low_freq) /
			((longrun_high_freq - longrun_low_freq) / 100);
		pctg_hi = (policy->max - longrun_low_freq) /
			((longrun_high_freq - longrun_low_freq) / 100);
	}

	if (pctg_hi > 100)
		pctg_hi = 100;
	if (pctg_lo > pctg_hi)
		pctg_lo = pctg_hi;

	/* performance or economy mode */
	rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
	msr_lo &= 0xFFFFFFFE;
	switch (policy->policy) {
	case CPUFREQ_POLICY_PERFORMANCE:
		msr_lo |= 0x00000001;
		break;
	case CPUFREQ_POLICY_POWERSAVE:
		break;
	}
	wrmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);

	/* lower and upper boundary */
	rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
	msr_lo &= 0xFFFFFF80;
	msr_hi &= 0xFFFFFF80;
	msr_lo |= pctg_lo;
	msr_hi |= pctg_hi;
	wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);

	return 0;
}


/**
 * longrun_verify_policy - verifies a new CPUFreq policy
 * @policy: the policy to verify
 *
 * Validates a new CPUFreq policy. This function has to be called with
 * cpufreq_driver locked.
 */
static int longrun_verify_policy(struct cpufreq_policy_data *policy)
{
	if (!policy)
		return -EINVAL;

	policy->cpu = 0;
	cpufreq_verify_within_cpu_limits(policy);

	return 0;
}

static unsigned int longrun_get(unsigned int cpu)
{
	u32 eax, ebx, ecx, edx;

	if (cpu)
		return 0;

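	/*
	 * CPUID level 0x80860007 reports the current core clock in MHz in
	 * EAX (and the current LongRun performance percentage in ECX, as
	 * used in longrun_determine_freqs() below); cpufreq expects kHz.
	 */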
	cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
	pr_debug("cpuid eax is %u\n", eax);

	return eax * 1000;
}

/**
 * longrun_determine_freqs - determines the lowest and highest possible core frequency
 * @low_freq: an int to put the lowest frequency into
 * @high_freq: an int to put the highest frequency into
 *
 * Determines the lowest and highest possible core frequencies on this CPU.
 * This is necessary to calculate the performance percentage according to
 * TMTA rules:
 * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
 */
static int longrun_determine_freqs(unsigned int *low_freq,
				   unsigned int *high_freq)
{
	u32 msr_lo, msr_hi;
	u32 save_lo, save_hi;
	u32 eax, ebx, ecx, edx;
	u32 try_hi;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!low_freq || !high_freq)
		return -EINVAL;

	if (cpu_has(c, X86_FEATURE_LRTI)) {
		/* if the LongRun Table Interface is present, the
		 * detection is a bit easier:
		 * For minimum frequency, read out the maximum
		 * level (msr_hi), write that into "currently
		 * selected level", and read out the frequency.
		 * For maximum frequency, read out level zero.
		 */
		/* minimum */
		rdmsr(MSR_TMTA_LRTI_READOUT, msr_lo, msr_hi);
		wrmsr(MSR_TMTA_LRTI_READOUT, msr_hi, msr_hi);
		rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
		*low_freq = msr_lo * 1000; /* to kHz */

		/* maximum */
		wrmsr(MSR_TMTA_LRTI_READOUT, 0, msr_hi);
		rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
		*high_freq = msr_lo * 1000; /* to kHz */

		pr_debug("longrun table interface told %u - %u kHz\n",
				*low_freq, *high_freq);

		if (*low_freq > *high_freq)
			*low_freq = *high_freq;
		return 0;
	}
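
	/*
	 * No LongRun Table Interface: reconstruct the frequency range
	 * indirectly from the TSC-calibrated maximum and the current
	 * frequency/performance percentage reported by CPUID 0x80860007.
	 */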

	/* set the upper border to the value determined during TSC init */
	*high_freq = (cpu_khz / 1000);
	*high_freq = *high_freq * 1000;
	pr_debug("high frequency is %u kHz\n", *high_freq);

	/* get current borders */
	rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
	save_lo = msr_lo & 0x0000007F;
	save_hi = msr_hi & 0x0000007F;

	/* if current perf_pctg is larger than 90%, we need to decrease the
	 * upper limit to make the calculation more accurate.
	 */
	cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
	/* try decreasing in 10% steps; some processors only react to
	 * certain threshold values */
	for (try_hi = 80; try_hi > 0 && ecx > 90; try_hi -= 10) {
		/* limit the perf_pctg window to 0 .. try_hi */
		msr_lo &= 0xFFFFFF80;
		msr_hi &= 0xFFFFFF80;
		msr_hi |= try_hi;
		wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);

		/* read out current core MHz and current perf_pctg */
		cpuid(0x80860007, &eax, &ebx, &ecx, &edx);

		/* restore values */
		wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi);
	}
	pr_debug("percentage is %u %%, freq is %u MHz\n", ecx, eax);

	/* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
	 * equals
	 * low_freq * (1 - perf_pctg) = (cur_freq - high_freq * perf_pctg)
	 * and therefore
	 * low_freq = (cur_freq - high_freq * perf_pctg) / (1 - perf_pctg)
	 *
	 * high_freq * perf_pctg is stored temporarily into "ebx".
	 */
	ebx = (((cpu_khz / 1000) * ecx) / 100); /* to MHz */

	if ((ecx > 95) || (ecx == 0) || (eax < ebx))
		return -EIO;

	edx = ((eax - ebx) * 100) / (100 - ecx);
	*low_freq = edx * 1000; /* back to kHz */

	pr_debug("low frequency is %u kHz\n", *low_freq);

	if (*low_freq > *high_freq)
		*low_freq = *high_freq;

	return 0;
}


static int longrun_cpu_init(struct cpufreq_policy *policy)
{
	int result = 0;

	/* capability check */
	if (policy->cpu != 0)
		return -ENODEV;

	/* detect low and high frequency */
	result = longrun_determine_freqs(&longrun_low_freq, &longrun_high_freq);
	if (result)
		return result;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = longrun_low_freq;
	policy->cpuinfo.max_freq = longrun_high_freq;
	longrun_get_policy(policy);

	return 0;
}


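/*
 * The LongRun hardware chooses the actual operating frequency itself, so
 * this is a setpolicy-style cpufreq driver: the core only passes down the
 * policy bounds and the performance/powersave preference, never explicit
 * target frequencies.
 */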
static struct cpufreq_driver longrun_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= longrun_verify_policy,
	.setpolicy	= longrun_set_policy,
	.get		= longrun_get,
	.init		= longrun_cpu_init,
	.name		= "longrun",
};

static const struct x86_cpu_id longrun_ids[] = {
	X86_MATCH_VENDOR_FEATURE(TRANSMETA, X86_FEATURE_LONGRUN, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, longrun_ids);

/**
 * longrun_init - initializes the Transmeta Crusoe LongRun CPUFreq driver
 *
 * Initializes the LongRun support.
 */
static int __init longrun_init(void)
{
	if (!x86_match_cpu(longrun_ids))
		return -ENODEV;
	return cpufreq_register_driver(&longrun_driver);
}


/**
 * longrun_exit - unregisters LongRun support
 */
static void __exit longrun_exit(void)
{
	cpufreq_unregister_driver(&longrun_driver);
}


MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("LongRun driver for Transmeta Crusoe and "
		"Efficeon processors.");
MODULE_LICENSE("GPL");

module_init(longrun_init);
module_exit(longrun_exit);