xref: /OK3568_Linux_fs/kernel/drivers/cpufreq/powernow-k8.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  *   (c) 2003-2012 Advanced Micro Devices, Inc.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  *  Maintainer:
6*4882a593Smuzhiyun  *  Andreas Herrmann <herrmann.der.user@googlemail.com>
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  *  Based on the powernow-k7.c module written by Dave Jones.
9*4882a593Smuzhiyun  *  (C) 2003 Dave Jones on behalf of SuSE Labs
10*4882a593Smuzhiyun  *  (C) 2004 Dominik Brodowski <linux@brodo.de>
11*4882a593Smuzhiyun  *  (C) 2004 Pavel Machek <pavel@ucw.cz>
12*4882a593Smuzhiyun  *  Based upon datasheets & sample CPUs kindly provided by AMD.
13*4882a593Smuzhiyun  *
14*4882a593Smuzhiyun  *  Valuable input gratefully received from Dave Jones, Pavel Machek,
15*4882a593Smuzhiyun  *  Dominik Brodowski, Jacob Shin, and others.
16*4882a593Smuzhiyun  *  Originally developed by Paul Devriendt.
17*4882a593Smuzhiyun  *
18*4882a593Smuzhiyun  *  Processor information obtained from Chapter 9 (Power and Thermal
19*4882a593Smuzhiyun  *  Management) of the "BIOS and Kernel Developer's Guide (BKDG) for
20*4882a593Smuzhiyun  *  the AMD Athlon 64 and AMD Opteron Processors" and section "2.x
21*4882a593Smuzhiyun  *  Power Management" in BKDGs for newer AMD CPU families.
22*4882a593Smuzhiyun  *
23*4882a593Smuzhiyun  *  Tables for specific CPUs can be inferred from AMD's processor
24*4882a593Smuzhiyun  *  power and thermal data sheets, (e.g. 30417.pdf, 30430.pdf, 43375.pdf)
25*4882a593Smuzhiyun  */
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun #include <linux/kernel.h>
30*4882a593Smuzhiyun #include <linux/smp.h>
31*4882a593Smuzhiyun #include <linux/module.h>
32*4882a593Smuzhiyun #include <linux/init.h>
33*4882a593Smuzhiyun #include <linux/cpufreq.h>
34*4882a593Smuzhiyun #include <linux/slab.h>
35*4882a593Smuzhiyun #include <linux/string.h>
36*4882a593Smuzhiyun #include <linux/cpumask.h>
37*4882a593Smuzhiyun #include <linux/io.h>
38*4882a593Smuzhiyun #include <linux/delay.h>
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun #include <asm/msr.h>
41*4882a593Smuzhiyun #include <asm/cpu_device_id.h>
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun #include <linux/acpi.h>
44*4882a593Smuzhiyun #include <linux/mutex.h>
45*4882a593Smuzhiyun #include <acpi/processor.h>
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun #define VERSION "version 2.20.00"
48*4882a593Smuzhiyun #include "powernow-k8.h"
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun /* serialize freq changes  */
51*4882a593Smuzhiyun static DEFINE_MUTEX(fidvid_mutex);
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun static struct cpufreq_driver cpufreq_amd64_driver;
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun /* Return a frequency in MHz, given an input fid */
/* Translate a fid into the corresponding core frequency in MHz. */
static u32 find_freq_from_fid(u32 fid)
{
	u32 mhz = 800;

	mhz += fid * 100;
	return mhz;
}
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun /* Return a frequency in KHz, given an input fid */
find_khz_freq_from_fid(u32 fid)64*4882a593Smuzhiyun static u32 find_khz_freq_from_fid(u32 fid)
65*4882a593Smuzhiyun {
66*4882a593Smuzhiyun 	return 1000 * find_freq_from_fid(fid);
67*4882a593Smuzhiyun }
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun /* Return the vco fid for an input fid
70*4882a593Smuzhiyun  *
71*4882a593Smuzhiyun  * Each "low" fid has corresponding "high" fid, and you can get to "low" fids
72*4882a593Smuzhiyun  * only from corresponding high fids. This returns "high" fid corresponding to
73*4882a593Smuzhiyun  * "low" one.
74*4882a593Smuzhiyun  */
/*
 * Return the vco fid for an input fid.
 *
 * Each "low" fid has a corresponding "high" fid, and "low" fids are only
 * reachable from their corresponding high fids; map a low fid onto that
 * high fid, and pass a fid already in the high range through unchanged.
 */
static u32 convert_fid_to_vco_fid(u32 fid)
{
	if (fid >= HI_FID_TABLE_BOTTOM)
		return fid;

	return 8 + (2 * fid);
}
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun /*
84*4882a593Smuzhiyun  * Return 1 if the pending bit is set. Unless we just instructed the processor
85*4882a593Smuzhiyun  * to transition to a new state, seeing this bit set is really bad news.
86*4882a593Smuzhiyun  */
pending_bit_stuck(void)87*4882a593Smuzhiyun static int pending_bit_stuck(void)
88*4882a593Smuzhiyun {
89*4882a593Smuzhiyun 	u32 lo, hi __always_unused;
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun 	rdmsr(MSR_FIDVID_STATUS, lo, hi);
92*4882a593Smuzhiyun 	return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
93*4882a593Smuzhiyun }
94*4882a593Smuzhiyun 
95*4882a593Smuzhiyun /*
96*4882a593Smuzhiyun  * Update the global current fid / vid values from the status msr.
97*4882a593Smuzhiyun  * Returns 1 on error.
98*4882a593Smuzhiyun  */
static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
{
	u32 lo, hi;
	u32 i = 0;

	/*
	 * Spin until the hardware clears the change-pending bit; the fid/vid
	 * fields in the status MSR are only meaningful once it is clear.
	 */
	do {
		if (i++ > 10000) {
			/* ~10000 reads without the bit clearing: give up */
			pr_debug("detected change pending stuck\n");
			return 1;
		}
		rdmsr(MSR_FIDVID_STATUS, lo, hi);
	} while (lo & MSR_S_LO_CHANGE_PENDING);

	/* cache the now-stable operating point */
	data->currvid = hi & MSR_S_HI_CURRENT_VID;
	data->currfid = lo & MSR_S_LO_CURRENT_FID;

	return 0;
}
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun /* the isochronous relief time */
count_off_irt(struct powernow_k8_data * data)119*4882a593Smuzhiyun static void count_off_irt(struct powernow_k8_data *data)
120*4882a593Smuzhiyun {
121*4882a593Smuzhiyun 	udelay((1 << data->irt) * 10);
122*4882a593Smuzhiyun }
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun /* the voltage stabilization time */
count_off_vst(struct powernow_k8_data * data)125*4882a593Smuzhiyun static void count_off_vst(struct powernow_k8_data *data)
126*4882a593Smuzhiyun {
127*4882a593Smuzhiyun 	udelay(data->vstable * VST_UNITS_20US);
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun /* need to init the control msr to a safe value (for each cpu) */
static void fidvid_msr_init(void)
{
	u32 lo, hi;
	u8 fid, vid;

	/* read the current operating point from the status MSR ... */
	rdmsr(MSR_FIDVID_STATUS, lo, hi);
	vid = hi & MSR_S_HI_CURRENT_VID;
	fid = lo & MSR_S_LO_CURRENT_FID;
	/*
	 * ... and seed the control MSR with it (plus the benign stop-grant
	 * setting), so later transitions start from a known-good state.
	 */
	lo = fid | (vid << MSR_C_LO_VID_SHIFT);
	hi = MSR_C_HI_STP_GNT_BENIGN;
	pr_debug("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi);
	wrmsr(MSR_FIDVID_CTL, lo, hi);
}
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun /* write the new fid value along with the other control fields to the msr */
static int write_new_fid(struct powernow_k8_data *data, u32 fid)
{
	u32 lo;
	u32 savevid = data->currvid;	/* vid must not change on a fid write */
	u32 i = 0;

	if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) {
		pr_err("internal error - overflow on fid write\n");
		return 1;
	}

	/*
	 * Control word: requested fid, current vid, and the "initiate
	 * transition" bit; the high word carries the PLL lock time.
	 */
	lo = fid;
	lo |= (data->currvid << MSR_C_LO_VID_SHIFT);
	lo |= MSR_C_LO_INIT_FID_VID;

	pr_debug("writing fid 0x%x, lo 0x%x, hi 0x%x\n",
		fid, lo, data->plllock * PLL_LOCK_CONVERSION);

	/*
	 * Retry the write (up to 100 times) until the change-pending bit
	 * clears; each query also refreshes data->currfid/currvid.
	 */
	do {
		wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);
		if (i++ > 100) {
			pr_err("Hardware error - pending bit very stuck - no further pstate changes possible\n");
			return 1;
		}
	} while (query_current_values_with_pending_wait(data));

	/* honor the isochronous relief time after the frequency change */
	count_off_irt(data);

	/* sanity: voltage untouched and the new fid actually took */
	if (savevid != data->currvid) {
		pr_err("vid change on fid trans, old 0x%x, new 0x%x\n",
		       savevid, data->currvid);
		return 1;
	}

	if (fid != data->currfid) {
		pr_err("fid trans failed, fid 0x%x, curr 0x%x\n", fid,
			data->currfid);
		return 1;
	}

	return 0;
}
188*4882a593Smuzhiyun 
189*4882a593Smuzhiyun /* Write a new vid to the hardware */
static int write_new_vid(struct powernow_k8_data *data, u32 vid)
{
	u32 lo;
	u32 savefid = data->currfid;	/* fid must not change on a vid write */
	int i = 0;

	if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) {
		pr_err("internal error - overflow on vid write\n");
		return 1;
	}

	/*
	 * Control word: current fid, requested vid, and the "initiate
	 * transition" bit; the high word carries the stop-grant timeout.
	 */
	lo = data->currfid;
	lo |= (vid << MSR_C_LO_VID_SHIFT);
	lo |= MSR_C_LO_INIT_FID_VID;

	pr_debug("writing vid 0x%x, lo 0x%x, hi 0x%x\n",
		vid, lo, STOP_GRANT_5NS);

	/*
	 * Retry the write (up to 100 times) until the change-pending bit
	 * clears; each query also refreshes data->currfid/currvid.
	 */
	do {
		wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
		if (i++ > 100) {
			pr_err("internal error - pending bit very stuck - no further pstate changes possible\n");
			return 1;
		}
	} while (query_current_values_with_pending_wait(data));

	/* sanity: frequency untouched and the new vid actually took */
	if (savefid != data->currfid) {
		pr_err("fid changed on vid trans, old 0x%x new 0x%x\n",
			savefid, data->currfid);
		return 1;
	}

	if (vid != data->currvid) {
		pr_err("vid trans failed, vid 0x%x, curr 0x%x\n",
				vid, data->currvid);
		return 1;
	}

	return 0;
}
230*4882a593Smuzhiyun 
231*4882a593Smuzhiyun /*
232*4882a593Smuzhiyun  * Reduce the vid by the max of step or reqvid.
233*4882a593Smuzhiyun  * Decreasing vid codes represent increasing voltages:
234*4882a593Smuzhiyun  * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of VID_OFF is off.
235*4882a593Smuzhiyun  */
/*
 * Reduce the vid by at most 'step' toward reqvid.
 * Decreasing vid codes represent increasing voltages:
 * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of VID_OFF is off.
 */
static int decrease_vid_code_by_step(struct powernow_k8_data *data,
		u32 reqvid, u32 step)
{
	u32 target = reqvid;

	/* clamp the move to at most 'step' vid codes in one transition */
	if (data->currvid - target > step)
		target = data->currvid - step;

	if (write_new_vid(data, target))
		return 1;

	/* let the core voltage settle before continuing */
	count_off_vst(data);

	return 0;
}
249*4882a593Smuzhiyun 
250*4882a593Smuzhiyun /* Change Opteron/Athlon64 fid and vid, by the 3 phases. */
static int transition_fid_vid(struct powernow_k8_data *data,
		u32 reqfid, u32 reqvid)
{
	/* Phase 1: set up the core voltage for the coming fid change */
	if (core_voltage_pre_transition(data, reqvid, reqfid))
		return 1;

	/* Phase 2: step the frequency (fid) to the requested value */
	if (core_frequency_transition(data, reqfid))
		return 1;

	/* Phase 3: settle the voltage (vid) on the final requested value */
	if (core_voltage_post_transition(data, reqvid))
		return 1;

	/* re-read the status MSR and verify both values actually took */
	if (query_current_values_with_pending_wait(data))
		return 1;

	if ((reqfid != data->currfid) || (reqvid != data->currvid)) {
		pr_err("failed (cpu%d): req 0x%x 0x%x, curr 0x%x 0x%x\n",
				smp_processor_id(),
				reqfid, reqvid, data->currfid, data->currvid);
		return 1;
	}

	pr_debug("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n",
		smp_processor_id(), data->currfid, data->currvid);

	return 0;
}
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun /* Phase 1 - core voltage transition ... setup voltage */
static int core_voltage_pre_transition(struct powernow_k8_data *data,
		u32 reqvid, u32 reqfid)
{
	u32 rvosteps = data->rvo;	/* ramp-voltage-offset steps left */
	u32 savefid = data->currfid;	/* fid must not change in this phase */
	u32 maxvid, lo __always_unused, rvomult = 1;

	pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n",
		smp_processor_id(),
		data->currfid, data->currvid, reqvid, data->rvo);

	/* transitions entirely within the low fid table double the rvo */
	if ((savefid < LO_FID_TABLE_TOP) && (reqfid < LO_FID_TABLE_TOP))
		rvomult = 2;
	rvosteps *= rvomult;
	/* maxvid is in bits 20:16 of the status MSR's high word */
	rdmsr(MSR_FIDVID_STATUS, lo, maxvid);
	maxvid = 0x1f & (maxvid >> 16);
	pr_debug("ph1 maxvid=0x%x\n", maxvid);
	if (reqvid < maxvid) /* lower numbers are higher voltages */
		reqvid = maxvid;

	/* step the vid down (voltage up) until the request is reached */
	while (data->currvid > reqvid) {
		pr_debug("ph1: curr 0x%x, req vid 0x%x\n",
			data->currvid, reqvid);
		if (decrease_vid_code_by_step(data, reqvid, data->vidmvs))
			return 1;
	}

	/*
	 * Apply the additional ramp voltage offset one vid code at a time,
	 * stopping early if the maxvid limit is hit.
	 */
	while ((rvosteps > 0) &&
			((rvomult * data->rvo + data->currvid) > reqvid)) {
		if (data->currvid == maxvid) {
			rvosteps = 0;
		} else {
			pr_debug("ph1: changing vid for rvo, req 0x%x\n",
				data->currvid - 1);
			if (decrease_vid_code_by_step(data, data->currvid-1, 1))
				return 1;
			rvosteps--;
		}
	}

	if (query_current_values_with_pending_wait(data))
		return 1;

	/* the frequency must not have moved while adjusting the voltage */
	if (savefid != data->currfid) {
		pr_err("ph1 err, currfid changed 0x%x\n", data->currfid);
		return 1;
	}

	pr_debug("ph1 complete, currfid 0x%x, currvid 0x%x\n",
		data->currfid, data->currvid);

	return 0;
}
333*4882a593Smuzhiyun 
334*4882a593Smuzhiyun /* Phase 2 - core frequency transition */
static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
{
	u32 vcoreqfid, vcocurrfid, vcofiddiff;
	u32 fid_interval, savevid = data->currvid;	/* vid must not change */

	if (data->currfid == reqfid) {
		/* nothing to do; not fatal, but firmware should not ask */
		pr_err("ph2 null fid transition 0x%x\n", data->currfid);
		return 0;
	}

	pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, reqfid 0x%x\n",
		smp_processor_id(),
		data->currfid, data->currvid, reqfid);

	/*
	 * Walk toward the target in steps, keeping the vco fid difference
	 * at 2 or below for each individual hardware write.
	 */
	vcoreqfid = convert_fid_to_vco_fid(reqfid);
	vcocurrfid = convert_fid_to_vco_fid(data->currfid);
	vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
	    : vcoreqfid - vcocurrfid;

	/* moves entirely within the low fid table can be done in one jump */
	if ((reqfid <= LO_FID_TABLE_TOP) && (data->currfid <= LO_FID_TABLE_TOP))
		vcofiddiff = 0;

	while (vcofiddiff > 2) {
		/* odd fids step by 1, even fids by 2 */
		(data->currfid & 1) ? (fid_interval = 1) : (fid_interval = 2);

		if (reqfid > data->currfid) {
			if (data->currfid > LO_FID_TABLE_TOP) {
				if (write_new_fid(data,
						data->currfid + fid_interval))
					return 1;
			} else {
				/* leaving the low table: jump via the vco fid */
				if (write_new_fid
				    (data,
				     2 + convert_fid_to_vco_fid(data->currfid)))
					return 1;
			}
		} else {
			if (write_new_fid(data, data->currfid - fid_interval))
				return 1;
		}

		vcocurrfid = convert_fid_to_vco_fid(data->currfid);
		vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
		    : vcoreqfid - vcocurrfid;
	}

	/* final write lands exactly on the requested fid */
	if (write_new_fid(data, reqfid))
		return 1;

	if (query_current_values_with_pending_wait(data))
		return 1;

	if (data->currfid != reqfid) {
		pr_err("ph2: mismatch, failed fid transition, curr 0x%x, req 0x%x\n",
			data->currfid, reqfid);
		return 1;
	}

	/* the voltage must not have moved while stepping the frequency */
	if (savevid != data->currvid) {
		pr_err("ph2: vid changed, save 0x%x, curr 0x%x\n",
			savevid, data->currvid);
		return 1;
	}

	pr_debug("ph2 complete, currfid 0x%x, currvid 0x%x\n",
		data->currfid, data->currvid);

	return 0;
}
404*4882a593Smuzhiyun 
405*4882a593Smuzhiyun /* Phase 3 - core voltage transition flow ... jump to the final vid. */
static int core_voltage_post_transition(struct powernow_k8_data *data,
		u32 reqvid)
{
	u32 savefid = data->currfid;	/* fid must not change in this phase */
	u32 savereqvid = reqvid;	/* remember target for the final check */

	pr_debug("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n",
		smp_processor_id(),
		data->currfid, data->currvid);

	/* only touch the hardware if the vid is not already at the target */
	if (reqvid != data->currvid) {
		if (write_new_vid(data, reqvid))
			return 1;

		if (savefid != data->currfid) {
			pr_err("ph3: bad fid change, save 0x%x, curr 0x%x\n",
				savefid, data->currfid);
			return 1;
		}

		if (data->currvid != reqvid) {
			pr_err("ph3: failed vid transition\n, req 0x%x, curr 0x%x",
				reqvid, data->currvid);
			return 1;
		}
	}

	/* re-read the status MSR and double-check the final state */
	if (query_current_values_with_pending_wait(data))
		return 1;

	if (savereqvid != data->currvid) {
		pr_debug("ph3 failed, currvid 0x%x\n", data->currvid);
		return 1;
	}

	if (savefid != data->currfid) {
		pr_debug("ph3 failed, currfid changed 0x%x\n",
			data->currfid);
		return 1;
	}

	pr_debug("ph3 complete, currfid 0x%x, currvid 0x%x\n",
		data->currfid, data->currvid);

	return 0;
}
452*4882a593Smuzhiyun 
/* Matches AMD family 0xf CPUs; exported for module autoloading. */
static const struct x86_cpu_id powernow_k8_ids[] = {
	/* IO based frequency switching */
	X86_MATCH_VENDOR_FAM(AMD, 0xf, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, powernow_k8_ids);
459*4882a593Smuzhiyun 
check_supported_cpu(void * _rc)460*4882a593Smuzhiyun static void check_supported_cpu(void *_rc)
461*4882a593Smuzhiyun {
462*4882a593Smuzhiyun 	u32 eax, ebx, ecx, edx;
463*4882a593Smuzhiyun 	int *rc = _rc;
464*4882a593Smuzhiyun 
465*4882a593Smuzhiyun 	*rc = -ENODEV;
466*4882a593Smuzhiyun 
467*4882a593Smuzhiyun 	eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
468*4882a593Smuzhiyun 
469*4882a593Smuzhiyun 	if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
470*4882a593Smuzhiyun 		if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
471*4882a593Smuzhiyun 		    ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
472*4882a593Smuzhiyun 			pr_info("Processor cpuid %x not supported\n", eax);
473*4882a593Smuzhiyun 			return;
474*4882a593Smuzhiyun 		}
475*4882a593Smuzhiyun 
476*4882a593Smuzhiyun 		eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES);
477*4882a593Smuzhiyun 		if (eax < CPUID_FREQ_VOLT_CAPABILITIES) {
478*4882a593Smuzhiyun 			pr_info("No frequency change capabilities detected\n");
479*4882a593Smuzhiyun 			return;
480*4882a593Smuzhiyun 		}
481*4882a593Smuzhiyun 
482*4882a593Smuzhiyun 		cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
483*4882a593Smuzhiyun 		if ((edx & P_STATE_TRANSITION_CAPABLE)
484*4882a593Smuzhiyun 			!= P_STATE_TRANSITION_CAPABLE) {
485*4882a593Smuzhiyun 			pr_info("Power state transitions not supported\n");
486*4882a593Smuzhiyun 			return;
487*4882a593Smuzhiyun 		}
488*4882a593Smuzhiyun 		*rc = 0;
489*4882a593Smuzhiyun 	}
490*4882a593Smuzhiyun }
491*4882a593Smuzhiyun 
static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
		u8 maxvid)
{
	unsigned int j;
	u8 lastfid = 0xff;	/* tracks the smallest fid seen so far */

	/* validate every BIOS-provided pstate entry before trusting it */
	for (j = 0; j < data->numps; j++) {
		if (pst[j].vid > LEAST_VID) {
			pr_err(FW_BUG "vid %d invalid : 0x%x\n", j,
				pst[j].vid);
			return -EINVAL;
		}
		if (pst[j].vid < data->rvo) {
			/* vid + rvo >= 0 */
			pr_err(FW_BUG "0 vid exceeded with pstate %d\n", j);
			return -ENODEV;
		}
		if (pst[j].vid < maxvid + data->rvo) {
			/* vid + rvo >= maxvid */
			pr_err(FW_BUG "maxvid exceeded with pstate %d\n", j);
			return -ENODEV;
		}
		if (pst[j].fid > MAX_FID) {
			pr_err(FW_BUG "maxfid exceeded with pstate %d\n", j);
			return -ENODEV;
		}
		if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) {
			/* Only first fid is allowed to be in "low" range */
			pr_err(FW_BUG "two low fids - %d : 0x%x\n", j,
				pst[j].fid);
			return -EINVAL;
		}
		if (pst[j].fid < lastfid)
			lastfid = pst[j].fid;
	}
	/* the lowest fid must be even */
	if (lastfid & 1) {
		pr_err(FW_BUG "lastfid invalid\n");
		return -EINVAL;
	}
	/* suspicious but not fatal: warn and keep going */
	if (lastfid > LO_FID_TABLE_TOP)
		pr_info(FW_BUG "first fid not from lo freq table\n");

	return 0;
}
536*4882a593Smuzhiyun 
invalidate_entry(struct cpufreq_frequency_table * powernow_table,unsigned int entry)537*4882a593Smuzhiyun static void invalidate_entry(struct cpufreq_frequency_table *powernow_table,
538*4882a593Smuzhiyun 		unsigned int entry)
539*4882a593Smuzhiyun {
540*4882a593Smuzhiyun 	powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
541*4882a593Smuzhiyun }
542*4882a593Smuzhiyun 
print_basics(struct powernow_k8_data * data)543*4882a593Smuzhiyun static void print_basics(struct powernow_k8_data *data)
544*4882a593Smuzhiyun {
545*4882a593Smuzhiyun 	int j;
546*4882a593Smuzhiyun 	for (j = 0; j < data->numps; j++) {
547*4882a593Smuzhiyun 		if (data->powernow_table[j].frequency !=
548*4882a593Smuzhiyun 				CPUFREQ_ENTRY_INVALID) {
549*4882a593Smuzhiyun 			pr_info("fid 0x%x (%d MHz), vid 0x%x\n",
550*4882a593Smuzhiyun 				data->powernow_table[j].driver_data & 0xff,
551*4882a593Smuzhiyun 				data->powernow_table[j].frequency/1000,
552*4882a593Smuzhiyun 				data->powernow_table[j].driver_data >> 8);
553*4882a593Smuzhiyun 		}
554*4882a593Smuzhiyun 	}
555*4882a593Smuzhiyun 	if (data->batps)
556*4882a593Smuzhiyun 		pr_info("Only %d pstates on battery\n", data->batps);
557*4882a593Smuzhiyun }
558*4882a593Smuzhiyun 
static int fill_powernow_table(struct powernow_k8_data *data,
		struct pst_s *pst, u8 maxvid)
{
	struct cpufreq_frequency_table *powernow_table;
	unsigned int j;

	if (data->batps) {
		/* use ACPI support to get full speed on mains power */
		pr_warn("Only %d pstates usable (use ACPI driver for full range\n",
			data->batps);
		data->numps = data->batps;
	}

	/* fids in the PST must be strictly increasing */
	for (j = 1; j < data->numps; j++) {
		if (pst[j-1].fid >= pst[j].fid) {
			pr_err("PST out of sequence\n");
			return -EINVAL;
		}
	}

	if (data->numps < 2) {
		pr_err("no p states to transition\n");
		return -ENODEV;
	}

	if (check_pst_table(data, pst, maxvid))
		return -EINVAL;

	/* one extra slot for the CPUFREQ_TABLE_END terminator */
	powernow_table = kzalloc((sizeof(*powernow_table)
		* (data->numps + 1)), GFP_KERNEL);
	if (!powernow_table)
		return -ENOMEM;

	/* pack fid into the low byte and vid into the next byte up */
	for (j = 0; j < data->numps; j++) {
		int freq;
		powernow_table[j].driver_data = pst[j].fid; /* lower 8 bits */
		powernow_table[j].driver_data |= (pst[j].vid << 8); /* upper 8 bits */
		freq = find_khz_freq_from_fid(pst[j].fid);
		powernow_table[j].frequency = freq;
	}
	powernow_table[data->numps].frequency = CPUFREQ_TABLE_END;
	powernow_table[data->numps].driver_data = 0;

	/* refresh currfid/currvid; free the table if the hardware is stuck */
	if (query_current_values_with_pending_wait(data)) {
		kfree(powernow_table);
		return -EIO;
	}

	pr_debug("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
	data->powernow_table = powernow_table;
	/* only the first cpu of a core prints, to avoid duplicate logs */
	if (cpumask_first(topology_core_cpumask(data->cpu)) == data->cpu)
		print_basics(data);

	/* not fatal if the current operating point is absent from the PST */
	for (j = 0; j < data->numps; j++)
		if ((pst[j].fid == data->currfid) &&
		    (pst[j].vid == data->currvid))
			return 0;

	pr_debug("currfid/vid do not match PST, ignoring\n");
	return 0;
}
620*4882a593Smuzhiyun 
621*4882a593Smuzhiyun /* Find and validate the PSB/PST table in BIOS. */
find_psb_table(struct powernow_k8_data * data)622*4882a593Smuzhiyun static int find_psb_table(struct powernow_k8_data *data)
623*4882a593Smuzhiyun {
624*4882a593Smuzhiyun 	struct psb_s *psb;
625*4882a593Smuzhiyun 	unsigned int i;
626*4882a593Smuzhiyun 	u32 mvs;
627*4882a593Smuzhiyun 	u8 maxvid;
628*4882a593Smuzhiyun 	u32 cpst = 0;
629*4882a593Smuzhiyun 	u32 thiscpuid;
630*4882a593Smuzhiyun 
631*4882a593Smuzhiyun 	for (i = 0xc0000; i < 0xffff0; i += 0x10) {
632*4882a593Smuzhiyun 		/* Scan BIOS looking for the signature. */
633*4882a593Smuzhiyun 		/* It can not be at ffff0 - it is too big. */
634*4882a593Smuzhiyun 
635*4882a593Smuzhiyun 		psb = phys_to_virt(i);
636*4882a593Smuzhiyun 		if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0)
637*4882a593Smuzhiyun 			continue;
638*4882a593Smuzhiyun 
639*4882a593Smuzhiyun 		pr_debug("found PSB header at 0x%p\n", psb);
640*4882a593Smuzhiyun 
641*4882a593Smuzhiyun 		pr_debug("table vers: 0x%x\n", psb->tableversion);
642*4882a593Smuzhiyun 		if (psb->tableversion != PSB_VERSION_1_4) {
643*4882a593Smuzhiyun 			pr_err(FW_BUG "PSB table is not v1.4\n");
644*4882a593Smuzhiyun 			return -ENODEV;
645*4882a593Smuzhiyun 		}
646*4882a593Smuzhiyun 
647*4882a593Smuzhiyun 		pr_debug("flags: 0x%x\n", psb->flags1);
648*4882a593Smuzhiyun 		if (psb->flags1) {
649*4882a593Smuzhiyun 			pr_err(FW_BUG "unknown flags\n");
650*4882a593Smuzhiyun 			return -ENODEV;
651*4882a593Smuzhiyun 		}
652*4882a593Smuzhiyun 
653*4882a593Smuzhiyun 		data->vstable = psb->vstable;
654*4882a593Smuzhiyun 		pr_debug("voltage stabilization time: %d(*20us)\n",
655*4882a593Smuzhiyun 				data->vstable);
656*4882a593Smuzhiyun 
657*4882a593Smuzhiyun 		pr_debug("flags2: 0x%x\n", psb->flags2);
658*4882a593Smuzhiyun 		data->rvo = psb->flags2 & 3;
659*4882a593Smuzhiyun 		data->irt = ((psb->flags2) >> 2) & 3;
660*4882a593Smuzhiyun 		mvs = ((psb->flags2) >> 4) & 3;
661*4882a593Smuzhiyun 		data->vidmvs = 1 << mvs;
662*4882a593Smuzhiyun 		data->batps = ((psb->flags2) >> 6) & 3;
663*4882a593Smuzhiyun 
664*4882a593Smuzhiyun 		pr_debug("ramp voltage offset: %d\n", data->rvo);
665*4882a593Smuzhiyun 		pr_debug("isochronous relief time: %d\n", data->irt);
666*4882a593Smuzhiyun 		pr_debug("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs);
667*4882a593Smuzhiyun 
668*4882a593Smuzhiyun 		pr_debug("numpst: 0x%x\n", psb->num_tables);
669*4882a593Smuzhiyun 		cpst = psb->num_tables;
670*4882a593Smuzhiyun 		if ((psb->cpuid == 0x00000fc0) ||
671*4882a593Smuzhiyun 		    (psb->cpuid == 0x00000fe0)) {
672*4882a593Smuzhiyun 			thiscpuid = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
673*4882a593Smuzhiyun 			if ((thiscpuid == 0x00000fc0) ||
674*4882a593Smuzhiyun 			    (thiscpuid == 0x00000fe0))
675*4882a593Smuzhiyun 				cpst = 1;
676*4882a593Smuzhiyun 		}
677*4882a593Smuzhiyun 		if (cpst != 1) {
678*4882a593Smuzhiyun 			pr_err(FW_BUG "numpst must be 1\n");
679*4882a593Smuzhiyun 			return -ENODEV;
680*4882a593Smuzhiyun 		}
681*4882a593Smuzhiyun 
682*4882a593Smuzhiyun 		data->plllock = psb->plllocktime;
683*4882a593Smuzhiyun 		pr_debug("plllocktime: 0x%x (units 1us)\n", psb->plllocktime);
684*4882a593Smuzhiyun 		pr_debug("maxfid: 0x%x\n", psb->maxfid);
685*4882a593Smuzhiyun 		pr_debug("maxvid: 0x%x\n", psb->maxvid);
686*4882a593Smuzhiyun 		maxvid = psb->maxvid;
687*4882a593Smuzhiyun 
688*4882a593Smuzhiyun 		data->numps = psb->numps;
689*4882a593Smuzhiyun 		pr_debug("numpstates: 0x%x\n", data->numps);
690*4882a593Smuzhiyun 		return fill_powernow_table(data,
691*4882a593Smuzhiyun 				(struct pst_s *)(psb+1), maxvid);
692*4882a593Smuzhiyun 	}
693*4882a593Smuzhiyun 	/*
694*4882a593Smuzhiyun 	 * If you see this message, complain to BIOS manufacturer. If
695*4882a593Smuzhiyun 	 * he tells you "we do not support Linux" or some similar
696*4882a593Smuzhiyun 	 * nonsense, remember that Windows 2000 uses the same legacy
697*4882a593Smuzhiyun 	 * mechanism that the old Linux PSB driver uses. Tell them it
698*4882a593Smuzhiyun 	 * is broken with Windows 2000.
699*4882a593Smuzhiyun 	 *
700*4882a593Smuzhiyun 	 * The reference to the AMD documentation is chapter 9 in the
701*4882a593Smuzhiyun 	 * BIOS and Kernel Developer's Guide, which is available on
702*4882a593Smuzhiyun 	 * www.amd.com
703*4882a593Smuzhiyun 	 */
704*4882a593Smuzhiyun 	pr_err(FW_BUG "No PSB or ACPI _PSS objects\n");
705*4882a593Smuzhiyun 	pr_err("Make sure that your BIOS is up to date and Cool'N'Quiet support is enabled in BIOS setup\n");
706*4882a593Smuzhiyun 	return -ENODEV;
707*4882a593Smuzhiyun }
708*4882a593Smuzhiyun 
powernow_k8_acpi_pst_values(struct powernow_k8_data * data,unsigned int index)709*4882a593Smuzhiyun static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
710*4882a593Smuzhiyun 		unsigned int index)
711*4882a593Smuzhiyun {
712*4882a593Smuzhiyun 	u64 control;
713*4882a593Smuzhiyun 
714*4882a593Smuzhiyun 	if (!data->acpi_data.state_count)
715*4882a593Smuzhiyun 		return;
716*4882a593Smuzhiyun 
717*4882a593Smuzhiyun 	control = data->acpi_data.states[index].control;
718*4882a593Smuzhiyun 	data->irt = (control >> IRT_SHIFT) & IRT_MASK;
719*4882a593Smuzhiyun 	data->rvo = (control >> RVO_SHIFT) & RVO_MASK;
720*4882a593Smuzhiyun 	data->exttype = (control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
721*4882a593Smuzhiyun 	data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK;
722*4882a593Smuzhiyun 	data->vidmvs = 1 << ((control >> MVS_SHIFT) & MVS_MASK);
723*4882a593Smuzhiyun 	data->vstable = (control >> VST_SHIFT) & VST_MASK;
724*4882a593Smuzhiyun }
725*4882a593Smuzhiyun 
/*
 * Obtain P-state data from ACPI _PSS and build data->powernow_table.
 *
 * Registers with the ACPI processor performance library, validates the
 * result (more than one P-state; control and status registers in fixed
 * hardware space), converts the states into a cpufreq frequency table,
 * and notifies the BIOS via SMM that this driver has taken over.
 *
 * Returns 0 on success, -EIO if ACPI registration fails, or another
 * negative errno.  On any failure after registration the registration
 * is undone and data->acpi_data.state_count is reset to 0 so ->exit()
 * can tell that ACPI was not used.
 */
static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
{
	struct cpufreq_frequency_table *powernow_table;
	int ret_val = -ENODEV;
	u64 control, status;

	if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
		pr_debug("register performance failed: bad ACPI data\n");
		return -EIO;
	}

	/* verify the data contained in the ACPI structures */
	if (data->acpi_data.state_count <= 1) {
		pr_debug("No ACPI P-States\n");
		goto err_out;
	}

	control = data->acpi_data.control_register.space_id;
	status = data->acpi_data.status_register.space_id;

	/* this driver drives fid/vid via MSRs, so both registers must
	 * be in fixed-hardware (FFH) space */
	if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
	    (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
		pr_debug("Invalid control/status registers (%llx - %llx)\n",
			control, status);
		goto err_out;
	}

	/* fill in data->powernow_table (+1 entry for CPUFREQ_TABLE_END) */
	powernow_table = kzalloc((sizeof(*powernow_table)
		* (data->acpi_data.state_count + 1)), GFP_KERNEL);
	if (!powernow_table)
		goto err_out;

	/* fill in data */
	data->numps = data->acpi_data.state_count;
	powernow_k8_acpi_pst_values(data, 0);

	ret_val = fill_powernow_table_fidvid(data, powernow_table);
	if (ret_val)
		goto err_out_mem;

	powernow_table[data->acpi_data.state_count].frequency =
		CPUFREQ_TABLE_END;
	data->powernow_table = powernow_table;

	/* print the table once per package (first CPU of the core mask) */
	if (cpumask_first(topology_core_cpumask(data->cpu)) == data->cpu)
		print_basics(data);

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
		pr_err("unable to alloc powernow_k8_data cpumask\n");
		ret_val = -ENOMEM;
		goto err_out_mem;
	}

	return 0;

err_out_mem:
	kfree(powernow_table);

err_out:
	acpi_processor_unregister_performance(data->cpu);

	/* data->acpi_data.state_count informs us at ->exit()
	 * whether ACPI was used */
	data->acpi_data.state_count = 0;

	return ret_val;
}
797*4882a593Smuzhiyun 
fill_powernow_table_fidvid(struct powernow_k8_data * data,struct cpufreq_frequency_table * powernow_table)798*4882a593Smuzhiyun static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
799*4882a593Smuzhiyun 		struct cpufreq_frequency_table *powernow_table)
800*4882a593Smuzhiyun {
801*4882a593Smuzhiyun 	int i;
802*4882a593Smuzhiyun 
803*4882a593Smuzhiyun 	for (i = 0; i < data->acpi_data.state_count; i++) {
804*4882a593Smuzhiyun 		u32 fid;
805*4882a593Smuzhiyun 		u32 vid;
806*4882a593Smuzhiyun 		u32 freq, index;
807*4882a593Smuzhiyun 		u64 status, control;
808*4882a593Smuzhiyun 
809*4882a593Smuzhiyun 		if (data->exttype) {
810*4882a593Smuzhiyun 			status =  data->acpi_data.states[i].status;
811*4882a593Smuzhiyun 			fid = status & EXT_FID_MASK;
812*4882a593Smuzhiyun 			vid = (status >> VID_SHIFT) & EXT_VID_MASK;
813*4882a593Smuzhiyun 		} else {
814*4882a593Smuzhiyun 			control =  data->acpi_data.states[i].control;
815*4882a593Smuzhiyun 			fid = control & FID_MASK;
816*4882a593Smuzhiyun 			vid = (control >> VID_SHIFT) & VID_MASK;
817*4882a593Smuzhiyun 		}
818*4882a593Smuzhiyun 
819*4882a593Smuzhiyun 		pr_debug("   %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
820*4882a593Smuzhiyun 
821*4882a593Smuzhiyun 		index = fid | (vid<<8);
822*4882a593Smuzhiyun 		powernow_table[i].driver_data = index;
823*4882a593Smuzhiyun 
824*4882a593Smuzhiyun 		freq = find_khz_freq_from_fid(fid);
825*4882a593Smuzhiyun 		powernow_table[i].frequency = freq;
826*4882a593Smuzhiyun 
827*4882a593Smuzhiyun 		/* verify frequency is OK */
828*4882a593Smuzhiyun 		if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) {
829*4882a593Smuzhiyun 			pr_debug("invalid freq %u kHz, ignoring\n", freq);
830*4882a593Smuzhiyun 			invalidate_entry(powernow_table, i);
831*4882a593Smuzhiyun 			continue;
832*4882a593Smuzhiyun 		}
833*4882a593Smuzhiyun 
834*4882a593Smuzhiyun 		/* verify voltage is OK -
835*4882a593Smuzhiyun 		 * BIOSs are using "off" to indicate invalid */
836*4882a593Smuzhiyun 		if (vid == VID_OFF) {
837*4882a593Smuzhiyun 			pr_debug("invalid vid %u, ignoring\n", vid);
838*4882a593Smuzhiyun 			invalidate_entry(powernow_table, i);
839*4882a593Smuzhiyun 			continue;
840*4882a593Smuzhiyun 		}
841*4882a593Smuzhiyun 
842*4882a593Smuzhiyun 		if (freq != (data->acpi_data.states[i].core_frequency * 1000)) {
843*4882a593Smuzhiyun 			pr_info("invalid freq entries %u kHz vs. %u kHz\n",
844*4882a593Smuzhiyun 				freq, (unsigned int)
845*4882a593Smuzhiyun 				(data->acpi_data.states[i].core_frequency
846*4882a593Smuzhiyun 				 * 1000));
847*4882a593Smuzhiyun 			invalidate_entry(powernow_table, i);
848*4882a593Smuzhiyun 			continue;
849*4882a593Smuzhiyun 		}
850*4882a593Smuzhiyun 	}
851*4882a593Smuzhiyun 	return 0;
852*4882a593Smuzhiyun }
853*4882a593Smuzhiyun 
/*
 * Undo powernow_k8_cpu_init_acpi(): drop the ACPI performance
 * registration (if one was made) and release the shared cpumask.
 */
static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
{
	/* state_count == 0 means ACPI init failed or was never done */
	if (data->acpi_data.state_count)
		acpi_processor_unregister_performance(data->cpu);
	free_cpumask_var(data->acpi_data.shared_cpu_map);
}
860*4882a593Smuzhiyun 
get_transition_latency(struct powernow_k8_data * data)861*4882a593Smuzhiyun static int get_transition_latency(struct powernow_k8_data *data)
862*4882a593Smuzhiyun {
863*4882a593Smuzhiyun 	int max_latency = 0;
864*4882a593Smuzhiyun 	int i;
865*4882a593Smuzhiyun 	for (i = 0; i < data->acpi_data.state_count; i++) {
866*4882a593Smuzhiyun 		int cur_latency = data->acpi_data.states[i].transition_latency
867*4882a593Smuzhiyun 			+ data->acpi_data.states[i].bus_master_latency;
868*4882a593Smuzhiyun 		if (cur_latency > max_latency)
869*4882a593Smuzhiyun 			max_latency = cur_latency;
870*4882a593Smuzhiyun 	}
871*4882a593Smuzhiyun 	if (max_latency == 0) {
872*4882a593Smuzhiyun 		pr_err(FW_WARN "Invalid zero transition latency\n");
873*4882a593Smuzhiyun 		max_latency = 1;
874*4882a593Smuzhiyun 	}
875*4882a593Smuzhiyun 	/* value in usecs, needs to be in nanoseconds */
876*4882a593Smuzhiyun 	return 1000 * max_latency;
877*4882a593Smuzhiyun }
878*4882a593Smuzhiyun 
879*4882a593Smuzhiyun /* Take a frequency, and issue the fid/vid transition command */
transition_frequency_fidvid(struct powernow_k8_data * data,unsigned int index,struct cpufreq_policy * policy)880*4882a593Smuzhiyun static int transition_frequency_fidvid(struct powernow_k8_data *data,
881*4882a593Smuzhiyun 		unsigned int index,
882*4882a593Smuzhiyun 		struct cpufreq_policy *policy)
883*4882a593Smuzhiyun {
884*4882a593Smuzhiyun 	u32 fid = 0;
885*4882a593Smuzhiyun 	u32 vid = 0;
886*4882a593Smuzhiyun 	int res;
887*4882a593Smuzhiyun 	struct cpufreq_freqs freqs;
888*4882a593Smuzhiyun 
889*4882a593Smuzhiyun 	pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);
890*4882a593Smuzhiyun 
891*4882a593Smuzhiyun 	/* fid/vid correctness check for k8 */
892*4882a593Smuzhiyun 	/* fid are the lower 8 bits of the index we stored into
893*4882a593Smuzhiyun 	 * the cpufreq frequency table in find_psb_table, vid
894*4882a593Smuzhiyun 	 * are the upper 8 bits.
895*4882a593Smuzhiyun 	 */
896*4882a593Smuzhiyun 	fid = data->powernow_table[index].driver_data & 0xFF;
897*4882a593Smuzhiyun 	vid = (data->powernow_table[index].driver_data & 0xFF00) >> 8;
898*4882a593Smuzhiyun 
899*4882a593Smuzhiyun 	pr_debug("table matched fid 0x%x, giving vid 0x%x\n", fid, vid);
900*4882a593Smuzhiyun 
901*4882a593Smuzhiyun 	if (query_current_values_with_pending_wait(data))
902*4882a593Smuzhiyun 		return 1;
903*4882a593Smuzhiyun 
904*4882a593Smuzhiyun 	if ((data->currvid == vid) && (data->currfid == fid)) {
905*4882a593Smuzhiyun 		pr_debug("target matches current values (fid 0x%x, vid 0x%x)\n",
906*4882a593Smuzhiyun 			fid, vid);
907*4882a593Smuzhiyun 		return 0;
908*4882a593Smuzhiyun 	}
909*4882a593Smuzhiyun 
910*4882a593Smuzhiyun 	pr_debug("cpu %d, changing to fid 0x%x, vid 0x%x\n",
911*4882a593Smuzhiyun 		smp_processor_id(), fid, vid);
912*4882a593Smuzhiyun 	freqs.old = find_khz_freq_from_fid(data->currfid);
913*4882a593Smuzhiyun 	freqs.new = find_khz_freq_from_fid(fid);
914*4882a593Smuzhiyun 
915*4882a593Smuzhiyun 	cpufreq_freq_transition_begin(policy, &freqs);
916*4882a593Smuzhiyun 	res = transition_fid_vid(data, fid, vid);
917*4882a593Smuzhiyun 	cpufreq_freq_transition_end(policy, &freqs, res);
918*4882a593Smuzhiyun 
919*4882a593Smuzhiyun 	return res;
920*4882a593Smuzhiyun }
921*4882a593Smuzhiyun 
922*4882a593Smuzhiyun struct powernowk8_target_arg {
923*4882a593Smuzhiyun 	struct cpufreq_policy		*pol;
924*4882a593Smuzhiyun 	unsigned			newstate;
925*4882a593Smuzhiyun };
926*4882a593Smuzhiyun 
/*
 * Perform the actual P-state switch for powernowk8_target().  Scheduled
 * on pol->cpu via work_on_cpu() so the fid/vid queries and transitions
 * happen on the CPU being changed.  Returns 0 on success, a negative
 * errno on bad state, or 1 if the fid/vid transition itself failed.
 */
static long powernowk8_target_fn(void *arg)
{
	struct powernowk8_target_arg *pta = arg;
	struct cpufreq_policy *pol = pta->pol;
	unsigned newstate = pta->newstate;
	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
	u32 checkfid;
	u32 checkvid;
	int ret;

	if (!data)
		return -EINVAL;

	/* snapshot cached values to detect out-of-sync hardware below */
	checkfid = data->currfid;
	checkvid = data->currvid;

	if (pending_bit_stuck()) {
		pr_err("failing targ, change pending bit set\n");
		return -EIO;
	}

	pr_debug("targ: cpu %d, %d kHz, min %d, max %d\n",
		pol->cpu, data->powernow_table[newstate].frequency, pol->min,
		pol->max);

	if (query_current_values_with_pending_wait(data))
		return -EIO;

	pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
		data->currfid, data->currvid);

	/* warn (but continue) if fid/vid changed behind our back */
	if ((checkvid != data->currvid) ||
	    (checkfid != data->currfid)) {
		pr_info("error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n",
		       checkfid, data->currfid,
		       checkvid, data->currvid);
	}

	mutex_lock(&fidvid_mutex);

	/* load transition parameters for the requested state */
	powernow_k8_acpi_pst_values(data, newstate);

	ret = transition_frequency_fidvid(data, newstate, pol);

	if (ret) {
		pr_err("transition frequency failed\n");
		mutex_unlock(&fidvid_mutex);
		return 1;
	}
	mutex_unlock(&fidvid_mutex);

	pol->cur = find_khz_freq_from_fid(data->currfid);

	return 0;
}
982*4882a593Smuzhiyun 
983*4882a593Smuzhiyun /* Driver entry point to switch to the target frequency */
powernowk8_target(struct cpufreq_policy * pol,unsigned index)984*4882a593Smuzhiyun static int powernowk8_target(struct cpufreq_policy *pol, unsigned index)
985*4882a593Smuzhiyun {
986*4882a593Smuzhiyun 	struct powernowk8_target_arg pta = { .pol = pol, .newstate = index };
987*4882a593Smuzhiyun 
988*4882a593Smuzhiyun 	return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
989*4882a593Smuzhiyun }
990*4882a593Smuzhiyun 
991*4882a593Smuzhiyun struct init_on_cpu {
992*4882a593Smuzhiyun 	struct powernow_k8_data *data;
993*4882a593Smuzhiyun 	int rc;
994*4882a593Smuzhiyun };
995*4882a593Smuzhiyun 
powernowk8_cpu_init_on_cpu(void * _init_on_cpu)996*4882a593Smuzhiyun static void powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
997*4882a593Smuzhiyun {
998*4882a593Smuzhiyun 	struct init_on_cpu *init_on_cpu = _init_on_cpu;
999*4882a593Smuzhiyun 
1000*4882a593Smuzhiyun 	if (pending_bit_stuck()) {
1001*4882a593Smuzhiyun 		pr_err("failing init, change pending bit set\n");
1002*4882a593Smuzhiyun 		init_on_cpu->rc = -ENODEV;
1003*4882a593Smuzhiyun 		return;
1004*4882a593Smuzhiyun 	}
1005*4882a593Smuzhiyun 
1006*4882a593Smuzhiyun 	if (query_current_values_with_pending_wait(init_on_cpu->data)) {
1007*4882a593Smuzhiyun 		init_on_cpu->rc = -ENODEV;
1008*4882a593Smuzhiyun 		return;
1009*4882a593Smuzhiyun 	}
1010*4882a593Smuzhiyun 
1011*4882a593Smuzhiyun 	fidvid_msr_init();
1012*4882a593Smuzhiyun 
1013*4882a593Smuzhiyun 	init_on_cpu->rc = 0;
1014*4882a593Smuzhiyun }
1015*4882a593Smuzhiyun 
1016*4882a593Smuzhiyun #define MISSING_PSS_MSG \
1017*4882a593Smuzhiyun 	FW_BUG "No compatible ACPI _PSS objects found.\n" \
1018*4882a593Smuzhiyun 	FW_BUG "First, make sure Cool'N'Quiet is enabled in the BIOS.\n" \
1019*4882a593Smuzhiyun 	FW_BUG "If that doesn't help, try upgrading your BIOS.\n"
1020*4882a593Smuzhiyun 
1021*4882a593Smuzhiyun /* per CPU init entry point to the driver */
powernowk8_cpu_init(struct cpufreq_policy * pol)1022*4882a593Smuzhiyun static int powernowk8_cpu_init(struct cpufreq_policy *pol)
1023*4882a593Smuzhiyun {
1024*4882a593Smuzhiyun 	struct powernow_k8_data *data;
1025*4882a593Smuzhiyun 	struct init_on_cpu init_on_cpu;
1026*4882a593Smuzhiyun 	int rc, cpu;
1027*4882a593Smuzhiyun 
1028*4882a593Smuzhiyun 	smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
1029*4882a593Smuzhiyun 	if (rc)
1030*4882a593Smuzhiyun 		return -ENODEV;
1031*4882a593Smuzhiyun 
1032*4882a593Smuzhiyun 	data = kzalloc(sizeof(*data), GFP_KERNEL);
1033*4882a593Smuzhiyun 	if (!data)
1034*4882a593Smuzhiyun 		return -ENOMEM;
1035*4882a593Smuzhiyun 
1036*4882a593Smuzhiyun 	data->cpu = pol->cpu;
1037*4882a593Smuzhiyun 
1038*4882a593Smuzhiyun 	if (powernow_k8_cpu_init_acpi(data)) {
1039*4882a593Smuzhiyun 		/*
1040*4882a593Smuzhiyun 		 * Use the PSB BIOS structure. This is only available on
1041*4882a593Smuzhiyun 		 * an UP version, and is deprecated by AMD.
1042*4882a593Smuzhiyun 		 */
1043*4882a593Smuzhiyun 		if (num_online_cpus() != 1) {
1044*4882a593Smuzhiyun 			pr_err_once(MISSING_PSS_MSG);
1045*4882a593Smuzhiyun 			goto err_out;
1046*4882a593Smuzhiyun 		}
1047*4882a593Smuzhiyun 		if (pol->cpu != 0) {
1048*4882a593Smuzhiyun 			pr_err(FW_BUG "No ACPI _PSS objects for CPU other than CPU0. Complain to your BIOS vendor.\n");
1049*4882a593Smuzhiyun 			goto err_out;
1050*4882a593Smuzhiyun 		}
1051*4882a593Smuzhiyun 		rc = find_psb_table(data);
1052*4882a593Smuzhiyun 		if (rc)
1053*4882a593Smuzhiyun 			goto err_out;
1054*4882a593Smuzhiyun 
1055*4882a593Smuzhiyun 		/* Take a crude guess here.
1056*4882a593Smuzhiyun 		 * That guess was in microseconds, so multiply with 1000 */
1057*4882a593Smuzhiyun 		pol->cpuinfo.transition_latency = (
1058*4882a593Smuzhiyun 			 ((data->rvo + 8) * data->vstable * VST_UNITS_20US) +
1059*4882a593Smuzhiyun 			 ((1 << data->irt) * 30)) * 1000;
1060*4882a593Smuzhiyun 	} else /* ACPI _PSS objects available */
1061*4882a593Smuzhiyun 		pol->cpuinfo.transition_latency = get_transition_latency(data);
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun 	/* only run on specific CPU from here on */
1064*4882a593Smuzhiyun 	init_on_cpu.data = data;
1065*4882a593Smuzhiyun 	smp_call_function_single(data->cpu, powernowk8_cpu_init_on_cpu,
1066*4882a593Smuzhiyun 				 &init_on_cpu, 1);
1067*4882a593Smuzhiyun 	rc = init_on_cpu.rc;
1068*4882a593Smuzhiyun 	if (rc != 0)
1069*4882a593Smuzhiyun 		goto err_out_exit_acpi;
1070*4882a593Smuzhiyun 
1071*4882a593Smuzhiyun 	cpumask_copy(pol->cpus, topology_core_cpumask(pol->cpu));
1072*4882a593Smuzhiyun 	data->available_cores = pol->cpus;
1073*4882a593Smuzhiyun 	pol->freq_table = data->powernow_table;
1074*4882a593Smuzhiyun 
1075*4882a593Smuzhiyun 	pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
1076*4882a593Smuzhiyun 		data->currfid, data->currvid);
1077*4882a593Smuzhiyun 
1078*4882a593Smuzhiyun 	/* Point all the CPUs in this policy to the same data */
1079*4882a593Smuzhiyun 	for_each_cpu(cpu, pol->cpus)
1080*4882a593Smuzhiyun 		per_cpu(powernow_data, cpu) = data;
1081*4882a593Smuzhiyun 
1082*4882a593Smuzhiyun 	return 0;
1083*4882a593Smuzhiyun 
1084*4882a593Smuzhiyun err_out_exit_acpi:
1085*4882a593Smuzhiyun 	powernow_k8_cpu_exit_acpi(data);
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun err_out:
1088*4882a593Smuzhiyun 	kfree(data);
1089*4882a593Smuzhiyun 	return -ENODEV;
1090*4882a593Smuzhiyun }
1091*4882a593Smuzhiyun 
powernowk8_cpu_exit(struct cpufreq_policy * pol)1092*4882a593Smuzhiyun static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
1093*4882a593Smuzhiyun {
1094*4882a593Smuzhiyun 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
1095*4882a593Smuzhiyun 	int cpu;
1096*4882a593Smuzhiyun 
1097*4882a593Smuzhiyun 	if (!data)
1098*4882a593Smuzhiyun 		return -EINVAL;
1099*4882a593Smuzhiyun 
1100*4882a593Smuzhiyun 	powernow_k8_cpu_exit_acpi(data);
1101*4882a593Smuzhiyun 
1102*4882a593Smuzhiyun 	kfree(data->powernow_table);
1103*4882a593Smuzhiyun 	kfree(data);
1104*4882a593Smuzhiyun 	for_each_cpu(cpu, pol->cpus)
1105*4882a593Smuzhiyun 		per_cpu(powernow_data, cpu) = NULL;
1106*4882a593Smuzhiyun 
1107*4882a593Smuzhiyun 	return 0;
1108*4882a593Smuzhiyun }
1109*4882a593Smuzhiyun 
query_values_on_cpu(void * _err)1110*4882a593Smuzhiyun static void query_values_on_cpu(void *_err)
1111*4882a593Smuzhiyun {
1112*4882a593Smuzhiyun 	int *err = _err;
1113*4882a593Smuzhiyun 	struct powernow_k8_data *data = __this_cpu_read(powernow_data);
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun 	*err = query_current_values_with_pending_wait(data);
1116*4882a593Smuzhiyun }
1117*4882a593Smuzhiyun 
powernowk8_get(unsigned int cpu)1118*4882a593Smuzhiyun static unsigned int powernowk8_get(unsigned int cpu)
1119*4882a593Smuzhiyun {
1120*4882a593Smuzhiyun 	struct powernow_k8_data *data = per_cpu(powernow_data, cpu);
1121*4882a593Smuzhiyun 	unsigned int khz = 0;
1122*4882a593Smuzhiyun 	int err;
1123*4882a593Smuzhiyun 
1124*4882a593Smuzhiyun 	if (!data)
1125*4882a593Smuzhiyun 		return 0;
1126*4882a593Smuzhiyun 
1127*4882a593Smuzhiyun 	smp_call_function_single(cpu, query_values_on_cpu, &err, true);
1128*4882a593Smuzhiyun 	if (err)
1129*4882a593Smuzhiyun 		goto out;
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 	khz = find_khz_freq_from_fid(data->currfid);
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 
1134*4882a593Smuzhiyun out:
1135*4882a593Smuzhiyun 	return khz;
1136*4882a593Smuzhiyun }
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun static struct cpufreq_driver cpufreq_amd64_driver = {
1139*4882a593Smuzhiyun 	.flags		= CPUFREQ_ASYNC_NOTIFICATION,
1140*4882a593Smuzhiyun 	.verify		= cpufreq_generic_frequency_table_verify,
1141*4882a593Smuzhiyun 	.target_index	= powernowk8_target,
1142*4882a593Smuzhiyun 	.bios_limit	= acpi_processor_get_bios_limit,
1143*4882a593Smuzhiyun 	.init		= powernowk8_cpu_init,
1144*4882a593Smuzhiyun 	.exit		= powernowk8_cpu_exit,
1145*4882a593Smuzhiyun 	.get		= powernowk8_get,
1146*4882a593Smuzhiyun 	.name		= "powernow-k8",
1147*4882a593Smuzhiyun 	.attr		= cpufreq_generic_attr,
1148*4882a593Smuzhiyun };
1149*4882a593Smuzhiyun 
__request_acpi_cpufreq(void)1150*4882a593Smuzhiyun static void __request_acpi_cpufreq(void)
1151*4882a593Smuzhiyun {
1152*4882a593Smuzhiyun 	const char drv[] = "acpi-cpufreq";
1153*4882a593Smuzhiyun 	const char *cur_drv;
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun 	cur_drv = cpufreq_get_current_driver();
1156*4882a593Smuzhiyun 	if (!cur_drv)
1157*4882a593Smuzhiyun 		goto request;
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun 	if (strncmp(cur_drv, drv, min_t(size_t, strlen(cur_drv), strlen(drv))))
1160*4882a593Smuzhiyun 		pr_warn("WTF driver: %s\n", cur_drv);
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 	return;
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun  request:
1165*4882a593Smuzhiyun 	pr_warn("This CPU is not supported anymore, using acpi-cpufreq instead.\n");
1166*4882a593Smuzhiyun 	request_module(drv);
1167*4882a593Smuzhiyun }
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun /* driver entry point for init */
powernowk8_init(void)1170*4882a593Smuzhiyun static int powernowk8_init(void)
1171*4882a593Smuzhiyun {
1172*4882a593Smuzhiyun 	unsigned int i, supported_cpus = 0;
1173*4882a593Smuzhiyun 	int ret;
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun 	if (boot_cpu_has(X86_FEATURE_HW_PSTATE)) {
1176*4882a593Smuzhiyun 		__request_acpi_cpufreq();
1177*4882a593Smuzhiyun 		return -ENODEV;
1178*4882a593Smuzhiyun 	}
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 	if (!x86_match_cpu(powernow_k8_ids))
1181*4882a593Smuzhiyun 		return -ENODEV;
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 	get_online_cpus();
1184*4882a593Smuzhiyun 	for_each_online_cpu(i) {
1185*4882a593Smuzhiyun 		smp_call_function_single(i, check_supported_cpu, &ret, 1);
1186*4882a593Smuzhiyun 		if (!ret)
1187*4882a593Smuzhiyun 			supported_cpus++;
1188*4882a593Smuzhiyun 	}
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 	if (supported_cpus != num_online_cpus()) {
1191*4882a593Smuzhiyun 		put_online_cpus();
1192*4882a593Smuzhiyun 		return -ENODEV;
1193*4882a593Smuzhiyun 	}
1194*4882a593Smuzhiyun 	put_online_cpus();
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 	ret = cpufreq_register_driver(&cpufreq_amd64_driver);
1197*4882a593Smuzhiyun 	if (ret)
1198*4882a593Smuzhiyun 		return ret;
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun 	pr_info("Found %d %s (%d cpu cores) (" VERSION ")\n",
1201*4882a593Smuzhiyun 		num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	return ret;
1204*4882a593Smuzhiyun }
1205*4882a593Smuzhiyun 
1206*4882a593Smuzhiyun /* driver entry point for term */
powernowk8_exit(void)1207*4882a593Smuzhiyun static void __exit powernowk8_exit(void)
1208*4882a593Smuzhiyun {
1209*4882a593Smuzhiyun 	pr_debug("exit\n");
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun 	cpufreq_unregister_driver(&cpufreq_amd64_driver);
1212*4882a593Smuzhiyun }
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com>");
1215*4882a593Smuzhiyun MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@amd.com>");
1216*4882a593Smuzhiyun MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver.");
1217*4882a593Smuzhiyun MODULE_LICENSE("GPL");
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun late_initcall(powernowk8_init);
1220*4882a593Smuzhiyun module_exit(powernowk8_exit);
1221