/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "radeon.h"
#include "sumod.h"
#include "sumo_dpm.h"
#include "ppsmc.h"

#define SUMO_SMU_SERVICE_ROUTINE_PG_INIT        1
#define SUMO_SMU_SERVICE_ROUTINE_ALTVDDNB_NOTIFY  27
#define SUMO_SMU_SERVICE_ROUTINE_GFX_SRV_ID_20  20

struct sumo_power_info *sumo_get_pi(struct radeon_device *rdev);

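/*
 * Hand a service routine request to the SMU: wait for any previous
 * request to finish (INT_DONE), write the service index with INT_REQ
 * set, then poll until the request is latched, acknowledged (INT_ACK)
 * and completed (INT_DONE) before deasserting INT_REQ.
 */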
static void sumo_send_msg_to_smu(struct radeon_device *rdev, u32 id)
{
	u32 gfx_int_req;
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(GFX_INT_STATUS) & INT_DONE)
			break;
		udelay(1);
	}

	gfx_int_req = SERV_INDEX(id) | INT_REQ;
	WREG32(GFX_INT_REQ, gfx_int_req);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(GFX_INT_REQ) & INT_REQ)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(GFX_INT_STATUS) & INT_ACK)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(GFX_INT_STATUS) & INT_DONE)
			break;
		udelay(1);
	}

	gfx_int_req &= ~INT_REQ;
	WREG32(GFX_INT_REQ, gfx_int_req);
}

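/*
 * Program the three M3 arbiter parameter sets (default, UVD, FS3D)
 * back to back into the MCU_M3ARB_PARAMS register array.
 */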
void sumo_initialize_m3_arb(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	u32 i;

	if (!pi->enable_dynamic_m3_arbiter)
		return;

	for (i = 0; i < NUMBER_OF_M3ARB_PARAM_SETS; i++)
		WREG32_RCU(MCU_M3ARB_PARAMS + (i * 4),
			   pi->sys_info.csr_m3_arb_cntl_default[i]);

	for (; i < NUMBER_OF_M3ARB_PARAM_SETS * 2; i++)
		WREG32_RCU(MCU_M3ARB_PARAMS + (i * 4),
			   pi->sys_info.csr_m3_arb_cntl_uvd[i % NUMBER_OF_M3ARB_PARAM_SETS]);

	for (; i < NUMBER_OF_M3ARB_PARAM_SETS * 3; i++)
		WREG32_RCU(MCU_M3ARB_PARAMS + (i * 4),
			   pi->sys_info.csr_m3_arb_cntl_fs3d[i % NUMBER_OF_M3ARB_PARAM_SETS]);
}

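/* Alternate VDDNB control needs SUMO/SUMO2 and SMU firmware >= 0x00010C00. */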
static bool sumo_is_alt_vddnb_supported(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	bool return_code = false;

	if (!pi->enable_alt_vddnb)
		return return_code;

	if ((rdev->family == CHIP_SUMO) || (rdev->family == CHIP_SUMO2)) {
		if (pi->fw_version >= 0x00010C00)
			return_code = true;
	}

	return return_code;
}

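/*
 * Latch the notification parameter (bit 0 = powersaving, bit 1 = force_nbps1)
 * in RCU_ALTVDDNB_NOTIFY and kick the ALTVDDNB service routine.
 */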
void sumo_smu_notify_alt_vddnb_change(struct radeon_device *rdev,
				      bool powersaving, bool force_nbps1)
{
	u32 param = 0;

	if (!sumo_is_alt_vddnb_supported(rdev))
		return;

	if (powersaving)
		param |= 1;

	if (force_nbps1)
		param |= 2;

	WREG32_RCU(RCU_ALTVDDNB_NOTIFY, param);

	sumo_send_msg_to_smu(rdev, SUMO_SMU_SERVICE_ROUTINE_ALTVDDNB_NOTIFY);
}

void sumo_smu_pg_init(struct radeon_device *rdev)
{
	sumo_send_msg_to_smu(rdev, SUMO_SMU_SERVICE_ROUTINE_PG_INIT);
}

static u32 sumo_power_of_4(u32 unit)
{
	u32 ret = 1;
	u32 i;

	for (i = 0; i < unit; i++)
		ret *= 4;

	return ret;
}

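/*
 * Derive the GNB power reporting timer period from xclk and the LCLK
 * scaling prescaler, program the boost/throttle margins and TDP limits,
 * then notify the SMU via service routine 20.
 */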
void sumo_enable_boost_timer(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	u32 period, unit, timer_value;
	u32 xclk = radeon_get_xclk(rdev);

	unit = (RREG32_RCU(RCU_LCLK_SCALING_CNTL) & LCLK_SCALING_TIMER_PRESCALER_MASK)
		>> LCLK_SCALING_TIMER_PRESCALER_SHIFT;

	period = 100 * (xclk / 100 / sumo_power_of_4(unit));

	timer_value = (period << 16) | (unit << 4);

	WREG32_RCU(RCU_GNB_PWR_REP_TIMER_CNTL, timer_value);
	WREG32_RCU(RCU_BOOST_MARGIN, pi->sys_info.sclk_dpm_boost_margin);
	WREG32_RCU(RCU_THROTTLE_MARGIN, pi->sys_info.sclk_dpm_throttle_margin);
	WREG32_RCU(GNB_TDP_LIMIT, pi->sys_info.gnb_tdp_limit);
	WREG32_RCU(RCU_SclkDpmTdpLimitPG, pi->sys_info.sclk_dpm_tdp_limit_pg);

	sumo_send_msg_to_smu(rdev, SUMO_SMU_SERVICE_ROUTINE_GFX_SRV_ID_20);
}

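/*
 * The per-state TDP limits are packed as two 12-bit fields per
 * RCU_SclkDpmTdpLimit register; update only the field selected by index.
 */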
void sumo_set_tdp_limit(struct radeon_device *rdev, u32 index, u32 tdp_limit)
{
	u32 regoffset = 0;
	u32 shift = 0;
	u32 mask = 0xFFF;
	u32 sclk_dpm_tdp_limit;

	switch (index) {
	case 0:
		regoffset = RCU_SclkDpmTdpLimit01;
		shift = 16;
		break;
	case 1:
		regoffset = RCU_SclkDpmTdpLimit01;
		shift = 0;
		break;
	case 2:
		regoffset = RCU_SclkDpmTdpLimit23;
		shift = 16;
		break;
	case 3:
		regoffset = RCU_SclkDpmTdpLimit23;
		shift = 0;
		break;
	case 4:
		regoffset = RCU_SclkDpmTdpLimit47;
		shift = 16;
		break;
	case 7:
		regoffset = RCU_SclkDpmTdpLimit47;
		shift = 0;
		break;
	default:
		break;
	}

	sclk_dpm_tdp_limit = RREG32_RCU(regoffset);
	sclk_dpm_tdp_limit &= ~(mask << shift);
	sclk_dpm_tdp_limit |= (tdp_limit << shift);
	WREG32_RCU(regoffset, sclk_dpm_tdp_limit);
}

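/* Bit 0 of RCU_GPU_BOOST_DISABLE gates boost: 0 = boost enabled, 1 = disabled. */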
void sumo_boost_state_enable(struct radeon_device *rdev, bool enable)
{
	u32 boost_disable = RREG32_RCU(RCU_GPU_BOOST_DISABLE);

	boost_disable &= 0xFFFFFFFE;
	boost_disable |= (enable ? 0 : 1);
	WREG32_RCU(RCU_GPU_BOOST_DISABLE, boost_disable);
}

u32 sumo_get_running_fw_version(struct radeon_device *rdev)
{
	return RREG32_RCU(RCU_FW_VERSION);
}