xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 
23 #define SWSMU_CODE_LAYER_L3
24 
25 #include <linux/firmware.h>
26 #include "amdgpu.h"
27 #include "amdgpu_smu.h"
28 #include "atomfirmware.h"
29 #include "amdgpu_atomfirmware.h"
30 #include "smu_v12_0.h"
31 #include "soc15_common.h"
32 #include "atom.h"
33 #include "smu_cmn.h"
34 
35 #include "asic_reg/mp/mp_12_0_0_offset.h"
36 #include "asic_reg/mp/mp_12_0_0_sh_mask.h"
37 #include "asic_reg/smuio/smuio_12_0_0_offset.h"
38 #include "asic_reg/smuio/smuio_12_0_0_sh_mask.h"
39 
40 /*
41  * DO NOT use these for err/warn/info/debug messages.
42  * Use dev_err, dev_warn, dev_info and dev_dbg instead.
43  * They are more MGPU friendly.
44  */
45 #undef pr_err
46 #undef pr_warn
47 #undef pr_info
48 #undef pr_debug
49 
50 // because some SMU12 based ASICs use older ip offset tables
51 // we should undefine this register from the smuio12 header
52 // to prevent confusion down the road
53 #undef mmPWR_MISC_CNTL_STATUS
54 
55 #define smnMP1_FIRMWARE_FLAGS                                0x3010024
56 
smu_v12_0_check_fw_status(struct smu_context * smu)57 int smu_v12_0_check_fw_status(struct smu_context *smu)
58 {
59 	struct amdgpu_device *adev = smu->adev;
60 	uint32_t mp1_fw_flags;
61 
62 	mp1_fw_flags = RREG32_PCIE(MP1_Public |
63 		(smnMP1_FIRMWARE_FLAGS & 0xffffffff));
64 
65 	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
66 		MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
67 		return 0;
68 
69 	return -EIO;
70 }
71 
smu_v12_0_check_fw_version(struct smu_context * smu)72 int smu_v12_0_check_fw_version(struct smu_context *smu)
73 {
74 	uint32_t if_version = 0xff, smu_version = 0xff;
75 	uint16_t smu_major;
76 	uint8_t smu_minor, smu_debug;
77 	int ret = 0;
78 
79 	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
80 	if (ret)
81 		return ret;
82 
83 	smu_major = (smu_version >> 16) & 0xffff;
84 	smu_minor = (smu_version >> 8) & 0xff;
85 	smu_debug = (smu_version >> 0) & 0xff;
86 
87 	/*
88 	 * 1. if_version mismatch is not critical as our fw is designed
89 	 * to be backward compatible.
90 	 * 2. New fw usually brings some optimizations. But that's visible
91 	 * only on the paired driver.
92 	 * Considering above, we just leave user a warning message instead
93 	 * of halt driver loading.
94 	 */
95 	if (if_version != smu->smc_driver_if_version) {
96 		dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
97 			"smu fw version = 0x%08x (%d.%d.%d)\n",
98 			smu->smc_driver_if_version, if_version,
99 			smu_version, smu_major, smu_minor, smu_debug);
100 		dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
101 	}
102 
103 	return ret;
104 }
105 
/**
 * smu_v12_0_powergate_sdma - power SDMA up or down via SMC message
 * @smu: SMU context
 * @gate: true to power down SDMA, false to power it up
 *
 * Only meaningful on APUs; silently succeeds on other parts.
 *
 * Return: 0 on success or the SMC message error code.
 */
int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
{
	if (!smu->is_apu)
		return 0;

	return smu_cmn_send_smc_msg(smu,
				    gate ? SMU_MSG_PowerDownSdma :
					   SMU_MSG_PowerUpSdma,
				    NULL);
}
116 
/**
 * smu_v12_0_set_gfx_cgpg - enable/disable GFX clock- and power-gating
 * @smu: SMU context
 * @enable: true to enable CGPG, false to disable
 *
 * No-op unless the ASIC advertises GFX powergating support.
 *
 * Return: 0 on success or the SMC message error code.
 */
int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
{
	/* Skip entirely when GFX PG is not supported on this ASIC. */
	if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
		return 0;

	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetGfxCGPG,
					       enable ? 1 : 0, NULL);
}
127 
128 /**
129  * smu_v12_0_get_gfxoff_status - get gfxoff status
130  *
131  * @smu: amdgpu_device pointer
132  *
133  * This function will be used to get gfxoff status
134  *
135  * Returns 0=GFXOFF(default).
136  * Returns 1=Transition out of GFX State.
137  * Returns 2=Not in GFXOFF.
138  * Returns 3=Transition into GFXOFF.
139  */
smu_v12_0_get_gfxoff_status(struct smu_context * smu)140 uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
141 {
142 	uint32_t reg;
143 	uint32_t gfxOff_Status = 0;
144 	struct amdgpu_device *adev = smu->adev;
145 
146 	reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL);
147 	gfxOff_Status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
148 		>> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;
149 
150 	return gfxOff_Status;
151 }
152 
/**
 * smu_v12_0_gfx_off_control - allow or disallow GFXOFF
 * @smu: SMU context
 * @enable: true to allow GFXOFF, false to disallow it
 *
 * When disallowing, poll until the hardware reports it is fully out of
 * GFXOFF (status 2, "Not in GFXOFF"), bounded by a 0.5 second timeout.
 *
 * Return: 0 on success or the SMC message error code.
 */
int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0, timeout = 500;

	if (enable) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
	} else {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);

		/* confirm gfx is back to "on" state, timeout is 0.5 second */
		while (smu_v12_0_get_gfxoff_status(smu) != 2) {
			msleep(1);
			if (--timeout == 0) {
				/*
				 * Use dev_err rather than DRM_ERROR per the
				 * MGPU-friendly logging rule at the top of
				 * this file.
				 */
				dev_err(smu->adev->dev,
					"disable gfxoff timeout and failed!\n");
				break;
			}
		}
	}

	return ret;
}
176 
smu_v12_0_fini_smc_tables(struct smu_context * smu)177 int smu_v12_0_fini_smc_tables(struct smu_context *smu)
178 {
179 	struct smu_table_context *smu_table = &smu->smu_table;
180 
181 	kfree(smu_table->clocks_table);
182 	smu_table->clocks_table = NULL;
183 
184 	kfree(smu_table->metrics_table);
185 	smu_table->metrics_table = NULL;
186 
187 	kfree(smu_table->watermarks_table);
188 	smu_table->watermarks_table = NULL;
189 
190 	kfree(smu_table->gpu_metrics_table);
191 	smu_table->gpu_metrics_table = NULL;
192 
193 	return 0;
194 }
195 
smu_v12_0_set_default_dpm_tables(struct smu_context * smu)196 int smu_v12_0_set_default_dpm_tables(struct smu_context *smu)
197 {
198 	struct smu_table_context *smu_table = &smu->smu_table;
199 
200 	return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
201 }
202 
smu_v12_0_mode2_reset(struct smu_context * smu)203 int smu_v12_0_mode2_reset(struct smu_context *smu){
204 	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
205 }
206 
/**
 * smu_v12_0_set_soft_freq_limited_range - clamp a clock domain to [min, max]
 * @smu: SMU context
 * @clk_type: clock domain to limit
 * @min: hard minimum frequency
 * @max: soft maximum frequency
 *
 * Sends the domain-specific hard-min and soft-max messages. Silently
 * succeeds when DPM is disabled for the requested clock.
 *
 * Return: 0 on success, -EINVAL for an unsupported clock type, or the
 * SMC message error code.
 */
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	enum smu_message_type msg_min, msg_max;
	int ret;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	/* Pick the message pair for this clock domain, then send both. */
	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		msg_min = SMU_MSG_SetHardMinGfxClk;
		msg_max = SMU_MSG_SetSoftMaxGfxClk;
		break;
	case SMU_FCLK:
	case SMU_MCLK:
	case SMU_UCLK:
		msg_min = SMU_MSG_SetHardMinFclkByFreq;
		msg_max = SMU_MSG_SetSoftMaxFclkByFreq;
		break;
	case SMU_SOCCLK:
		msg_min = SMU_MSG_SetHardMinSocclkByFreq;
		msg_max = SMU_MSG_SetSoftMaxSocclkByFreq;
		break;
	case SMU_VCLK:
		msg_min = SMU_MSG_SetHardMinVcn;
		msg_max = SMU_MSG_SetSoftMaxVcn;
		break;
	default:
		return -EINVAL;
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, msg_min, min, NULL);
	if (ret)
		return ret;

	return smu_cmn_send_smc_msg_with_param(smu, msg_max, max, NULL);
}
261 
smu_v12_0_set_driver_table_location(struct smu_context * smu)262 int smu_v12_0_set_driver_table_location(struct smu_context *smu)
263 {
264 	struct smu_table *driver_table = &smu->smu_table.driver_table;
265 	int ret = 0;
266 
267 	if (driver_table->mc_address) {
268 		ret = smu_cmn_send_smc_msg_with_param(smu,
269 				SMU_MSG_SetDriverDramAddrHigh,
270 				upper_32_bits(driver_table->mc_address),
271 				NULL);
272 		if (!ret)
273 			ret = smu_cmn_send_smc_msg_with_param(smu,
274 				SMU_MSG_SetDriverDramAddrLow,
275 				lower_32_bits(driver_table->mc_address),
276 				NULL);
277 	}
278 
279 	return ret;
280 }
281 
smu_v12_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 * gpu_metrics)282 void smu_v12_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics)
283 {
284 	memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v2_0));
285 
286 	gpu_metrics->common_header.structure_size =
287 				sizeof(struct gpu_metrics_v2_0);
288 	gpu_metrics->common_header.format_revision = 2;
289 	gpu_metrics->common_header.content_revision = 0;
290 
291 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
292 }
293