/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <denver.h>
#include <errno.h>
#include <lib/mmio.h>
#include <mce_private.h>
#include <platform_def.h>
#include <t194_nvg.h>
#include <tegra_private.h>

#define ID_AFR0_EL1_CACHE_OPS_SHIFT	12
#define ID_AFR0_EL1_CACHE_OPS_MASK	0xFU

/*
 * Reports the major and minor version of this interface.
 *
 * NVGDATA[0:31]: SW(R) Minor Version
 * NVGDATA[32:63]: SW(R) Major Version
 */
uint64_t nvg_get_version(void)
{
	nvg_set_request(TEGRA_NVG_CHANNEL_VERSION);

	return (uint64_t)nvg_get_result();
}

/*
 * Enable the perf per watt mode.
 *
 * NVGDATA[0]: SW(RW), 1 = enable perf per watt mode
 */
int32_t nvg_enable_power_perf_mode(void)
{
	nvg_set_request_data(TEGRA_NVG_CHANNEL_POWER_PERF, 1U);

	return 0;
}

/*
 * Disable the perf per watt mode.
 *
 * NVGDATA[0]: SW(RW), 0 = disable perf per watt mode
 */
int32_t nvg_disable_power_perf_mode(void)
{
	nvg_set_request_data(TEGRA_NVG_CHANNEL_POWER_PERF, 0U);

	return 0;
}

/*
 * Enable the battery saver mode.
 *
 * NVGDATA[2]: SW(RW), 1 = enable battery saver mode
 */
int32_t nvg_enable_power_saver_modes(void)
{
	nvg_set_request_data(TEGRA_NVG_CHANNEL_POWER_MODES, 1U);

	return 0;
}

/*
 * Disable the battery saver mode.
 *
 * NVGDATA[2]: SW(RW), 0 = disable battery saver mode
 */
int32_t nvg_disable_power_saver_modes(void)
{
	nvg_set_request_data(TEGRA_NVG_CHANNEL_POWER_MODES, 0U);

	return 0;
}

/*
 * Set the expected wake time in TSC ticks for the next low-power state the
 * core enters.
 *
 * NVGDATA[0:31]: SW(RW), WAKE_TIME
 */
void nvg_set_wake_time(uint32_t wake_time)
{
	/* time (TSC ticks) until the core is expected to get a wake event */
	nvg_set_request_data(TEGRA_NVG_CHANNEL_WAKE_TIME, (uint64_t)wake_time);
}

/*
 * This request allows updating of CLUSTER_CSTATE, CCPLEX_CSTATE and
 * SYSTEM_CSTATE values.
 *
 * NVGDATA[0:2]: SW(RW), CLUSTER_CSTATE
 * NVGDATA[7]: SW(W), update cluster flag
 * NVGDATA[8:9]: SW(RW), CG_CSTATE
 * NVGDATA[15]: SW(W), update ccplex flag
 * NVGDATA[16:19]: SW(RW), SYSTEM_CSTATE
 * NVGDATA[23]: SW(W), update system flag
 * NVGDATA[31]: SW(W), update wake mask flag
 * NVGDATA[32:63]: SW(RW), WAKE_MASK
 */
void nvg_update_cstate_info(uint32_t cluster, uint32_t ccplex,
		uint32_t system, uint32_t wake_mask, uint8_t update_wake_mask)
{
	uint64_t val = 0;

	/* update CLUSTER_CSTATE? */
	if (cluster != 0U) {
		val |= ((uint64_t)cluster & CLUSTER_CSTATE_MASK) |
			CLUSTER_CSTATE_UPDATE_BIT;
	}

	/* update CCPLEX_CSTATE? */
	if (ccplex != 0U) {
		val |= (((uint64_t)ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT) |
			CCPLEX_CSTATE_UPDATE_BIT;
	}

	/* update SYSTEM_CSTATE? */
	if (system != 0U) {
		val |= (((uint64_t)system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
			SYSTEM_CSTATE_UPDATE_BIT;
	}

	/* update wake mask value? */
	if (update_wake_mask != 0U) {
		val |= CSTATE_WAKE_MASK_UPDATE_BIT;
	}

	/* set the wake mask */
	val |= ((uint64_t)wake_mask & CSTATE_WAKE_MASK_CLEAR) << CSTATE_WAKE_MASK_SHIFT;

	/* set the updated cstate info */
	nvg_set_request_data(TEGRA_NVG_CHANNEL_CSTATE_INFO, val);
}
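/*
 * Illustrative usage, not called from this file: a platform power-management
 * path would typically program the cluster/CCPLEX/system targets and the
 * wake mask with this request before asking for a core power state. The
 * 'cluster_state', 'ccplex_state', 'system_state', 'wake_mask' and
 * 'wake_time_ticks' names below are placeholders for values the caller
 * derives from its idle policy and the t194_nvg.h state encodings.
 *
 *	nvg_update_cstate_info(cluster_state, ccplex_state, system_state,
 *			       wake_mask, 1U);
 *	(void)nvg_enter_cstate((uint32_t)TEGRA_NVG_CORE_C7, wake_time_ticks);
 */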
/*
 * These indices give MTS the crossover point, in TSC ticks, after which it
 * is no longer viable to enter the named state.
 *
 * Type 5 : NVGDATA[0:31]: C6 Lower bound
 * Type 6 : NVGDATA[0:31]: CC6 Lower bound
 * Type 8 : NVGDATA[0:31]: CG7 Lower bound
 */
int32_t nvg_update_crossover_time(uint32_t type, uint32_t time)
{
	int32_t ret = 0;

	switch (type) {
	case TEGRA_NVG_CHANNEL_CROSSOVER_C6_LOWER_BOUND:
		nvg_set_request_data(TEGRA_NVG_CHANNEL_CROSSOVER_C6_LOWER_BOUND,
			(uint64_t)time);
		break;

	case TEGRA_NVG_CHANNEL_CROSSOVER_CC6_LOWER_BOUND:
		nvg_set_request_data(TEGRA_NVG_CHANNEL_CROSSOVER_CC6_LOWER_BOUND,
			(uint64_t)time);
		break;

	case TEGRA_NVG_CHANNEL_CROSSOVER_CG7_LOWER_BOUND:
		nvg_set_request_data(TEGRA_NVG_CHANNEL_CROSSOVER_CG7_LOWER_BOUND,
			(uint64_t)time);
		break;

	default:
		ERROR("%s: unknown crossover type (%d)\n", __func__, type);
		ret = EINVAL;
		break;
	}

	return ret;
}

/*
 * These NVG calls allow ARM SW to access CSTATE statistical information.
 *
 * NVGDATA[0:3]: SW(RW) Core/cluster/cg id
 * NVGDATA[16:31]: SW(RW) Stat id
 */
int32_t nvg_set_cstate_stat_query_value(uint64_t data)
{
	int32_t ret = 0;

	/* sanity check stat id and core id */
	if ((data >> MCE_STAT_ID_SHIFT) >
			(uint64_t)NVG_STAT_QUERY_C7_RESIDENCY_SUM) {
		ERROR("%s: unknown stat id (%d)\n", __func__,
			(uint32_t)(data >> MCE_STAT_ID_SHIFT));
		ret = EINVAL;
	} else if ((data & MCE_CORE_ID_MASK) > (uint64_t)PLATFORM_CORE_COUNT) {
		ERROR("%s: unknown core id (%d)\n", __func__,
			(uint32_t)(data & MCE_CORE_ID_MASK));
		ret = EINVAL;
	} else {
		nvg_set_request_data(TEGRA_NVG_CHANNEL_CSTATE_STAT_QUERY_REQUEST, data);
	}

	return ret;
}

/*
 * The read-only value associated with the CSTATE_STAT_QUERY_REQUEST
 *
 * NVGDATA[0:63]: SW(R) Stat count
 */
uint64_t nvg_get_cstate_stat_query_value(void)
{
	nvg_set_request(TEGRA_NVG_CHANNEL_CSTATE_STAT_QUERY_VALUE);

	return (uint64_t)nvg_get_result();
}
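/*
 * Illustrative query sequence, not called from this file: a statistic is
 * read in two steps - program the request with the stat id (shifted up by
 * MCE_STAT_ID_SHIFT) and the core/cluster/cg id (masked by MCE_CORE_ID_MASK),
 * then read the counter back. 'core' and 'count' are placeholder variables.
 *
 *	uint64_t req = ((uint64_t)NVG_STAT_QUERY_C7_RESIDENCY_SUM <<
 *			MCE_STAT_ID_SHIFT) |
 *			((uint64_t)core & MCE_CORE_ID_MASK);
 *	if (nvg_set_cstate_stat_query_value(req) == 0) {
 *		count = nvg_get_cstate_stat_query_value();
 *	}
 */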
/*
 * Return a non-zero value if the CCPLEX is able to enter SC7
 *
 * NVGDATA[0]: SW(R), Is allowed result
 */
int32_t nvg_is_sc7_allowed(void)
{
	/* issue command to check if SC7 is allowed */
	nvg_set_request(TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED);

	/* 1 = SC7 allowed, 0 = SC7 not allowed */
	return (int32_t)nvg_get_result();
}

/*
 * Wake an offlined logical core. Note that a core is offlined by entering
 * a C-state where the WAKE_MASK is all 0.
 *
 * NVGDATA[0:3]: SW(W) logical core to online
 */
int32_t nvg_online_core(uint32_t core)
{
	int32_t ret = 0;

	/* sanity check the core ID value */
	if (core > (uint32_t)PLATFORM_CORE_COUNT) {
		ERROR("%s: unknown core id (%d)\n", __func__, core);
		ret = EINVAL;
	} else {
		/* get a core online */
		nvg_set_request_data(TEGRA_NVG_CHANNEL_ONLINE_CORE,
			(uint64_t)core & MCE_CORE_ID_MASK);
	}

	return ret;
}

/*
 * Enables and controls the voltage/frequency hint for CC3. CC3 is disabled
 * by default.
 *
 * NVGDATA[7:0] SW(RW) frequency request
 * NVGDATA[31:31] SW(RW) enable bit
 */
int32_t nvg_cc3_ctrl(uint32_t freq, uint8_t enable)
{
	uint64_t val = 0;

	/*
	 * If the enable bit is cleared, Auto-CC3 is disabled: the SW-visible
	 * frequency request registers for all non-floorswept cores remain
	 * valid independent of StandbyWFI, and the IDLE frequency request
	 * register is disabled. If the bit is set, Auto-CC3 is enabled: the
	 * ARM SW-visible frequency request registers for all non-floorswept
	 * cores are enabled by StandbyWFI (or the equivalent signal), and the
	 * IDLE frequency request register is always kept enabled.
	 */
	if (enable != 0U) {
		val = ((uint64_t)freq & MCE_AUTO_CC3_FREQ_MASK) | MCE_AUTO_CC3_ENABLE_BIT;
	}
	nvg_set_request_data(TEGRA_NVG_CHANNEL_CC3_CTRL, val);

	return 0;
}
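/*
 * Illustrative usage, not called from this file: Auto-CC3 is enabled with a
 * cluster idle frequency request and turned off again by clearing the enable
 * bit. 'idle_freq' is a placeholder for the platform's frequency request
 * value; only the bits covered by MCE_AUTO_CC3_FREQ_MASK are used.
 *
 *	(void)nvg_cc3_ctrl(idle_freq, 1U);
 *	(void)nvg_cc3_ctrl(0U, 0U);
 */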
/*
 * MC GSC (General Security Carveout) register values are expected to be
 * changed by TrustZone ARM code after boot.
 *
 * NVGDATA[0:15] SW(R) GSC enum
 */
int32_t nvg_update_ccplex_gsc(uint32_t gsc_idx)
{
	int32_t ret = 0;

	/* sanity check GSC ID */
	if (gsc_idx > (uint32_t)TEGRA_NVG_CHANNEL_UPDATE_GSC_VPR) {
		ERROR("%s: unknown gsc_idx (%u)\n", __func__, gsc_idx);
		ret = EINVAL;
	} else {
		nvg_set_request_data(TEGRA_NVG_CHANNEL_UPDATE_CCPLEX_GSC,
			(uint64_t)gsc_idx);
	}

	return ret;
}

/*
 * Cache clean operation for all CCPLEX caches.
 */
int32_t nvg_roc_clean_cache(void)
{
	int32_t ret = 0;

	/* check if cache flush through mts is supported */
	if (((read_id_afr0_el1() >> ID_AFR0_EL1_CACHE_OPS_SHIFT) &
			ID_AFR0_EL1_CACHE_OPS_MASK) == 1U) {
		if (nvg_cache_clean() == 0U) {
			ERROR("%s: failed\n", __func__);
			ret = EINVAL;
		}
	} else {
		ret = EINVAL;
	}

	return ret;
}

/*
 * Cache clean and invalidate operation for all CCPLEX caches.
 */
int32_t nvg_roc_flush_cache(void)
{
	int32_t ret = 0;

	/* check if cache flush through mts is supported */
	if (((read_id_afr0_el1() >> ID_AFR0_EL1_CACHE_OPS_SHIFT) &
			ID_AFR0_EL1_CACHE_OPS_MASK) == 1U) {
		if (nvg_cache_clean_inval() == 0U) {
			ERROR("%s: failed\n", __func__);
			ret = EINVAL;
		}
	} else {
		ret = EINVAL;
	}

	return ret;
}

/*
 * Cache clean and invalidate, clear TR-bit operation for all CCPLEX caches.
 */
int32_t nvg_roc_clean_cache_trbits(void)
{
	int32_t ret = 0;

	/* check if cache flush through mts is supported */
	if (((read_id_afr0_el1() >> ID_AFR0_EL1_CACHE_OPS_SHIFT) &
			ID_AFR0_EL1_CACHE_OPS_MASK) == 1U) {
		if (nvg_cache_inval_all() == 0U) {
			ERROR("%s: failed\n", __func__);
			ret = EINVAL;
		}
	} else {
		ret = EINVAL;
	}

	return ret;
}

/*
 * Set the power state for a core.
 */
int32_t nvg_enter_cstate(uint32_t state, uint32_t wake_time)
{
	int32_t ret = 0;
	uint64_t val = 0ULL;

	/* check for allowed power state */
	if ((state != (uint32_t)TEGRA_NVG_CORE_C0) &&
	    (state != (uint32_t)TEGRA_NVG_CORE_C1) &&
	    (state != (uint32_t)TEGRA_NVG_CORE_C6) &&
	    (state != (uint32_t)TEGRA_NVG_CORE_C7)) {
		ERROR("%s: unknown cstate (%d)\n", __func__, state);
		ret = EINVAL;
	} else {
		/* time (TSC ticks) until the core is expected to get a wake event */
		nvg_set_wake_time(wake_time);

		/* set the core cstate */
		val = read_actlr_el1() & ~ACTLR_EL1_PMSTATE_MASK;
		write_actlr_el1(val | (uint64_t)state);
	}

	return ret;
}
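/*
 * Illustrative usage, not called from this file: nvg_enter_cstate() only
 * programs the wake time and the PMSTATE request in ACTLR_EL1; the core is
 * expected to enter the requested state when it subsequently executes a
 * wfi, as a PSCI suspend path would. 'wake_time_ticks' is a placeholder.
 *
 *	if (nvg_enter_cstate((uint32_t)TEGRA_NVG_CORE_C6, wake_time_ticks) == 0) {
 *		wfi();
 *	}
 */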