/*
 * Copyright 2018-2020 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <common/debug.h>

#include <plat_gic.h>
#include <plat_common.h>
#include <plat_psci.h>
#ifdef NXP_WARM_BOOT
#include <plat_warm_rst.h>
#endif

#include <platform_def.h>

#if (SOC_CORE_OFF || SOC_CORE_PWR_DWN)
static void __dead2 _no_return_wfi(void)
{
_bl31_dead_wfi:
	wfi();
	goto _bl31_dead_wfi;
}
#endif

#if (SOC_CORE_RELEASE || SOC_CORE_PWR_DWN)
/* the entry for core warm boot */
static uintptr_t warmboot_entry = (uintptr_t) NULL;
#endif

#if (SOC_CORE_RELEASE)
static int _pwr_domain_on(u_register_t mpidr)
{
	int core_pos = plat_core_pos(mpidr);
	int rc = PSCI_E_INVALID_PARAMS;
	u_register_t core_mask;

	if (core_pos >= 0 && core_pos < PLATFORM_CORE_COUNT) {

		_soc_set_start_addr(warmboot_entry);

		dsb();
		isb();

		core_mask = (1 << core_pos);
		rc = _psci_cpu_on(core_mask);
	}

	return (rc);
}
#endif

#if (SOC_CORE_OFF)
static void _pwr_domain_off(const psci_power_state_t *target_state)
{
	u_register_t core_mask = plat_my_core_mask();
	u_register_t core_state = _getCoreState(core_mask);

	/* set core state in internal data */
	core_state = CORE_OFF_PENDING;
	_setCoreState(core_mask, core_state);

	_psci_cpu_prep_off(core_mask);
}
#endif

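/*
 * The handlers below track each core through a per-core state value kept
 * in internal data (CORE_PENDING, CORE_RELEASED, CORE_WAKEUP, CORE_STANDBY,
 * CORE_PWR_DOWN, CORE_OFF_PENDING, CORE_OFF, SYS_OFF_PENDING, SYS_OFF).
 * The *_PENDING values are recorded on the way down and promoted to their
 * final value in the power-down WFI hook; the wakeup and suspend-finish
 * paths return the core to CORE_RELEASED.
 */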
#if (SOC_CORE_OFF || SOC_CORE_PWR_DWN)
static void __dead2 _pwr_down_wfi(const psci_power_state_t *target_state)
{
	u_register_t core_mask = plat_my_core_mask();
	u_register_t core_state = _getCoreState(core_mask);

	switch (core_state) {
#if (SOC_CORE_OFF)
	case CORE_OFF_PENDING:
		/* set core state in internal data */
		core_state = CORE_OFF;
		_setCoreState(core_mask, core_state);

		/* turn the core off */
		_psci_cpu_off_wfi(core_mask, warmboot_entry);
		break;
#endif
#if (SOC_CORE_PWR_DWN)
	case CORE_PWR_DOWN:
		/* power-down the core */
		_psci_cpu_pwrdn_wfi(core_mask, warmboot_entry);
		break;
#endif
#if (SOC_SYSTEM_PWR_DWN)
	case SYS_OFF_PENDING:
		/* set core state in internal data */
		core_state = SYS_OFF;
		_setCoreState(core_mask, core_state);

		/* power-down the system */
		_psci_sys_pwrdn_wfi(core_mask, warmboot_entry);
		break;
#endif
	default:
		_no_return_wfi();
		break;
	}
}
#endif

#if (SOC_CORE_RELEASE || SOC_CORE_RESTART)
static void _pwr_domain_wakeup(const psci_power_state_t *target_state)
{
	u_register_t core_mask = plat_my_core_mask();
	u_register_t core_state = _getCoreState(core_mask);

	switch (core_state) {
	case CORE_PENDING:	/* this core is coming out of reset */

		/* soc per cpu setup */
		soc_init_percpu();

		/* gic per cpu setup */
		plat_gic_pcpu_init();

		/* set core state in internal data */
		core_state = CORE_RELEASED;
		_setCoreState(core_mask, core_state);
		break;

#if (SOC_CORE_RESTART)
	case CORE_WAKEUP:

		/* this core is waking up from OFF */
		_psci_wakeup(core_mask);

		/* set core state in internal data */
		core_state = CORE_RELEASED;
		_setCoreState(core_mask, core_state);

		break;
#endif
	}
}
#endif

#if (SOC_CORE_STANDBY)
static void _pwr_cpu_standby(plat_local_state_t cpu_state)
{
	u_register_t core_mask = plat_my_core_mask();
	u_register_t core_state;

	if (cpu_state == PLAT_MAX_RET_STATE) {

		/* set core state to standby */
		core_state = CORE_STANDBY;
		_setCoreState(core_mask, core_state);

		_psci_core_entr_stdby(core_mask);

		/* when we are here, the core is waking up
		 * set core state to released
		 */
		core_state = CORE_RELEASED;
		_setCoreState(core_mask, core_state);
	}
}
#endif

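/*
 * cpu_suspend path: _pwr_suspend() inspects the composite target state from
 * the highest affinity level down (system, then cluster, then core) and acts
 * on the deepest level that is being powered down or retained, calling the
 * matching _psci_*_prep_* helper and recording the pending core state.
 * _pwr_suspend_finish() mirrors this on wakeup with the _psci_*_exit_*
 * helpers and always returns the core to CORE_RELEASED.
 */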
#if (SOC_CORE_PWR_DWN)
static void _pwr_suspend(const psci_power_state_t *state)
{
	u_register_t core_mask = plat_my_core_mask();
	u_register_t core_state;

	if (state->pwr_domain_state[PLAT_MAX_LVL] == PLAT_MAX_OFF_STATE) {
#if (SOC_SYSTEM_PWR_DWN)
		_psci_sys_prep_pwrdn(core_mask);

		/* set core state */
		core_state = SYS_OFF_PENDING;
		_setCoreState(core_mask, core_state);
#endif
	} else if (state->pwr_domain_state[PLAT_MAX_LVL]
				== PLAT_MAX_RET_STATE) {
#if (SOC_SYSTEM_STANDBY)
		_psci_sys_prep_stdby(core_mask);

		/* set core state */
		core_state = CORE_STANDBY;
		_setCoreState(core_mask, core_state);
#endif
	} else if (state->pwr_domain_state[PLAT_CLSTR_LVL] ==
				PLAT_MAX_OFF_STATE) {
#if (SOC_CLUSTER_PWR_DWN)
		_psci_clstr_prep_pwrdn(core_mask);

		/* set core state */
		core_state = CORE_PWR_DOWN;
		_setCoreState(core_mask, core_state);
#endif
	} else if (state->pwr_domain_state[PLAT_CLSTR_LVL] ==
				PLAT_MAX_RET_STATE) {
#if (SOC_CLUSTER_STANDBY)
		_psci_clstr_prep_stdby(core_mask);

		/* set core state */
		core_state = CORE_STANDBY;
		_setCoreState(core_mask, core_state);
#endif
	} else if (state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_OFF_STATE) {
#if (SOC_CORE_PWR_DWN)
		/* prep the core for power-down */
		_psci_core_prep_pwrdn(core_mask);

		/* set core state */
		core_state = CORE_PWR_DOWN;
		_setCoreState(core_mask, core_state);
#endif
	} else if (state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_RET_STATE) {
#if (SOC_CORE_STANDBY)
		_psci_core_prep_stdby(core_mask);

		/* set core state */
		core_state = CORE_STANDBY;
		_setCoreState(core_mask, core_state);
#endif
	}
}
#endif

#if (SOC_CORE_PWR_DWN)
static void _pwr_suspend_finish(const psci_power_state_t *state)
{
	u_register_t core_mask = plat_my_core_mask();
	u_register_t core_state;

	if (state->pwr_domain_state[PLAT_MAX_LVL] == PLAT_MAX_OFF_STATE) {
#if (SOC_SYSTEM_PWR_DWN)
		_psci_sys_exit_pwrdn(core_mask);

		/* when we are here, the core is back up
		 * set core state to released
		 */
		core_state = CORE_RELEASED;
		_setCoreState(core_mask, core_state);
#endif
	} else if (state->pwr_domain_state[PLAT_MAX_LVL]
				== PLAT_MAX_RET_STATE) {
#if (SOC_SYSTEM_STANDBY)
		_psci_sys_exit_stdby(core_mask);

		/* when we are here, the core is waking up
		 * set core state to released
		 */
		core_state = CORE_RELEASED;
		_setCoreState(core_mask, core_state);
#endif
	} else if (state->pwr_domain_state[PLAT_CLSTR_LVL] ==
				PLAT_MAX_OFF_STATE) {
#if (SOC_CLUSTER_PWR_DWN)
		_psci_clstr_exit_pwrdn(core_mask);

		/* when we are here, the core is waking up
		 * set core state to released
		 */
		core_state = CORE_RELEASED;
		_setCoreState(core_mask, core_state);
#endif
	} else if (state->pwr_domain_state[PLAT_CLSTR_LVL] ==
				PLAT_MAX_RET_STATE) {
#if (SOC_CLUSTER_STANDBY)
		_psci_clstr_exit_stdby(core_mask);

		/* when we are here, the core is waking up
		 * set core state to released
		 */
		core_state = CORE_RELEASED;
		_setCoreState(core_mask, core_state);
#endif
	} else if (state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_OFF_STATE) {
#if (SOC_CORE_PWR_DWN)
		_psci_core_exit_pwrdn(core_mask);

		/* when we are here, the core is back up
		 * set core state to released
		 */
		core_state = CORE_RELEASED;
		_setCoreState(core_mask, core_state);
#endif
	} else if (state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_RET_STATE) {
#if (SOC_CORE_STANDBY)
		_psci_core_exit_stdby(core_mask);

		/* when we are here, the core is waking up
		 * set core state to released
		 */
		core_state = CORE_RELEASED;
		_setCoreState(core_mask, core_state);
#endif
	}
}
#endif

#if (SOC_CORE_STANDBY || SOC_CORE_PWR_DWN)

#define PWR_STATE_TYPE_MASK	0x00010000
#define PWR_STATE_TYPE_STNDBY	0x0
#define PWR_STATE_TYPE_PWRDWN	0x00010000
#define PWR_STATE_LVL_MASK	0x03000000
#define PWR_STATE_LVL_CORE	0x0
#define PWR_STATE_LVL_CLSTR	0x01000000
#define PWR_STATE_LVL_SYS	0x02000000
#define PWR_STATE_LVL_MAX	0x03000000

/* turns a requested power state into a target power state
 * based on SoC capabilities
 */
static int _pwr_state_validate(uint32_t pwr_state,
				psci_power_state_t *state)
{
	int stat = PSCI_E_INVALID_PARAMS;
	int pwrdn = (pwr_state & PWR_STATE_TYPE_MASK);
	int lvl = (pwr_state & PWR_STATE_LVL_MASK);

	switch (lvl) {
	case PWR_STATE_LVL_MAX:
		if (pwrdn && SOC_SYSTEM_PWR_DWN)
			state->pwr_domain_state[PLAT_MAX_LVL] =
				PLAT_MAX_OFF_STATE;
		else if (SOC_SYSTEM_STANDBY)
			state->pwr_domain_state[PLAT_MAX_LVL] =
				PLAT_MAX_RET_STATE;
		/* intentional fall-thru condition */
	case PWR_STATE_LVL_SYS:
		if (pwrdn && SOC_SYSTEM_PWR_DWN)
			state->pwr_domain_state[PLAT_SYS_LVL] =
				PLAT_MAX_OFF_STATE;
		else if (SOC_SYSTEM_STANDBY)
			state->pwr_domain_state[PLAT_SYS_LVL] =
				PLAT_MAX_RET_STATE;
		/* intentional fall-thru condition */
	case PWR_STATE_LVL_CLSTR:
		if (pwrdn && SOC_CLUSTER_PWR_DWN)
			state->pwr_domain_state[PLAT_CLSTR_LVL] =
				PLAT_MAX_OFF_STATE;
		else if (SOC_CLUSTER_STANDBY)
			state->pwr_domain_state[PLAT_CLSTR_LVL] =
				PLAT_MAX_RET_STATE;
		/* intentional fall-thru condition */
	case PWR_STATE_LVL_CORE:
		stat = PSCI_E_SUCCESS;

		if (pwrdn && SOC_CORE_PWR_DWN)
			state->pwr_domain_state[PLAT_CORE_LVL] =
				PLAT_MAX_OFF_STATE;
		else if (SOC_CORE_STANDBY)
			state->pwr_domain_state[PLAT_CORE_LVL] =
				PLAT_MAX_RET_STATE;
		break;
	}
	return (stat);
}

#endif

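/*
 * Example of the mapping performed by _pwr_state_validate(): assuming both
 * SOC_CLUSTER_PWR_DWN and SOC_CORE_PWR_DWN are enabled, a requested
 * pwr_state of 0x01010000 (PWR_STATE_LVL_CLSTR | PWR_STATE_TYPE_PWRDWN)
 * selects the cluster case, which sets
 *	pwr_domain_state[PLAT_CLSTR_LVL] = PLAT_MAX_OFF_STATE
 * and then falls through to the core case, which sets
 *	pwr_domain_state[PLAT_CORE_LVL] = PLAT_MAX_OFF_STATE
 * and returns PSCI_E_SUCCESS.
 */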
#if (SOC_SYSTEM_PWR_DWN)
static void _pwr_state_sys_suspend(psci_power_state_t *req_state)
{
	/* if we need to have per-SoC settings, then we need to
	 * extend this by calling into psci_utils.S and from there
	 * on down to the SoC.S files
	 */

	req_state->pwr_domain_state[PLAT_MAX_LVL] = PLAT_MAX_OFF_STATE;
	req_state->pwr_domain_state[PLAT_SYS_LVL] = PLAT_MAX_OFF_STATE;
	req_state->pwr_domain_state[PLAT_CLSTR_LVL] = PLAT_MAX_OFF_STATE;
	req_state->pwr_domain_state[PLAT_CORE_LVL] = PLAT_MAX_OFF_STATE;
}
#endif

#if defined(NXP_WARM_BOOT) && (SOC_SYSTEM_RESET2)
static int psci_system_reset2(int is_vendor,
			      int reset_type,
			      u_register_t cookie)
{
	int ret = 0;

	INFO("Executing the sequence of warm reset.\n");
	ret = prep_n_execute_warm_reset();

	return ret;
}
#endif

static plat_psci_ops_t _psci_pm_ops = {
#if (SOC_SYSTEM_OFF)
	.system_off = _psci_system_off,
#endif
#if (SOC_SYSTEM_RESET)
	.system_reset = _psci_system_reset,
#endif
#if defined(NXP_WARM_BOOT) && (SOC_SYSTEM_RESET2)
	.system_reset2 = psci_system_reset2,
#endif
#if (SOC_CORE_RELEASE || SOC_CORE_RESTART)
	/* core released or restarted */
	.pwr_domain_on_finish = _pwr_domain_wakeup,
#endif
#if (SOC_CORE_OFF)
	/* core shutting down */
	.pwr_domain_off = _pwr_domain_off,
#endif
#if (SOC_CORE_OFF || SOC_CORE_PWR_DWN)
	.pwr_domain_pwr_down_wfi = _pwr_down_wfi,
#endif
#if (SOC_CORE_STANDBY || SOC_CORE_PWR_DWN)
	/* cpu_suspend */
	.validate_power_state = _pwr_state_validate,
#if (SOC_CORE_STANDBY)
	.cpu_standby = _pwr_cpu_standby,
#endif
#if (SOC_CORE_PWR_DWN)
	.pwr_domain_suspend = _pwr_suspend,
	.pwr_domain_suspend_finish = _pwr_suspend_finish,
#endif
#endif
#if (SOC_SYSTEM_PWR_DWN)
	.get_sys_suspend_power_state = _pwr_state_sys_suspend,
#endif
#if (SOC_CORE_RELEASE)
	/* core executing psci_cpu_on */
	.pwr_domain_on = _pwr_domain_on
#endif
};

#if (SOC_CORE_RELEASE || SOC_CORE_PWR_DWN)
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	warmboot_entry = sec_entrypoint;
	*psci_ops = &_psci_pm_ops;
	return 0;
}

#else

int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	*psci_ops = &_psci_pm_ops;
	return 0;
}
#endif
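
/*
 * Note: plat_setup_psci_ops() is called by the generic PSCI framework during
 * psci_setup(), with sec_entrypoint normally set to the BL31 warm-boot entry
 * point. The address cached in warmboot_entry above is what
 * _soc_set_start_addr() programs before a secondary core is released in
 * _pwr_domain_on().
 */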