/*
 * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <errno.h>
#include <gicv2.h>
#include <mmio.h>
#include <plat_arm.h>
#include <platform.h>
#include <psci.h>
#include "pm_api_sys.h"
#include "pm_client.h"
#include "zynqmp_private.h"

/* Secure entry point for released/resumed APU cores, set by plat_setup_psci_ops() */
uintptr_t zynqmp_sec_entry;

void zynqmp_cpu_standby(plat_local_state_t cpu_state)
{
	VERBOSE("%s: cpu_state: 0x%x\n", __func__, cpu_state);

	dsb();
	wfi();
}

static int zynqmp_nopmu_pwr_domain_on(u_register_t mpidr)
{
	uint32_t r;
	unsigned int cpu_id = plat_core_pos_by_mpidr(mpidr);

	VERBOSE("%s: mpidr: 0x%lx\n", __func__, mpidr);

	if (cpu_id == -1)
		return PSCI_E_INTERN_FAIL;

	/* program RVBAR */
	mmio_write_32(APU_RVBAR_L_0 + (cpu_id << 3), zynqmp_sec_entry);
	mmio_write_32(APU_RVBAR_H_0 + (cpu_id << 3), zynqmp_sec_entry >> 32);

	/* clear VINITHI */
	r = mmio_read_32(APU_CONFIG_0);
	r &= ~(1 << APU_CONFIG_0_VINITHI_SHIFT << cpu_id);
	mmio_write_32(APU_CONFIG_0, r);

	/* clear power down request */
	r = mmio_read_32(APU_PWRCTL);
	r &= ~(1 << cpu_id);
	mmio_write_32(APU_PWRCTL, r);

	/* power up island */
	mmio_write_32(PMU_GLOBAL_REQ_PWRUP_EN, 1 << cpu_id);
	mmio_write_32(PMU_GLOBAL_REQ_PWRUP_TRIG, 1 << cpu_id);
	/* FIXME: we should have a way to break out */
	while (mmio_read_32(PMU_GLOBAL_REQ_PWRUP_STATUS) & (1 << cpu_id))
		;

	/* release core reset */
	r = mmio_read_32(CRF_APB_RST_FPD_APU);
	r &= ~((CRF_APB_RST_FPD_APU_ACPU_PWRON_RESET |
		CRF_APB_RST_FPD_APU_ACPU_RESET) << cpu_id);
	mmio_write_32(CRF_APB_RST_FPD_APU, r);

	return PSCI_E_SUCCESS;
}
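
/*
 * Power on the core identified by 'mpidr' by requesting a wake-up from the
 * PMU firmware; the core resumes at zynqmp_sec_entry, the secure entry point
 * handed over by the PSCI framework.
 */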
static int zynqmp_pwr_domain_on(u_register_t mpidr)
{
	unsigned int cpu_id = plat_core_pos_by_mpidr(mpidr);
	const struct pm_proc *proc;

	VERBOSE("%s: mpidr: 0x%lx\n", __func__, mpidr);

	if (cpu_id == -1)
		return PSCI_E_INTERN_FAIL;

	proc = pm_get_proc(cpu_id);

	/* Send request to PMU to wake up selected APU CPU core */
	pm_req_wakeup(proc->node_id, 1, zynqmp_sec_entry, REQ_ACK_BLOCKING);

	return PSCI_E_SUCCESS;
}

static void zynqmp_nopmu_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint32_t r;
	unsigned int cpu_id = plat_my_core_pos();

	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
			__func__, i, target_state->pwr_domain_state[i]);

	/* Prevent interrupts from spuriously waking up this cpu */
	gicv2_cpuif_disable();

	/* set power down request */
	r = mmio_read_32(APU_PWRCTL);
	r |= (1 << cpu_id);
	mmio_write_32(APU_PWRCTL, r);
}

static void zynqmp_pwr_domain_off(const psci_power_state_t *target_state)
{
	unsigned int cpu_id = plat_my_core_pos();
	const struct pm_proc *proc = pm_get_proc(cpu_id);

	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
			__func__, i, target_state->pwr_domain_state[i]);

	/* Prevent interrupts from spuriously waking up this cpu */
	gicv2_cpuif_disable();

	/*
	 * Send request to PMU to power down the appropriate APU CPU core.
	 * According to the PSCI specification, CPU_OFF does not take a
	 * resume address; the core can only be woken up again by CPU_ON,
	 * which supplies the resume address.
	 */
	pm_self_suspend(proc->node_id, MAX_LATENCY, PM_STATE_CPU_IDLE, 0);
}

static void zynqmp_nopmu_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	uint32_t r;
	unsigned int cpu_id = plat_my_core_pos();

	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
			__func__, i, target_state->pwr_domain_state[i]);

	/* set power down request */
	r = mmio_read_32(APU_PWRCTL);
	r |= (1 << cpu_id);
	mmio_write_32(APU_PWRCTL, r);

	/* program RVBAR */
	mmio_write_32(APU_RVBAR_L_0 + (cpu_id << 3), zynqmp_sec_entry);
	mmio_write_32(APU_RVBAR_H_0 + (cpu_id << 3), zynqmp_sec_entry >> 32);

	/* clear VINITHI */
	r = mmio_read_32(APU_CONFIG_0);
	r &= ~(1 << APU_CONFIG_0_VINITHI_SHIFT << cpu_id);
	mmio_write_32(APU_CONFIG_0, r);

	/* enable power up on IRQ */
	mmio_write_32(PMU_GLOBAL_REQ_PWRUP_EN, 1 << cpu_id);
}
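
/*
 * Suspend this core through the PMU. The requested state at power level 1
 * selects between CPU idle and suspend-to-RAM; for suspend-to-RAM the core
 * also exits interconnect coherency before it is powered down.
 */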
static void zynqmp_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	unsigned int state;
	unsigned int cpu_id = plat_my_core_pos();
	const struct pm_proc *proc = pm_get_proc(cpu_id);

	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
			__func__, i, target_state->pwr_domain_state[i]);

	state = target_state->pwr_domain_state[1] > PLAT_MAX_RET_STATE ?
		PM_STATE_SUSPEND_TO_RAM : PM_STATE_CPU_IDLE;

	/* Send request to PMU to suspend this core */
	pm_self_suspend(proc->node_id, MAX_LATENCY, state, zynqmp_sec_entry);

	/* APU is to be turned off */
	if (target_state->pwr_domain_state[1] > PLAT_MAX_RET_STATE) {
		/* disable coherency */
		plat_arm_interconnect_exit_coherency();
	}
}

static void zynqmp_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
			__func__, i, target_state->pwr_domain_state[i]);

	gicv2_cpuif_enable();
	gicv2_pcpu_distif_init();
}

static void zynqmp_nopmu_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
	uint32_t r;
	unsigned int cpu_id = plat_my_core_pos();

	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
			__func__, i, target_state->pwr_domain_state[i]);

	/* disable power up on IRQ */
	mmio_write_32(PMU_GLOBAL_REQ_PWRUP_DIS, 1 << cpu_id);

	/* clear powerdown bit */
	r = mmio_read_32(APU_PWRCTL);
	r &= ~(1 << cpu_id);
	mmio_write_32(APU_PWRCTL, r);
}

static void zynqmp_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
	unsigned int cpu_id = plat_my_core_pos();
	const struct pm_proc *proc = pm_get_proc(cpu_id);

	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
			__func__, i, target_state->pwr_domain_state[i]);

	/* Clear the APU power control register for this cpu */
	pm_client_wakeup(proc);

	/* enable coherency */
	plat_arm_interconnect_enter_coherency();

	/* APU was turned off */
	if (target_state->pwr_domain_state[1] > PLAT_MAX_RET_STATE) {
		plat_arm_gic_init();
	} else {
		gicv2_cpuif_enable();
		gicv2_pcpu_distif_init();
	}
}

/*******************************************************************************
 * ZynqMP handlers to shutdown/reboot the system
 ******************************************************************************/
static void __dead2 zynqmp_nopmu_system_off(void)
{
	ERROR("ZynqMP System Off: operation not handled.\n");

	/* disable coherency */
	plat_arm_interconnect_exit_coherency();

	panic();
}

static void __dead2 zynqmp_system_off(void)
{
	/* disable coherency */
	plat_arm_interconnect_exit_coherency();

	/* Send the power down request to the PMU */
	pm_system_shutdown(PMF_SHUTDOWN_TYPE_SHUTDOWN,
			   PMF_SHUTDOWN_SUBTYPE_SUBSYSTEM);

	while (1)
		wfi();
}
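
/*
 * Reset path for setups without PMU firmware: the SoC-level soft reset is
 * triggered by writing the CRL_APB reset control register directly from here.
 */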
static void __dead2 zynqmp_nopmu_system_reset(void)
{
	/*
	 * This currently triggers a system reset, i.e. the whole system
	 * will be reset, including RPUs, PMU, PL, etc.
	 */

	/* disable coherency */
	plat_arm_interconnect_exit_coherency();

	/* bypass RPLL (needed on 1.0 silicon) */
	uint32_t reg = mmio_read_32(CRL_APB_RPLL_CTRL);
	reg |= CRL_APB_RPLL_CTRL_BYPASS;
	mmio_write_32(CRL_APB_RPLL_CTRL, reg);

	/* trigger system reset */
	mmio_write_32(CRL_APB_RESET_CTRL, CRL_APB_RESET_CTRL_SOFT_RESET);

	while (1)
		wfi();
}

static void __dead2 zynqmp_system_reset(void)
{
	/* disable coherency */
	plat_arm_interconnect_exit_coherency();

	/* Send the system reset request to the PMU */
	pm_system_shutdown(PMF_SHUTDOWN_TYPE_RESET,
			   PMF_SHUTDOWN_SUBTYPE_SUBSYSTEM);

	while (1)
		wfi();
}

int zynqmp_validate_power_state(unsigned int power_state,
				psci_power_state_t *req_state)
{
	VERBOSE("%s: power_state: 0x%x\n", __func__, power_state);

	int pstate = psci_get_pstate_type(power_state);

	assert(req_state);

	/* Sanity check the requested state */
	if (pstate == PSTATE_TYPE_STANDBY)
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_RET_STATE;
	else
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_OFF_STATE;

	/* We expect the 'state id' to be zero */
	if (psci_get_pstate_id(power_state))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

int zynqmp_validate_ns_entrypoint(unsigned long ns_entrypoint)
{
	VERBOSE("%s: ns_entrypoint: 0x%lx\n", __func__, ns_entrypoint);

	/* FIXME: Actually validate */
	return PSCI_E_SUCCESS;
}

void zynqmp_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] = PLAT_MAX_OFF_STATE;
	req_state->pwr_domain_state[1] = PLAT_MAX_OFF_STATE;
}

/*******************************************************************************
 * Export the platform handlers to enable psci to invoke them
 ******************************************************************************/
static const struct plat_psci_ops zynqmp_psci_ops = {
	.cpu_standby = zynqmp_cpu_standby,
	.pwr_domain_on = zynqmp_pwr_domain_on,
	.pwr_domain_off = zynqmp_pwr_domain_off,
	.pwr_domain_suspend = zynqmp_pwr_domain_suspend,
	.pwr_domain_on_finish = zynqmp_pwr_domain_on_finish,
	.pwr_domain_suspend_finish = zynqmp_pwr_domain_suspend_finish,
	.system_off = zynqmp_system_off,
	.system_reset = zynqmp_system_reset,
	.validate_power_state = zynqmp_validate_power_state,
	.validate_ns_entrypoint = zynqmp_validate_ns_entrypoint,
	.get_sys_suspend_power_state = zynqmp_get_sys_suspend_power_state,
};

static const struct plat_psci_ops zynqmp_nopmu_psci_ops = {
	.cpu_standby = zynqmp_cpu_standby,
	.pwr_domain_on = zynqmp_nopmu_pwr_domain_on,
	.pwr_domain_off = zynqmp_nopmu_pwr_domain_off,
	.pwr_domain_suspend = zynqmp_nopmu_pwr_domain_suspend,
	.pwr_domain_on_finish = zynqmp_pwr_domain_on_finish,
	.pwr_domain_suspend_finish = zynqmp_nopmu_pwr_domain_suspend_finish,
	.system_off = zynqmp_nopmu_system_off,
	.system_reset = zynqmp_nopmu_system_reset,
	.validate_power_state = zynqmp_validate_power_state,
	.validate_ns_entrypoint = zynqmp_validate_ns_entrypoint,
	.get_sys_suspend_power_state = zynqmp_get_sys_suspend_power_state,
};
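
/*
 * Two ops tables are provided: zynqmp_psci_ops delegates power management to
 * the PMU firmware through the PM API, while zynqmp_nopmu_psci_ops programs
 * the APU and CRF/CRL registers directly. plat_setup_psci_ops() below selects
 * between them at runtime based on zynqmp_is_pmu_up().
 */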
/*******************************************************************************
 * Export the platform specific power ops.
 ******************************************************************************/
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const struct plat_psci_ops **psci_ops)
{
	zynqmp_sec_entry = sec_entrypoint;

	if (zynqmp_is_pmu_up())
		*psci_ops = &zynqmp_psci_ops;
	else
		*psci_ops = &zynqmp_nopmu_psci_ops;

	return 0;
}