/*
 * Copyright (c) 2017 - 2020, Broadcom
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/arm/ccn.h>
#include <lib/bakery_lock.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <lib/spinlock.h>

#include <brcm_scpi.h>
#include <chimp.h>
#include <cmn_plat_util.h>
#include <plat_brcm.h>
#include <platform_def.h>
#include <sr_utils.h>

#include "m0_cfg.h"


#define CORE_PWR_STATE(state)	((state)->pwr_domain_state[MPIDR_AFFLVL0])
#define CLUSTER_PWR_STATE(state) \
			((state)->pwr_domain_state[MPIDR_AFFLVL1])
#define SYSTEM_PWR_STATE(state)	((state)->pwr_domain_state[MPIDR_AFFLVL2])

#define VENDOR_RST_TYPE_SHIFT	4

#if HW_ASSISTED_COHERENCY
/*
 * On systems where participant CPUs are cache-coherent, we can use spinlocks
 * instead of bakery locks.
 */
spinlock_t event_lock;
#define event_lock_get(_lock) spin_lock(&_lock)
#define event_lock_release(_lock) spin_unlock(&_lock)

#else
/*
 * Use bakery locks for state coordination as not all participants are
 * cache coherent now.
 */
DEFINE_BAKERY_LOCK(event_lock);
#define event_lock_get(_lock) bakery_lock_get(&_lock)
#define event_lock_release(_lock) bakery_lock_release(&_lock)
#endif

static int brcm_pwr_domain_on(u_register_t mpidr)
{
	/*
	 * The SCP takes care of powering up parent power domains, so we
	 * only need to care about level 0.
	 */
	scpi_set_brcm_power_state(mpidr, scpi_power_on, scpi_power_on,
				  scpi_power_on);

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Handler called when a power level has just been powered on after
 * being turned off earlier. The target_state encodes the low power state that
 * each level has woken up from. This handler is never invoked with the system
 * power domain uninitialized: either the primary core took care of it as part
 * of cold boot, or the first core awakened from system suspend has already
 * initialized it.
 ******************************************************************************/
static void brcm_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	unsigned long cluster_id = MPIDR_AFFLVL1_VAL(read_mpidr());

	/* Assert that the system power domain need not be initialized */
	assert(SYSTEM_PWR_STATE(target_state) == PLAT_LOCAL_STATE_RUN);

	assert(CORE_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF);

	/*
	 * Perform the common cluster-specific operations, i.e. enable
	 * coherency if this cluster was off.
	 */
	if (CLUSTER_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF) {
		INFO("Cluster #%lu entering snoop/dvm domain\n", cluster_id);
		ccn_enter_snoop_dvm_domain(1 << cluster_id);
	}

	/* Program the GIC per-cpu distributor or re-distributor interface */
	plat_brcm_gic_pcpu_init();

	/* Enable the GIC CPU interface */
	plat_brcm_gic_cpuif_enable();
}
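
/*
 * Common power-down preparation: clear this core's STANDBYWFI event bit
 * (and the cluster's STANDBYWFIL2 bit) in CDRU_PROC_EVENT_CLEAR before a
 * power-down request is sent to the SCP. The bit positions are derived
 * from the cluster id (MPIDR Aff1) and the core id (MPIDR Aff0), and the
 * read-modify-write of the register is serialised with event_lock since
 * several cores may be powering down concurrently.
 */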
static void brcm_power_down_common(void)
{
	unsigned int standbywfil2, standbywfi;
	uint64_t mpidr = read_mpidr_el1();

	switch (MPIDR_AFFLVL1_VAL(mpidr)) {
	case 0x0:
		standbywfi = CDRU_PROC_EVENT_CLEAR__IH0_CDRU_STANDBYWFI;
		standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH0_CDRU_STANDBYWFIL2;
		break;
	case 0x1:
		standbywfi = CDRU_PROC_EVENT_CLEAR__IH1_CDRU_STANDBYWFI;
		standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH1_CDRU_STANDBYWFIL2;
		break;
	case 0x2:
		standbywfi = CDRU_PROC_EVENT_CLEAR__IH2_CDRU_STANDBYWFI;
		standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH2_CDRU_STANDBYWFIL2;
		break;
	case 0x3:
		standbywfi = CDRU_PROC_EVENT_CLEAR__IH3_CDRU_STANDBYWFI;
		standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH3_CDRU_STANDBYWFIL2;
		break;
	default:
		ERROR("Invalid cluster #%" PRIx64 "\n", MPIDR_AFFLVL1_VAL(mpidr));
		return;
	}

	/* Clear the WFI status bits */
	event_lock_get(event_lock);
	mmio_setbits_32(CDRU_PROC_EVENT_CLEAR,
			(1 << (standbywfi + MPIDR_AFFLVL0_VAL(mpidr))) |
			(1 << standbywfil2));
	event_lock_release(event_lock);
}

/*
 * Helper function to inform the SCP of the power-down state.
 */
static void brcm_scp_suspend(const psci_power_state_t *target_state)
{
	uint32_t cluster_state = scpi_power_on;
	uint32_t system_state = scpi_power_on;

	/* Check if power down at system power domain level is requested */
	if (SYSTEM_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF)
		system_state = scpi_power_retention;

	/* Check if the cluster is to be turned off */
	if (CLUSTER_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF)
		cluster_state = scpi_power_off;

	/*
	 * Ask the SCP to power down the appropriate components depending upon
	 * their state.
	 */
	scpi_set_brcm_power_state(read_mpidr_el1(),
				  scpi_power_off,
				  cluster_state,
				  system_state);
}

/*
 * Helper function to turn off a CPU power domain and its parent power domains
 * if applicable. Since SCPI doesn't differentiate between OFF and suspend, we
 * call the suspend helper here.
 */
static void brcm_scp_off(const psci_power_state_t *target_state)
{
	brcm_scp_suspend(target_state);
}

static void brcm_pwr_domain_off(const psci_power_state_t *target_state)
{
	unsigned long cluster_id = MPIDR_AFFLVL1_VAL(read_mpidr_el1());

	assert(CORE_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF);

	/* Prevent interrupts from spuriously waking up this CPU */
	plat_brcm_gic_cpuif_disable();

	/* Turn the redistributor off */
	plat_brcm_gic_redistif_off();

	/* If the cluster is to be turned off, disable coherency */
	if (CLUSTER_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF)
		ccn_exit_snoop_dvm_domain(1 << cluster_id);

	brcm_power_down_common();

	brcm_scp_off(target_state);
}

/*******************************************************************************
 * Handler called when the CPU power domain is about to enter standby.
 ******************************************************************************/
static void brcm_cpu_standby(plat_local_state_t cpu_state)
{
	unsigned int scr;

	assert(cpu_state == PLAT_LOCAL_STATE_RET);

	scr = read_scr_el3();
	/*
	 * Enable the non-secure interrupt to wake the CPU.
	 * In GICv3 affinity routing mode, non-secure group1 interrupts use
	 * the Physical FIQ at EL3, whereas in GICv2 they use the Physical
	 * IRQ. Enabling both bits works for both GICv2 mode and GICv3
	 * affinity routing mode.
	 */
	write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
	isb();
	dsb();
	wfi();

	/*
	 * Restore SCR_EL3 to its original value; synchronisation of scr_el3
	 * is done by the eret in el3_exit, which saves some execution cycles.
	 */
	write_scr_el3(scr);
}
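
/*
 * The system shutdown and reset helpers below share the same power-down
 * sequence: disable the GIC CPU interface so that a pending interrupt
 * cannot wake the core, clean and invalidate the data caches, take the
 * cluster out of the coherency domain, clear the CDRU standby-WFI events
 * and finally hand control to the SCP via SCPI before parking in WFI.
 * If execution ever resumes past the WFI, the request was not handled
 * and the code panics.
 */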
/*
 * Helper function to shut down the system via SCPI.
 */
static void __dead2 brcm_scp_sys_shutdown(void)
{
	/*
	 * Disable the GIC CPU interface to prevent a pending interrupt
	 * from waking up the AP from WFI.
	 */
	plat_brcm_gic_cpuif_disable();

	/* Flush and invalidate data cache */
	dcsw_op_all(DCCISW);

	/* Bring cluster out of coherency domain as it's going to die */
	plat_brcm_interconnect_exit_coherency();

	brcm_power_down_common();

	/* Send the power down request to the SCP */
	scpi_sys_power_state(scpi_system_shutdown);

	wfi();
	ERROR("BRCM System Off: operation not handled.\n");
	panic();
}

/*
 * Helper function to reset the system.
 */
static void __dead2 brcm_scp_sys_reset(unsigned int reset_type)
{
	/*
	 * Disable the GIC CPU interface to prevent a pending interrupt
	 * from waking up the AP from WFI.
	 */
	plat_brcm_gic_cpuif_disable();

	/* Flush and invalidate data cache */
	dcsw_op_all(DCCISW);

	/* Bring cluster out of coherency domain as it's going to die */
	plat_brcm_interconnect_exit_coherency();

	brcm_power_down_common();

	/*
	 * Send the system reset request to the SCP.
	 *
	 * As per the PSCI spec, the system power state can be:
	 *  0 -> Shutdown
	 *  1 -> Reboot - board level reset
	 *  2 -> Reset  - SoC level reset
	 *
	 * The spec allocates 8 bits (2 nibbles) for this. One nibble is
	 * sufficient to encode the state, hence the 2nd nibble is used for
	 * the vendor-defined reset type.
	 */
	scpi_sys_power_state((reset_type << VENDOR_RST_TYPE_SHIFT) |
			     scpi_system_reboot);

	wfi();
	ERROR("BRCM System Reset: operation not handled.\n");
	panic();
}

static void __dead2 brcm_system_reset(void)
{
	unsigned int reset_type;

	if (bcm_chimp_is_nic_mode())
		reset_type = SOFT_RESET_L3;
	else
		reset_type = SOFT_SYS_RESET_L1;

	brcm_scp_sys_reset(reset_type);
}

static int brcm_system_reset2(int is_vendor, int reset_type,
			      u_register_t cookie)
{
	if (!is_vendor) {
		/* Architectural warm boot: only warm reset is supported */
		reset_type = SOFT_RESET_L3;
	} else {
		uint32_t boot_source = (uint32_t)cookie;

		boot_source &= BOOT_SOURCE_MASK;
		brcm_stingray_set_straps(boot_source);
	}
	brcm_scp_sys_reset(reset_type);

	/*
	 * brcm_scp_sys_reset() cannot return (it is a __dead2 function),
	 * but brcm_system_reset2() has to return some value, even in
	 * this case.
	 */
	return 0;
}
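
/*
 * The entry point supplied by the normal world (e.g. for CPU_ON or
 * CPU_SUSPEND) is only accepted if it falls inside one of the non-secure
 * DRAM windows known to the platform; anything else is rejected with
 * PSCI_E_INVALID_ADDRESS. The additional DRAM2/DRAM3 windows are only
 * checked on AArch64 builds.
 */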
static int brcm_validate_ns_entrypoint(uintptr_t entrypoint)
{
	/*
	 * Check if the non-secure entrypoint lies within the non-secure
	 * DRAM.
	 */
	if ((entrypoint >= BRCM_NS_DRAM1_BASE) &&
	    (entrypoint < (BRCM_NS_DRAM1_BASE + BRCM_NS_DRAM1_SIZE)))
		return PSCI_E_SUCCESS;
#ifdef __aarch64__
	if ((entrypoint >= BRCM_DRAM2_BASE) &&
	    (entrypoint < (BRCM_DRAM2_BASE + BRCM_DRAM2_SIZE)))
		return PSCI_E_SUCCESS;

	if ((entrypoint >= BRCM_DRAM3_BASE) &&
	    (entrypoint < (BRCM_DRAM3_BASE + BRCM_DRAM3_SIZE)))
		return PSCI_E_SUCCESS;
#endif

	return PSCI_E_INVALID_ADDRESS;
}

/*******************************************************************************
 * ARM standard platform handler called to check the validity of the power
 * state parameter.
 ******************************************************************************/
static int brcm_validate_power_state(unsigned int power_state,
				     psci_power_state_t *req_state)
{
	int pstate = psci_get_pstate_type(power_state);
	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
	int i;

	assert(req_state);

	if (pwr_lvl > PLAT_MAX_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* Sanity check the requested state */
	if (pstate == PSTATE_TYPE_STANDBY) {
		/*
		 * It's possible to enter standby only at power level 0.
		 * Ignore any other power level.
		 */
		if (pwr_lvl != MPIDR_AFFLVL0)
			return PSCI_E_INVALID_PARAMS;

		req_state->pwr_domain_state[MPIDR_AFFLVL0] =
						PLAT_LOCAL_STATE_RET;
	} else {
		for (i = MPIDR_AFFLVL0; i <= pwr_lvl; i++)
			req_state->pwr_domain_state[i] =
						PLAT_LOCAL_STATE_OFF;
	}

	/* We expect the 'state id' to be zero */
	if (psci_get_pstate_id(power_state))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Export the platform handlers via plat_brcm_psci_pm_ops. The ARM Standard
 * platform layer will take care of registering the handlers with PSCI.
 ******************************************************************************/
plat_psci_ops_t plat_brcm_psci_pm_ops = {
	.pwr_domain_on = brcm_pwr_domain_on,
	.pwr_domain_on_finish = brcm_pwr_domain_on_finish,
	.pwr_domain_off = brcm_pwr_domain_off,
	.cpu_standby = brcm_cpu_standby,
	.system_off = brcm_scp_sys_shutdown,
	.system_reset = brcm_system_reset,
	.system_reset2 = brcm_system_reset2,
	.validate_ns_entrypoint = brcm_validate_ns_entrypoint,
	.validate_power_state = brcm_validate_power_state,
};

int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const struct plat_psci_ops **psci_ops)
{
	*psci_ops = &plat_brcm_psci_pm_ops;

	/* Set up the mailbox with the entry point */
	mmio_write_64(CRMU_CFG_BASE + offsetof(M0CFG, core_cfg.rvbar),
		      sec_entrypoint);

	return 0;
}
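
/*
 * Note: plat_setup_psci_ops() publishes the warm-boot entry point by
 * writing it into the CRMU configuration area (the M0CFG core_cfg.rvbar
 * field). Judging by the field name, it is presumably consumed as the
 * reset vector for cores released by the SCP/M0 firmware; that is an
 * assumption based on the name rather than something this file enforces.
 */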