/*
 * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/utils.h>
#include <plat/common/platform.h>

#include <brcm_mhu.h>
#include <brcm_scpi.h>
#include <platform_def.h>

#define SCPI_SHARED_MEM_SCP_TO_AP	(PLAT_SCP_COM_SHARED_MEM_BASE)
#define SCPI_SHARED_MEM_AP_TO_SCP	(PLAT_SCP_COM_SHARED_MEM_BASE \
					 + 0x100)

/* Header and payload addresses for commands from AP to SCP */
#define SCPI_CMD_HEADER_AP_TO_SCP \
	((scpi_cmd_t *) SCPI_SHARED_MEM_AP_TO_SCP)
#define SCPI_CMD_PAYLOAD_AP_TO_SCP \
	((void *) (SCPI_SHARED_MEM_AP_TO_SCP + sizeof(scpi_cmd_t)))

/* Header and payload addresses for responses from SCP to AP */
#define SCPI_RES_HEADER_SCP_TO_AP \
	((scpi_cmd_t *) SCPI_SHARED_MEM_SCP_TO_AP)
#define SCPI_RES_PAYLOAD_SCP_TO_AP \
	((void *) (SCPI_SHARED_MEM_SCP_TO_AP + sizeof(scpi_cmd_t)))

/* ID of the MHU slot used for the SCPI protocol */
#define SCPI_MHU_SLOT_ID		0

static void scpi_secure_message_start(void)
{
	mhu_secure_message_start(SCPI_MHU_SLOT_ID);
}

static void scpi_secure_message_send(size_t payload_size)
{
	/*
	 * Ensure that any write to the SCPI payload area is seen by SCP before
	 * we write to the MHU register. If these 2 writes were reordered by
	 * the CPU then SCP would read stale payload data.
	 */
	dmbst();

	mhu_secure_message_send(SCPI_MHU_SLOT_ID);
}

static void scpi_secure_message_receive(scpi_cmd_t *cmd)
{
	uint32_t mhu_status;

	assert(cmd != NULL);

	mhu_status = mhu_secure_message_wait();

	/* Expect an SCPI message, reject any other protocol */
	if (mhu_status != (1 << SCPI_MHU_SLOT_ID)) {
		ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
			mhu_status);
		panic();
	}

	/*
	 * Ensure that any read of the SCPI payload area is done after reading
	 * the MHU register. If these 2 reads were reordered then the CPU would
	 * read invalid payload data.
	 */
	dmbld();

	memcpy(cmd, (void *) SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd));
}

static void scpi_secure_message_end(void)
{
	mhu_secure_message_end(SCPI_MHU_SLOT_ID);
}

int scpi_wait_ready(void)
{
	scpi_cmd_t scpi_cmd;

	VERBOSE("Waiting for SCP_READY command...\n");

	/* Get a message from the SCP */
	scpi_secure_message_start();
	scpi_secure_message_receive(&scpi_cmd);
	scpi_secure_message_end();

	/* We are expecting 'SCP Ready', produce the correct error if it's not */
	scpi_status_t status = SCP_OK;

	if (scpi_cmd.id != SCPI_CMD_SCP_READY) {
		ERROR("Unexpected SCP command: expected #%u, received #%u\n",
			SCPI_CMD_SCP_READY, scpi_cmd.id);
		status = SCP_E_SUPPORT;
	} else if (scpi_cmd.size != 0) {
		ERROR("SCP_READY cmd has incorrect size: expected 0, got %u\n",
			scpi_cmd.size);
		status = SCP_E_SIZE;
	}

	VERBOSE("Sending response for SCP_READY command\n");

	/*
	 * Send our response back to SCP.
	 * We are using the same SCPI header, just update the status field.
	 */
	scpi_cmd.status = status;
	scpi_secure_message_start();
	memcpy((void *) SCPI_SHARED_MEM_AP_TO_SCP, &scpi_cmd, sizeof(scpi_cmd));
	scpi_secure_message_send(0);
	scpi_secure_message_end();

	return status == SCP_OK ? 0 : -1;
}
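
/*
 * Usage sketch (illustrative only, not part of this driver): platform boot
 * code that has just released the SCP from reset is expected to block on the
 * SCP_READY handshake before issuing any other SCPI command, along the lines
 * of:
 *
 *	if (scpi_wait_ready() < 0)
 *		panic();
 *
 * The exact call site (SCP boot or PSCI setup code) lives outside this file
 * and is an assumption here.
 */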

void scpi_set_brcm_power_state(unsigned int mpidr,
			scpi_power_state_t cpu_state,
			scpi_power_state_t cluster_state,
			scpi_power_state_t brcm_state)
{
	scpi_cmd_t *cmd;
	uint32_t state = 0;
	uint32_t *payload_addr;

#if ARM_PLAT_MT
	/*
	 * The current SCPI driver only caters for single-threaded platforms.
	 * Hence we ignore the thread ID (which is always 0) for such platforms.
	 */
	state |= (mpidr >> MPIDR_AFF1_SHIFT) & 0x0f;		/* CPU ID */
	state |= ((mpidr >> MPIDR_AFF2_SHIFT) & 0x0f) << 4;	/* Cluster ID */
#else
	state |= mpidr & 0x0f;		/* CPU ID */
	state |= (mpidr & 0xf00) >> 4;	/* Cluster ID */
#endif /* ARM_PLAT_MT */

	state |= cpu_state << 8;
	state |= cluster_state << 12;
	state |= brcm_state << 16;

	scpi_secure_message_start();

	/* Populate the command header */
	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
	cmd->id = SCPI_CMD_SET_POWER_STATE;
	cmd->set = SCPI_SET_NORMAL;
	cmd->sender = 0;
	cmd->size = sizeof(state);
	/* Populate the command payload */
	payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
	*payload_addr = state;
	scpi_secure_message_send(sizeof(state));

	/*
	 * SCP does not reply to this command in order to avoid MHU interrupts
	 * from the sender, which could interfere with its power state request.
	 */
	scpi_secure_message_end();
}

/*
 * Query and obtain power state from SCP.
 *
 * In response to the query, SCP returns power states of all CPUs in all
 * clusters of the system. The returned response is then filtered based on the
 * supplied MPIDR. Power states of the requested cluster and the CPUs within it
 * are updated via the supplied non-NULL pointer arguments.
 *
 * Returns 0 on success, or -1 on error.
 */
int scpi_get_brcm_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
			unsigned int *cluster_state_p)
{
	scpi_cmd_t *cmd;
	scpi_cmd_t response;
	int power_state, cpu, cluster, rc = -1;

	/*
	 * Extract CPU and cluster membership of the given MPIDR. SCPI caters
	 * for only up to 0xf clusters, and 8 CPUs per cluster.
	 */
	cpu = mpidr & MPIDR_AFFLVL_MASK;
	cluster = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
	if (cpu >= 8 || cluster >= 0xf)
		return -1;

	scpi_secure_message_start();

	/* Populate request headers */
	zeromem(SCPI_CMD_HEADER_AP_TO_SCP, sizeof(*cmd));
	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
	cmd->id = SCPI_CMD_GET_POWER_STATE;

	/*
	 * Send message and wait for SCP's response
	 */
	scpi_secure_message_send(0);
	scpi_secure_message_receive(&response);

	if (response.status != SCP_OK)
		goto exit;

	/* Validate SCP response */
	if (!CHECK_RESPONSE(response, cluster))
		goto exit;

	/* Extract power states for the required cluster */
	power_state = *(((uint16_t *) SCPI_RES_PAYLOAD_SCP_TO_AP) + cluster);
	if (CLUSTER_ID(power_state) != cluster)
		goto exit;

	/* Update power state via the supplied pointers */
	if (cluster_state_p)
		*cluster_state_p = CLUSTER_POWER_STATE(power_state);
	if (cpu_state_p)
		*cpu_state_p = CPU_POWER_STATE(power_state);
	rc = 0;

exit:
	scpi_secure_message_end();
	return rc;
}
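
/*
 * Usage sketch (assumption, not defined in this file): a PSCI handler could
 * query the power state of the calling CPU and its cluster roughly as
 * follows, where read_mpidr() and MPIDR_AFFINITY_MASK come from the TF-A
 * arch headers:
 *
 *	unsigned int cpu_state, cluster_state;
 *
 *	if (scpi_get_brcm_power_state(read_mpidr() & MPIDR_AFFINITY_MASK,
 *				      &cpu_state, &cluster_state) != 0)
 *		return PSCI_E_INVALID_PARAMS;
 *
 * The surrounding PSCI plumbing and the error mapping are hypothetical.
 */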

uint32_t scpi_sys_power_state(scpi_system_state_t system_state)
{
	scpi_cmd_t *cmd;
	uint8_t *payload_addr;

	scpi_secure_message_start();

	/* Populate the command header */
	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
	cmd->id = SCPI_CMD_SYS_POWER_STATE;
	cmd->set = 0;
	cmd->sender = 0;
	cmd->size = sizeof(*payload_addr);
	/* Populate the command payload */
	payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
	*payload_addr = system_state & 0xff;
	scpi_secure_message_send(sizeof(*payload_addr));

	scpi_secure_message_end();

	return SCP_OK;
}
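
/*
 * Usage sketch (assumption, not defined in this file): the PSCI SYSTEM_OFF
 * and SYSTEM_RESET hooks are the natural callers of scpi_sys_power_state(),
 * e.g.
 *
 *	scpi_sys_power_state(scpi_system_shutdown);
 *
 * assuming brcm_scpi.h declares scpi_system_shutdown (and reboot/reset
 * counterparts) as scpi_system_state_t values, mirroring the Arm CSS SCPI
 * driver.
 */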