/*
 * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/arm/css/css_mhu.h>
#include <drivers/arm/css/css_scpi.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
#include <platform_def.h>

#define SCPI_SHARED_MEM_SCP_TO_AP	PLAT_CSS_SCP_COM_SHARED_MEM_BASE
#define SCPI_SHARED_MEM_AP_TO_SCP	(PLAT_CSS_SCP_COM_SHARED_MEM_BASE \
						+ 0x100)

/* Header and payload addresses for commands from AP to SCP */
#define SCPI_CMD_HEADER_AP_TO_SCP \
	((scpi_cmd_t *) SCPI_SHARED_MEM_AP_TO_SCP)
#define SCPI_CMD_PAYLOAD_AP_TO_SCP \
	((void *) (SCPI_SHARED_MEM_AP_TO_SCP + sizeof(scpi_cmd_t)))

/* Header and payload addresses for responses from SCP to AP */
#define SCPI_RES_HEADER_SCP_TO_AP \
	((scpi_cmd_t *) SCPI_SHARED_MEM_SCP_TO_AP)
#define SCPI_RES_PAYLOAD_SCP_TO_AP \
	((void *) (SCPI_SHARED_MEM_SCP_TO_AP + sizeof(scpi_cmd_t)))

/* ID of the MHU slot used for the SCPI protocol */
#define SCPI_MHU_SLOT_ID	0

static void scpi_secure_message_start(void)
{
	mhu_secure_message_start(SCPI_MHU_SLOT_ID);
}

static void scpi_secure_message_send(size_t payload_size)
{
	/*
	 * Ensure that any write to the SCPI payload area is seen by SCP before
	 * we write to the MHU register. If these 2 writes were reordered by
	 * the CPU then SCP would read stale payload data.
	 */
	dmbst();

	mhu_secure_message_send(SCPI_MHU_SLOT_ID);
}

static void scpi_secure_message_receive(scpi_cmd_t *cmd)
{
	uint32_t mhu_status;

	assert(cmd != NULL);

	mhu_status = mhu_secure_message_wait();

	/* Expect an SCPI message, reject any other protocol */
	if (mhu_status != (1 << SCPI_MHU_SLOT_ID)) {
		ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
			mhu_status);
		panic();
	}

	/*
	 * Ensure that any read from the SCPI payload area is done after
	 * reading the MHU register. If these 2 reads were reordered then the
	 * CPU would read invalid payload data.
	 */
	dmbld();

	memcpy(cmd, (void *) SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd));
}

static void scpi_secure_message_end(void)
{
	mhu_secure_message_end(SCPI_MHU_SLOT_ID);
}

int scpi_wait_ready(void)
{
	scpi_cmd_t scpi_cmd;

	VERBOSE("Waiting for SCP_READY command...\n");

	/* Get a message from the SCP */
	scpi_secure_message_start();
	scpi_secure_message_receive(&scpi_cmd);
	scpi_secure_message_end();

	/* We are expecting 'SCP Ready'; produce the correct error if it's not */
	scpi_status_t status = SCP_OK;
	if (scpi_cmd.id != SCPI_CMD_SCP_READY) {
		ERROR("Unexpected SCP command: expected command #%u, got command #%u\n",
			SCPI_CMD_SCP_READY, scpi_cmd.id);
		status = SCP_E_SUPPORT;
	} else if (scpi_cmd.size != 0) {
		ERROR("SCP_READY command has incorrect size: expected 0, got %u\n",
			scpi_cmd.size);
		status = SCP_E_SIZE;
	}

	VERBOSE("Sending response for SCP_READY command\n");

	/*
	 * Send our response back to SCP. We reuse the same SCPI header and
	 * only update the status field.
	 */
	scpi_cmd.status = status;
	scpi_secure_message_start();
	memcpy((void *) SCPI_SHARED_MEM_AP_TO_SCP, &scpi_cmd, sizeof(scpi_cmd));
	scpi_secure_message_send(0);
	scpi_secure_message_end();

	return status == SCP_OK ? 0 : -1;
}
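
/*
 * The 32-bit power state word sent with SCPI_CMD_SET_CSS_POWER_STATE is
 * assembled by the shift/mask operations below:
 *
 *	[3:0]	CPU ID within the cluster
 *	[7:4]	Cluster ID
 *	[11:8]	Requested CPU power state
 *	[15:12]	Requested cluster power state
 *	[19:16]	Requested CSS power state
 */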
void scpi_set_css_power_state(unsigned int mpidr,
		scpi_power_state_t cpu_state, scpi_power_state_t cluster_state,
		scpi_power_state_t css_state)
{
	scpi_cmd_t *cmd;
	uint32_t state = 0;
	uint32_t *payload_addr;

#if ARM_PLAT_MT
	/*
	 * The current SCPI driver only caters for single-threaded platforms.
	 * Hence we ignore the thread ID (which is always 0) for such
	 * platforms.
	 */
	state |= (mpidr >> MPIDR_AFF1_SHIFT) & 0x0f;		/* CPU ID */
	state |= ((mpidr >> MPIDR_AFF2_SHIFT) & 0x0f) << 4;	/* Cluster ID */
#else
	state |= mpidr & 0x0f;		/* CPU ID */
	state |= (mpidr & 0xf00) >> 4;	/* Cluster ID */
#endif /* ARM_PLAT_MT */

	state |= cpu_state << 8;
	state |= cluster_state << 12;
	state |= css_state << 16;

	scpi_secure_message_start();

	/* Populate the command header */
	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
	cmd->id = SCPI_CMD_SET_CSS_POWER_STATE;
	cmd->set = SCPI_SET_NORMAL;
	cmd->sender = 0;
	cmd->size = sizeof(state);
	/* Populate the command payload */
	payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
	*payload_addr = state;
	scpi_secure_message_send(sizeof(state));
	/*
	 * SCP does not reply to this command in order to avoid MHU interrupts
	 * from the sender, which could interfere with its power state request.
	 */

	scpi_secure_message_end();
}
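
/*
 * Note on the response handling in scpi_get_css_power_state() below: the
 * response payload for SCPI_CMD_GET_CSS_POWER_STATE is read as an array of
 * 16-bit cluster descriptors indexed by cluster ID. Each descriptor is decoded
 * with the CLUSTER_ID(), CLUSTER_POWER_STATE() and CPU_POWER_STATE() macros
 * (provided by the SCPI header, not defined in this file), and CHECK_RESPONSE()
 * validates the response against the requested cluster.
 */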

/*
 * Query and obtain CSS power state from SCP.
 *
 * In response to the query, SCP returns the power states of all CPUs in all
 * clusters of the system. The returned response is then filtered based on the
 * supplied MPIDR. The power states of the requested cluster and of the CPUs
 * within it are updated via the supplied non-NULL pointer arguments.
 *
 * Returns 0 on success, or -1 on error.
 */
int scpi_get_css_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
		unsigned int *cluster_state_p)
{
	scpi_cmd_t *cmd;
	scpi_cmd_t response;
	int power_state, cpu, cluster, rc = -1;

	/*
	 * Extract CPU and cluster membership of the given MPIDR. SCPI caters
	 * for only up to 0xf clusters, and 8 CPUs per cluster.
	 */
#if ARM_PLAT_MT
	/*
	 * The current SCPI driver only caters for single-threaded platforms.
	 * Hence we ignore the thread ID (which is always 0) for such
	 * platforms.
	 */
	cpu = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
	cluster = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
#else
	cpu = mpidr & MPIDR_AFFLVL_MASK;
	cluster = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
#endif /* ARM_PLAT_MT */
	if (cpu >= 8 || cluster >= 0xf)
		return -1;

	scpi_secure_message_start();

	/* Populate request headers */
	zeromem(SCPI_CMD_HEADER_AP_TO_SCP, sizeof(*cmd));
	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
	cmd->id = SCPI_CMD_GET_CSS_POWER_STATE;

	/* Send the message and wait for SCP's response */
	scpi_secure_message_send(0);
	scpi_secure_message_receive(&response);

	if (response.status != SCP_OK)
		goto exit;

	/* Validate SCP response */
	if (!CHECK_RESPONSE(response, cluster))
		goto exit;

	/* Extract power states for the required cluster */
	power_state = *(((uint16_t *) SCPI_RES_PAYLOAD_SCP_TO_AP) + cluster);
	if (CLUSTER_ID(power_state) != cluster)
		goto exit;

	/* Update power states via the supplied pointers */
	if (cluster_state_p)
		*cluster_state_p = CLUSTER_POWER_STATE(power_state);
	if (cpu_state_p)
		*cpu_state_p = CPU_POWER_STATE(power_state);
	rc = 0;

exit:
	scpi_secure_message_end();
	return rc;
}

uint32_t scpi_sys_power_state(scpi_system_state_t system_state)
{
	scpi_cmd_t *cmd;
	uint8_t *payload_addr;
	scpi_cmd_t response;

	scpi_secure_message_start();

	/* Populate the command header */
	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
	cmd->id = SCPI_CMD_SYS_POWER_STATE;
	cmd->set = 0;
	cmd->sender = 0;
	cmd->size = sizeof(*payload_addr);
	/* Populate the command payload */
	payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
	*payload_addr = system_state & 0xff;
	scpi_secure_message_send(sizeof(*payload_addr));

	scpi_secure_message_receive(&response);

	scpi_secure_message_end();

	return response.status;
}
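
/*
 * Illustrative usage sketch (an assumption for documentation only, not code
 * taken from any platform port): a CSS platform's boot and power management
 * code would typically drive this driver along these lines.
 *
 *	if (scpi_wait_ready() < 0)
 *		panic();	// SCP never signalled SCP_READY
 *
 *	// Ask SCP to power down this CPU and its cluster, keeping the CSS on.
 *	// The scpi_power_* enumerators are assumed to come from css_scpi.h and
 *	// read_mpidr() from arch_helpers.h.
 *	scpi_set_css_power_state(read_mpidr(), scpi_power_off, scpi_power_off,
 *				scpi_power_on);
 */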