/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <delay_timer.h>
#include <errno.h>
#include <mmio.h>
#include <psci.h>
#include <se_private.h>
#include <security_engine.h>
#include <tegra_platform.h>

/*******************************************************************************
 * Constants and Macros
 ******************************************************************************/

#define TIMEOUT_100MS	100UL	/* Timeout of 100ms */

/*******************************************************************************
 * Data structure and global variables
 ******************************************************************************/

/* The security engine contexts are formatted as follows:
 *
 * SE1 CONTEXT:
 * #--------------------------------#
 * |      Random Data   1 Block     |
 * #--------------------------------#
 * |      Sticky Bits   2 Blocks    |
 * #--------------------------------#
 * |     Key Table     64 Blocks    |
 * |      For each Key (x16):       |
 * |       Key:         2 Blocks    |
 * |       Original-IV: 1 Block     |
 * |       Updated-IV:  1 Block     |
 * #--------------------------------#
 * |      RSA Keys     64 Blocks    |
 * #--------------------------------#
 * |    Known Pattern   1 Block     |
 * #--------------------------------#
 *
 * SE2/PKA1 CONTEXT:
 * #--------------------------------#
 * |      Random Data   1 Block     |
 * #--------------------------------#
 * |      Sticky Bits   2 Blocks    |
 * #--------------------------------#
 * |     Key Table     64 Blocks    |
 * |      For each Key (x16):       |
 * |       Key:         2 Blocks    |
 * |       Original-IV: 1 Block     |
 * |       Updated-IV:  1 Block     |
 * #--------------------------------#
 * |      RSA Keys     64 Blocks    |
 * #--------------------------------#
 * |   PKA sticky bits  1 Block     |
 * #--------------------------------#
 * |      PKA keys    512 Blocks    |
 * #--------------------------------#
 * |    Known Pattern   1 Block     |
 * #--------------------------------#
 */

/* SE input and output linked list buffers */
static tegra_se_io_lst_t se1_src_ll_buf;
static tegra_se_io_lst_t se1_dst_ll_buf;

/* SE2 input and output linked list buffers */
static tegra_se_io_lst_t se2_src_ll_buf;
static tegra_se_io_lst_t se2_dst_ll_buf;

/* SE1 security engine device handle */
static tegra_se_dev_t se_dev_1 = {
	.se_num = 1,
	/* Setup base address for SE */
	.se_base = TEGRA_SE1_BASE,
	/* Setup context size in AES blocks */
	.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE1,
	/* Setup SRC buffers for SE operations */
	.src_ll_buf = &se1_src_ll_buf,
	/* Setup DST buffers for SE operations */
	.dst_ll_buf = &se1_dst_ll_buf,
};

/* SE2 security engine device handle */
static tegra_se_dev_t se_dev_2 = {
	.se_num = 2,
	/* Setup base address for SE2 */
	.se_base = TEGRA_SE2_BASE,
	/* Setup context size in AES blocks */
	.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE2,
	/* Setup SRC buffers for SE operations */
	.src_ll_buf = &se2_src_ll_buf,
	/* Setup DST buffers for SE operations */
	.dst_ll_buf = &se2_dst_ll_buf,
};
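/*
 * For reference, the linked-list I/O structures above are defined in
 * se_private.h; their layout is assumed here (based only on how this
 * file uses them) to be roughly:
 *
 *	typedef struct tegra_se_ll {
 *		uint32_t addr;		// DMA address of the buffer
 *		uint32_t data_len;	// length of the buffer in bytes
 *	} tegra_se_ll_t;
 *
 *	typedef struct tegra_se_io_lst {
 *		uint32_t last_buff_num;	// index of the last valid entry
 *		tegra_se_ll_t buffer[SE_LL_MAX_BUFFER_NUM];
 *	} tegra_se_io_lst_t;
 *
 * The hardware DMAs these lists directly, which is why they are flushed
 * to main memory (see tegra_se_make_data_coherent()) before every
 * operation.
 */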
/*******************************************************************************
 * Functions Definition
 ******************************************************************************/

static void tegra_se_make_data_coherent(const tegra_se_dev_t *se_dev)
{
	flush_dcache_range(((uint64_t)(se_dev->src_ll_buf)),
			sizeof(tegra_se_io_lst_t));
	flush_dcache_range(((uint64_t)(se_dev->dst_ll_buf)),
			sizeof(tegra_se_io_lst_t));
}

/*
 * Check that the SE operation has completed after kickoff.
 * This function is invoked after an SE operation has been started,
 * and it checks the following conditions:
 * 1. SE_INT_STATUS = SE_OP_DONE
 * 2. SE_STATUS = IDLE
 * 3. AHB bus data transfer is complete.
 * 4. SE_ERR_STATUS is clean.
 */
static int32_t tegra_se_operation_complete(const tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int32_t ret = 0;
	uint32_t timeout;

	/* Poll the SE interrupt register to ensure the H/W operation completed */
	val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
	for (timeout = 0; (SE_INT_OP_DONE(val) == SE_INT_OP_DONE_CLEAR) &&
			(timeout < TIMEOUT_100MS); timeout++) {
		mdelay(1);
		val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
	}

	if (timeout == TIMEOUT_100MS) {
		ERROR("%s: ERR: Atomic context save operation timeout!\n",
				__func__);
		ret = -ETIMEDOUT;
	}

	/* Poll the SE status register until idle to ensure the H/W operation completed */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
		for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS);
				timeout++) {
			mdelay(1);
			val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
		}

		if (timeout == TIMEOUT_100MS) {
			ERROR("%s: ERR: MEM_INTERFACE and SE "
					"idle state timeout.\n", __func__);
			ret = -ETIMEDOUT;
		}
	}

	/* Check that the AHB bus transfer is complete */
	if (ret == 0) {
		val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
		for (timeout = 0; ((val & (ARAHB_MST_ID_SE_MASK | ARAHB_MST_ID_SE2_MASK)) != 0U) &&
				(timeout < TIMEOUT_100MS); timeout++) {
			mdelay(1);
			val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
		}

		if (timeout == TIMEOUT_100MS) {
			ERROR("%s: SE write over AHB timeout.\n", __func__);
			ret = -ETIMEDOUT;
		}
	}

	/* Ensure that no errors were thrown during the operation */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_ERR_STATUS_REG_OFFSET);
		if (val != 0U) {
			ERROR("%s: error during SE operation! 0x%x\n",
					__func__, val);
			ret = -ENOTSUP;
		}
	}

	return ret;
}

/*
 * Returns 1 if the SE engine is configured to perform the SE context save
 * in hardware, 0 otherwise.
 */
static inline int32_t tegra_se_atomic_save_enabled(const tegra_se_dev_t *se_dev)
{
	uint32_t val;
	int32_t ret = 0;

	val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
	if (SE_CTX_SAVE_AUTO_ENABLE(val) == SE_CTX_SAVE_AUTO_EN) {
		ret = 1;
	}

	return ret;
}
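/*
 * Note the shared idiom in the polls above and below: read a status
 * register, spin in 1 ms steps with mdelay(1), and give up after
 * TIMEOUT_100MS iterations. An illustrative helper (a sketch only, not
 * part of this driver) that waits for a set of bits to clear could look
 * like:
 *
 *	static int32_t tegra_se_poll_bits_clear(const tegra_se_dev_t *se_dev,
 *						uint32_t offset, uint32_t mask)
 *	{
 *		uint32_t timeout;
 *
 *		for (timeout = 0U; timeout < TIMEOUT_100MS; timeout++) {
 *			if ((tegra_se_read_32(se_dev, offset) & mask) == 0U) {
 *				return 0;
 *			}
 *			mdelay(1);
 *		}
 *
 *		return -ETIMEDOUT;
 *	}
 *
 * The SE_INT_STATUS poll inverts the test (it waits for OP_DONE to be
 * set) and the AHB poll reads through mmio_read_32() instead, which is
 * why the driver keeps these loops open-coded.
 */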
/*
 * Wait for the SE engine to be idle and clear any pending interrupts before
 * starting the next SE operation.
 */
static int32_t tegra_se_operation_prepare(const tegra_se_dev_t *se_dev)
{
	int32_t ret = 0;
	uint32_t val = 0;
	uint32_t timeout;

	/* Wait for any previous operation to finish */
	val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
	for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS); timeout++) {
		mdelay(1);
		val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
	}

	if (timeout == TIMEOUT_100MS) {
		ERROR("%s: ERR: SE status is not idle!\n", __func__);
		ret = -ETIMEDOUT;
	}

	/* Clear any pending interrupts from the previous operation */
	val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
	tegra_se_write_32(se_dev, SE_INT_STATUS_REG_OFFSET, val);

	return ret;
}

/*
 * SE atomic context save. At SC7 entry, the SE driver triggers the
 * hardware to automatically perform the context save operation.
 */
static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev)
{
	int32_t ret = 0;
	uint32_t val = 0;
	uint32_t blk_count_limit = 0;
	uint32_t block_count = 0;

	/* Check that the previous operation has been finalized */
	ret = tegra_se_operation_prepare(se_dev);

	/* Ensure HW atomic context save has been enabled.
	 * This should have been done at boot time.
	 * SE_CTX_SAVE_AUTO.ENABLE == ENABLE
	 */
	if ((ret == 0) && (tegra_se_atomic_save_enabled(se_dev) == 0)) {
		ERROR("%s: HW atomic context save not enabled\n", __func__);
		ret = -ENOTSUP;
	}

	/* Read the context save progress counter: block_count.
	 * Ensure that no previous context save has been triggered:
	 * SE_CTX_SAVE_AUTO.CURR_CNT == 0
	 */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
		block_count = SE_CTX_SAVE_GET_BLK_COUNT(val);
		if (block_count != 0U) {
			ERROR("%s: ctx_save triggered multiple times\n",
					__func__);
			ret = -EALREADY;
		}
	}

	/* Set the destination block count for when the context save completes */
	if (ret == 0) {
		blk_count_limit = block_count + se_dev->ctx_size_blks;
	}

	/* Program the SE_CONFIG register as for an RNG operation:
	 * SE_CONFIG.ENC_ALG = RNG
	 * SE_CONFIG.DEC_ALG = NOP
	 * SE_CONFIG.ENC_MODE is ignored
	 * SE_CONFIG.DEC_MODE is ignored
	 * SE_CONFIG.DST = MEMORY
	 */
	if (ret == 0) {
		val = (SE_CONFIG_ENC_ALG_RNG |
			SE_CONFIG_DEC_ALG_NOP |
			SE_CONFIG_DST_MEMORY);
		tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);

		tegra_se_make_data_coherent(se_dev);

		/* SE_CTX_SAVE operation */
		tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET,
				SE_OP_CTX_SAVE);

		ret = tegra_se_operation_complete(se_dev);
	}

	/* Check that the context save wrote the expected number of blocks */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
		if (SE_CTX_SAVE_GET_BLK_COUNT(val) != blk_count_limit) {
			ERROR("%s: expected %d blocks but %d were written\n",
					__func__, blk_count_limit,
					SE_CTX_SAVE_GET_BLK_COUNT(val));
			ret = -ECANCELED;
		}
	}

	return ret;
}
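/*
 * Worked example for the blk_count_limit check above: per the context
 * layout described at the top of this file, an SE1 save covers
 * 1 + 2 + 64 + 64 + 1 = 132 blocks and an SE2/PKA1 save covers
 * 1 + 2 + 64 + 64 + 1 + 512 + 1 = 645 blocks, so with a clean trigger
 * (block_count == 0) the progress counter should read exactly the
 * per-engine context size once the save completes. The
 * SE_CTX_SAVE_SIZE_BLOCKS_SE* constants are assumed to encode these
 * sizes.
 */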
/*
 * Security engine primitive operations, including the normal operation
 * and the context save operation.
 */
static int tegra_se_perform_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
{
	uint32_t nblocks = nbytes / TEGRA_SE_AES_BLOCK_SIZE;
	int ret = 0;

	assert(se_dev != NULL);

	/* Use the device buffers for input and output */
	tegra_se_write_32(se_dev, SE_OUT_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->dst_ll_buf)));
	tegra_se_write_32(se_dev, SE_IN_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->src_ll_buf)));

	/* Check that the previous operation has been finalized */
	ret = tegra_se_operation_prepare(se_dev);
	if (ret != 0) {
		goto op_error;
	}

	/* Program the SE operation size */
	if (nblocks != 0U) {
		tegra_se_write_32(se_dev, SE_BLOCK_COUNT_REG_OFFSET, nblocks - 1U);
	}

	/* Make the SE LL data coherent before the SE operation */
	tegra_se_make_data_coherent(se_dev);

	/* Start the hardware operation */
	tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_START);

	/* Wait for the operation to finish */
	ret = tegra_se_operation_complete(se_dev);

op_error:
	return ret;
}
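/*
 * Worked example for the size programming above: SE_BLOCK_COUNT holds the
 * block count minus one, so assuming TEGRA_SE_AES_BLOCK_SIZE is the usual
 * 16-byte AES block, a 64-byte transfer gives nblocks = 64 / 16 = 4 and
 * programs SE_BLOCK_COUNT = 3. A zero-length operation (such as the SRK
 * generation below) skips the register write entirely.
 */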
/*
 * Security Engine sequence to generate the SRK.
 * SE and SE2 will generate different SRKs, seeded by different
 * entropy sources.
 */
static int tegra_se_generate_srk(const tegra_se_dev_t *se_dev)
{
	int ret = PSCI_E_INTERN_FAIL;
	uint32_t val;

	/* Configure the following hardware register settings:
	 * SE_CONFIG.DEC_ALG = NOP
	 * SE_CONFIG.ENC_ALG = RNG
	 * SE_CONFIG.DST = SRK
	 * SE_OPERATION.OP = START
	 * SE_CRYPTO_LAST_BLOCK = 0
	 */
	se_dev->src_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->last_buff_num = 0;

	/* Configure the random number generator */
	val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_ENTROPY);
	tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);

	/* Configure the output destination = SRK */
	val = (SE_CONFIG_ENC_ALG_RNG |
		SE_CONFIG_DEC_ALG_NOP |
		SE_CONFIG_DST_SRK);
	tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);

	/* Perform the hardware operation */
	ret = tegra_se_perform_operation(se_dev, 0);

	return ret;
}

/*
 * Initialize the SE engine handles
 */
void tegra_se_init(void)
{
	INFO("%s: start SE init\n", __func__);

	/* Generate a random SRK to initialize the DRBG */
	tegra_se_generate_srk(&se_dev_1);
	tegra_se_generate_srk(&se_dev_2);

	INFO("%s: SE init done\n", __func__);
}

/*
 * Security engine power suspend entry point.
 * This function is invoked from the PSCI power domain suspend handler.
 */
int32_t tegra_se_suspend(void)
{
	int32_t ret = 0;

	/* Atomic context save for SE2 and PKA1 */
	INFO("%s: SE2/PKA1 atomic context save\n", __func__);
	ret = tegra_se_context_save_atomic(&se_dev_2);

	/* Atomic context save for SE1 */
	if (ret == 0) {
		INFO("%s: SE1 atomic context save\n", __func__);
		ret = tegra_se_context_save_atomic(&se_dev_1);
	}

	if (ret == 0) {
		INFO("%s: SE atomic context save done\n", __func__);
	}

	return ret;
}

/*
 * Save TZRAM to the shadow TZRAM in the AON power domain
 */
int32_t tegra_se_save_tzram(void)
{
	uint32_t val = 0;
	int32_t ret = 0;
	uint32_t timeout;

	INFO("%s: SE TZRAM save start\n", __func__);

	val = (SE_TZRAM_OP_REQ_INIT | SE_TZRAM_OP_MODE_SAVE);
	tegra_se_write_32(&se_dev_1, SE_TZRAM_OPERATION, val);

	val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
	for (timeout = 0; (SE_TZRAM_OP_BUSY(val) == SE_TZRAM_OP_BUSY_ON) &&
			(timeout < TIMEOUT_100MS); timeout++) {
		mdelay(1);
		val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
	}

	if (timeout == TIMEOUT_100MS) {
		ERROR("%s: ERR: TZRAM save timeout!\n", __func__);
		ret = -ETIMEDOUT;
	}

	if (ret == 0) {
		INFO("%s: SE TZRAM save done!\n", __func__);
	}

	return ret;
}

/*
 * This function is invoked during SE resume to re-arm the RNG
 */
static void tegra_se_warm_boot_resume(const tegra_se_dev_t *se_dev)
{
	uint32_t val;

	assert(se_dev != NULL);

	/* Lock the RNG source to ENTROPY on resume */
	val = DRBG_RO_ENT_IGNORE_MEM_ENABLE |
		DRBG_RO_ENT_SRC_LOCK_ENABLE |
		DRBG_RO_ENT_SRC_ENABLE;
	tegra_se_write_32(se_dev, SE_RNG_SRC_CONFIG_REG_OFFSET, val);

	/* Set a random value to the SRK to initialize the DRBG */
	tegra_se_generate_srk(se_dev);
}

/*
 * This function is invoked on SC7 resume
 */
void tegra_se_resume(void)
{
	tegra_se_warm_boot_resume(&se_dev_1);
	tegra_se_warm_boot_resume(&se_dev_2);
}
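/*
 * Illustrative call flow (a sketch only; the hook names below are
 * hypothetical and the actual integration point is the platform's PSCI
 * power domain handlers): tegra_se_init() runs once at cold boot, then on
 * SC7 entry the platform runs tegra_se_suspend() followed by
 * tegra_se_save_tzram(), and on SC7 exit it runs tegra_se_resume():
 *
 *	int32_t plat_sc7_entry(void)
 *	{
 *		int32_t ret = tegra_se_suspend();
 *
 *		if (ret == 0) {
 *			ret = tegra_se_save_tzram();
 *		}
 *
 *		return ret;
 *	}
 *
 *	void plat_sc7_exit(void)
 *	{
 *		tegra_se_resume();
 *	}
 */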