/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <console.h>
#include <debug.h>
#include <delay_timer.h>
#include <desc_image_load.h>
#include <dw_ufs.h>
#include <errno.h>
#include <generic_delay_timer.h>
#include <hi3660.h>
#include <mmio.h>
#ifdef SPD_opteed
#include <optee_utils.h>
#endif
#include <platform_def.h>
#include <string.h>
#include <ufs.h>

#include "hikey960_def.h"
#include "hikey960_private.h"

/*
 * The next 2 constants identify the extents of the code & RO data region.
 * These addresses are used by the MMU setup code and therefore they must be
 * page-aligned. It is the responsibility of the linker script to ensure that
 * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
 */
#define BL2_RO_BASE		(unsigned long)(&__RO_START__)
#define BL2_RO_LIMIT		(unsigned long)(&__RO_END__)

#define BL2_RW_BASE		(BL2_RO_LIMIT)

/*
 * The next 2 constants identify the extents of the coherent memory region.
 * These addresses are used by the MMU setup code and therefore they must be
 * page-aligned. It is the responsibility of the linker script to ensure that
 * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
 * page-aligned addresses.
 */
#define BL2_COHERENT_RAM_BASE	(unsigned long)(&__COHERENT_RAM_START__)
#define BL2_COHERENT_RAM_LIMIT	(unsigned long)(&__COHERENT_RAM_END__)

static meminfo_t bl2_el3_tzram_layout;

/* Loads and starts the LPM3 (SCP) firmware; implemented elsewhere in the platform port. */
extern int load_lpm3(void);

enum {
	BOOT_MODE_RECOVERY = 0,
	BOOT_MODE_NORMAL,
	BOOT_MODE_MASK = 1,
};

/*******************************************************************************
 * Transfer SCP_BL2 from Trusted RAM using the SCP Download protocol.
 * Return 0 on success, -1 otherwise.
 ******************************************************************************/
int plat_hikey960_bl2_handle_scp_bl2(image_info_t *scp_bl2_image_info)
{
	int i;
	int *buf;

	assert(scp_bl2_image_info->image_size < SCP_BL2_SIZE);

	INFO("BL2: Initiating SCP_BL2 transfer to SCP\n");

	INFO("BL2: SCP_BL2: 0x%lx@0x%x\n",
	     scp_bl2_image_info->image_base,
	     scp_bl2_image_info->image_size);

	buf = (int *)scp_bl2_image_info->image_base;

	INFO("BL2: SCP_BL2 HEAD:\n");
	for (i = 0; i < 64; i += 4)
		INFO("BL2: SCP_BL2 0x%x 0x%x 0x%x 0x%x\n",
		     buf[i], buf[i+1], buf[i+2], buf[i+3]);

	buf = (int *)(scp_bl2_image_info->image_base +
		      scp_bl2_image_info->image_size - 256);

	INFO("BL2: SCP_BL2 TAIL:\n");
	for (i = 0; i < 64; i += 4)
		INFO("BL2: SCP_BL2 0x%x 0x%x 0x%x 0x%x\n",
		     buf[i], buf[i+1], buf[i+2], buf[i+3]);

	INFO("BL2: SCP_BL2 transferred to SCP\n");

	load_lpm3();
	(void)buf;

	return 0;
}

static void hikey960_ufs_reset(void)
{
	unsigned int data, mask;

	/* disable the UFS clocks before switching the reference clock source */
	mmio_write_32(CRG_PERDIS7_REG, 1 << 14);
	mmio_clrbits_32(UFS_SYS_PHY_CLK_CTRL_REG, BIT_SYSCTRL_REF_CLOCK_EN);
	do {
		data = mmio_read_32(UFS_SYS_PHY_CLK_CTRL_REG);
	} while (data & BIT_SYSCTRL_REF_CLOCK_EN);
	/* use abb clk */
	mmio_clrbits_32(UFS_SYS_UFS_SYSCTRL_REG, BIT_UFS_REFCLK_SRC_SE1);
	mmio_clrbits_32(UFS_SYS_PHY_ISO_EN_REG, BIT_UFS_REFCLK_ISO_EN);
	mmio_write_32(PCTRL_PERI_CTRL3_REG, (1 << 0) | (1 << 16));
	mdelay(1);
	/* re-enable the UFS clocks */
	mmio_write_32(CRG_PEREN7_REG, 1 << 14);
	mmio_setbits_32(UFS_SYS_PHY_CLK_CTRL_REG, BIT_SYSCTRL_REF_CLOCK_EN);

	/* assert the UFS controller reset and wait until it takes effect */
	mmio_write_32(CRG_PERRSTEN3_REG, PERI_UFS_BIT);
	do {
		data = mmio_read_32(CRG_PERRSTSTAT3_REG);
	} while ((data & PERI_UFS_BIT) == 0);
	/* enable the MTCMOS power switch for the UFS subsystem */
	mmio_setbits_32(UFS_SYS_PSW_POWER_CTRL_REG, BIT_UFS_PSW_MTCMOS_EN);
	mdelay(1);
	mmio_setbits_32(UFS_SYS_HC_LP_CTRL_REG, BIT_SYSCTRL_PWR_READY);
	/* assert the UFS device reset */
	mmio_write_32(UFS_SYS_UFS_DEVICE_RESET_CTRL_REG,
		      MASK_UFS_DEVICE_RESET);
	/* clear SC_DIV_UFS_PERIBUS */
	mask = SC_DIV_UFS_PERIBUS << 16;
	mmio_write_32(CRG_CLKDIV17_REG, mask);
	/* set SC_DIV_UFSPHY_CFG(3) */
	mask = SC_DIV_UFSPHY_CFG_MASK << 16;
	data = SC_DIV_UFSPHY_CFG(3);
	mmio_write_32(CRG_CLKDIV16_REG, mask | data);
	data = mmio_read_32(UFS_SYS_PHY_CLK_CTRL_REG);
	data &= ~MASK_SYSCTRL_CFG_CLOCK_FREQ;
	data |= 0x39;
	mmio_write_32(UFS_SYS_PHY_CLK_CTRL_REG, data);
	mmio_clrbits_32(UFS_SYS_PHY_CLK_CTRL_REG, MASK_SYSCTRL_REF_CLOCK_SEL);
	mmio_setbits_32(UFS_SYS_CLOCK_GATE_BYPASS_REG,
			MASK_UFS_CLK_GATE_BYPASS);
	mmio_setbits_32(UFS_SYS_UFS_SYSCTRL_REG, MASK_UFS_SYSCTRL_BYPASS);

	/* enable the power-switch clock, remove isolation and release local resets */
	mmio_setbits_32(UFS_SYS_PSW_CLK_CTRL_REG, BIT_SYSCTRL_PSW_CLK_EN);
	mmio_clrbits_32(UFS_SYS_PSW_POWER_CTRL_REG, BIT_UFS_PSW_ISO_CTRL);
	mmio_clrbits_32(UFS_SYS_PHY_ISO_EN_REG, BIT_UFS_PHY_ISO_CTRL);
	mmio_clrbits_32(UFS_SYS_HC_LP_CTRL_REG, BIT_SYSCTRL_LP_ISOL_EN);
	mmio_write_32(CRG_PERRSTDIS3_REG, PERI_ARST_UFS_BIT);
	mmio_setbits_32(UFS_SYS_RESET_CTRL_EN_REG, BIT_SYSCTRL_LP_RESET_N);
	mdelay(1);
	/* de-assert the UFS device reset */
	mmio_write_32(UFS_SYS_UFS_DEVICE_RESET_CTRL_REG,
		      MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET);
	mdelay(20);
	mmio_write_32(UFS_SYS_UFS_DEVICE_RESET_CTRL_REG,
		      0x03300330);

	/* release the UFS controller reset and wait for it to clear */
	mmio_write_32(CRG_PERRSTDIS3_REG, PERI_UFS_BIT);
	do {
		data = mmio_read_32(CRG_PERRSTSTAT3_REG);
	} while (data & PERI_UFS_BIT);
}

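/*
 * Reset the UFS subsystem, then hand the controller register base and the
 * platform's descriptor memory window over to the DesignWare UFS driver.
 */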
static void hikey960_init_ufs(void)
{
	dw_ufs_params_t ufs_params;

	memset(&ufs_params, 0, sizeof(ufs_params));
	ufs_params.reg_base = UFS_REG_BASE;
	ufs_params.desc_base = HIKEY960_UFS_DESC_BASE;
	ufs_params.desc_size = HIKEY960_UFS_DESC_SIZE;
	hikey960_ufs_reset();
	dw_ufs_init(&ufs_params);
}

/*******************************************************************************
 * Gets SPSR for BL32 entry
 ******************************************************************************/
uint32_t hikey960_get_spsr_for_bl32_entry(void)
{
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL3-2 image.
	 */
	return 0;
}

/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
#ifndef AARCH32
uint32_t hikey960_get_spsr_for_bl33_entry(void)
{
	unsigned int mode;
	uint32_t spsr;

	/* Figure out what mode we enter the non-secure world in */
	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	return spsr;
}
#else
uint32_t hikey960_get_spsr_for_bl33_entry(void)
{
	unsigned int hyp_status, mode, spsr;

	hyp_status = GET_VIRT_EXT(read_id_pfr1());

	mode = (hyp_status) ? MODE32_hyp : MODE32_svc;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_MODE32(mode, plat_get_ns_image_entrypoint() & 0x1,
			   SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
	return spsr;
}
#endif /* AARCH32 */

int hikey960_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#ifdef SPD_opteed
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
#endif
	assert(bl_mem_params);

	switch (image_id) {
#ifdef AARCH64
	case BL32_IMAGE_ID:
#ifdef SPD_opteed
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params);

		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params);

		err = parse_optee_header(&bl_mem_params->ep_info,
					 &pager_mem_params->image_info,
					 &paged_mem_params->image_info);
		if (err != 0) {
			WARN("OPTEE header parse error.\n");
		}
#endif
		bl_mem_params->ep_info.spsr = hikey960_get_spsr_for_bl32_entry();
		break;
#endif

	case BL33_IMAGE_ID:
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		bl_mem_params->ep_info.spsr = hikey960_get_spsr_for_bl33_entry();
		break;

#ifdef SCP_BL2_BASE
	case SCP_BL2_IMAGE_ID:
		/* The subsequent handling of SCP_BL2 is platform specific */
		err = plat_hikey960_bl2_handle_scp_bl2(&bl_mem_params->image_info);
		if (err) {
			WARN("Failure in platform-specific handling of SCP_BL2 image.\n");
		}
		break;
#endif
	default:
		/* Do nothing in default case */
		break;
	}

	return err;
}

/*******************************************************************************
 * This function can be used by the platforms to update/use image
 * information for given `image_id`.
 ******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	return hikey960_bl2_handle_post_image_load(image_id);
}

void bl2_el3_early_platform_setup(u_register_t arg1, u_register_t arg2,
				  u_register_t arg3, u_register_t arg4)
{
	unsigned int id, uart_base;

	generic_delay_timer_init();
	hikey960_read_boardid(&id);
	if (id == 5300)
		uart_base = PL011_UART5_BASE;
	else
		uart_base = PL011_UART6_BASE;
	/* Initialize the console to provide early debug support */
	console_init(uart_base, PL011_UART_CLK_IN_HZ, PL011_BAUDRATE);
	/*
	 * Allow BL2 to see the whole Trusted RAM.
	 */
	bl2_el3_tzram_layout.total_base = BL2_RW_BASE;
	bl2_el3_tzram_layout.total_size = BL31_LIMIT - BL2_RW_BASE;
}

void bl2_el3_plat_arch_setup(void)
{
	hikey960_init_mmu_el3(bl2_el3_tzram_layout.total_base,
			      bl2_el3_tzram_layout.total_size,
			      BL2_RO_BASE,
			      BL2_RO_LIMIT,
			      BL2_COHERENT_RAM_BASE,
			      BL2_COHERENT_RAM_LIMIT);
}

void bl2_platform_setup(void)
{
	/* disable WDT0 */
	if (mmio_read_32(WDT0_REG_BASE + WDT_LOCK_OFFSET) == WDT_LOCKED) {
		mmio_write_32(WDT0_REG_BASE + WDT_LOCK_OFFSET, WDT_UNLOCK);
		mmio_write_32(WDT0_REG_BASE + WDT_CONTROL_OFFSET, 0);
		mmio_write_32(WDT0_REG_BASE + WDT_LOCK_OFFSET, 0);
	}
	hikey960_clk_init();
	hikey960_pmu_init();
	hikey960_regulator_enable();
	hikey960_tzc_init();
	hikey960_peri_init();
	hikey960_pinmux_init();
	hikey960_gpio_init();
	hikey960_init_ufs();
	hikey960_io_setup();
}