/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>

	.globl	enable_irq
	.globl	disable_irq

	.globl	enable_fiq
	.globl	disable_fiq

	.globl	enable_serror
	.globl	disable_serror

	.globl	enable_debug_exceptions
	.globl	disable_debug_exceptions

	.globl	read_daif
	.globl	write_daif

	.globl	read_spsr_el1
	.globl	read_spsr_el2
	.globl	read_spsr_el3

	.globl	write_spsr_el1
	.globl	write_spsr_el2
	.globl	write_spsr_el3

	.globl	read_elr_el1
	.globl	read_elr_el2
	.globl	read_elr_el3

	.globl	write_elr_el1
	.globl	write_elr_el2
	.globl	write_elr_el3

	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	dsb
	.globl	isb
	.globl	sev
	.globl	wfe
	.globl	wfi
	.globl	eret
	.globl	smc

	.globl	zeromem16
	.globl	memcpy16

	.globl	disable_mmu_el3
	.globl	disable_mmu_icache_el3


	/* -----------------------------------------------------
	 * Return the bit position, within an MPIDR, of the
	 * affinity level passed in x0. Affinity level 3 lives
	 * at bits [39:32], hence the conditional increment.
	 * -----------------------------------------------------
	 */
func get_afflvl_shift
	cmp	x0, #3
	cinc	x0, x0, eq
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1
	ret

	/* -----------------------------------------------------
	 * Zero all bits of the MPIDR in x0 below the field for
	 * the affinity level passed in x1 and return the result.
	 * -----------------------------------------------------
	 */
func mpidr_mask_lower_afflvls
	cmp	x1, #3
	cinc	x1, x1, eq
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2
	lsr	x0, x0, x2
	lsl	x0, x0, x2
	ret

	/* -----------------------------------------------------
	 * Asynchronous exception manipulation accessors
	 * -----------------------------------------------------
	 */
func enable_irq
	msr	daifclr, #DAIF_IRQ_BIT
	ret

func enable_fiq
	msr	daifclr, #DAIF_FIQ_BIT
	ret

func enable_serror
	msr	daifclr, #DAIF_ABT_BIT
	ret

func enable_debug_exceptions
	msr	daifclr, #DAIF_DBG_BIT
	ret

func disable_irq
	msr	daifset, #DAIF_IRQ_BIT
	ret

func disable_fiq
	msr	daifset, #DAIF_FIQ_BIT
	ret

func disable_serror
	msr	daifset, #DAIF_ABT_BIT
	ret

func disable_debug_exceptions
	msr	daifset, #DAIF_DBG_BIT
	ret

func read_daif
	mrs	x0, daif
	ret
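
	/* -----------------------------------------------------
	 * Usage sketch (comment only): from C, these DAIF
	 * accessors are assumed to be declared along the lines
	 * of
	 *     void enable_irq(void);
	 *     void disable_irq(void);
	 *     unsigned long read_daif(void);
	 *     void write_daif(unsigned long daif);
	 * so a caller could mask IRQs around a critical section
	 * and then restore the previous state, e.g.
	 *     unsigned long flags = read_daif();
	 *     disable_irq();
	 *     ... critical section ...
	 *     write_daif(flags);
	 * The authoritative prototypes live in the C headers,
	 * not in this file.
	 * -----------------------------------------------------
	 */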

func write_daif
	msr	daif, x0
	ret

func read_spsr_el1
	mrs	x0, spsr_el1
	ret

func read_spsr_el2
	mrs	x0, spsr_el2
	ret

func read_spsr_el3
	mrs	x0, spsr_el3
	ret

func write_spsr_el1
	msr	spsr_el1, x0
	ret

func write_spsr_el2
	msr	spsr_el2, x0
	ret

func write_spsr_el3
	msr	spsr_el3, x0
	ret

func read_elr_el1
	mrs	x0, elr_el1
	ret

func read_elr_el2
	mrs	x0, elr_el2
	ret

func read_elr_el3
	mrs	x0, elr_el3
	ret

func write_elr_el1
	msr	elr_el1, x0
	ret

func write_elr_el2
	msr	elr_el2, x0
	ret

func write_elr_el3
	msr	elr_el3, x0
	ret

func dsb
	dsb	sy
	ret

func isb
	isb
	ret

func sev
	sev
	ret

func wfe
	wfe
	ret

func wfi
	wfi
	ret

func eret
	eret

func smc
	smc	#0

/* -----------------------------------------------------------------------
 * void zeromem16(void *mem, unsigned int length);
 *
 * Initialise a memory region to 0.
 * The memory address must be 16-byte aligned.
 * -----------------------------------------------------------------------
 */
func zeromem16
	add	x2, x0, x1
/* zero 16 bytes at a time */
z_loop16:
	sub	x3, x2, x0
	cmp	x3, #16
	b.lt	z_loop1
	stp	xzr, xzr, [x0], #16
	b	z_loop16
/* zero byte per byte */
z_loop1:
	cmp	x0, x2
	b.eq	z_end
	strb	wzr, [x0], #1
	b	z_loop1
z_end:	ret

/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy16
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lt	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy byte per byte */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:	ret

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL3
 * This is implemented in assembler to ensure that the data cache is cleaned
 * and invalidated after the MMU is disabled without any intervening cacheable
 * data accesses
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
	mrs	x0, sctlr_el3
	bic	x0, x0, x1
	msr	sctlr_el3, x0
	isb				// ensure MMU is off
	mov	x0, #DCCISW		// DCache clean and invalidate
	b	dcsw_op_all

func disable_mmu_icache_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu
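
	/* -----------------------------------------------------
	 * Usage sketch (comment only, hypothetical symbol
	 * names): a caller holding a 16-byte aligned buffer
	 * could zero 64 bytes of it with
	 *     adr	x0, scratch_buf
	 *     mov	x1, #64
	 *     bl	zeromem16
	 * Note that disable_mmu_el3 and disable_mmu_icache_el3
	 * tail-call dcsw_op_all with x0 set to DCCISW, so they
	 * return to their caller only after the data cache has
	 * been cleaned and invalidated.
	 * -----------------------------------------------------
	 */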