/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
29 */ 30 31#include <arch.h> 32#include <asm_macros.S> 33 34 .globl enable_irq 35 .globl disable_irq 36 37 .globl enable_fiq 38 .globl disable_fiq 39 40 .globl enable_serror 41 .globl disable_serror 42 43 .globl enable_debug_exceptions 44 .globl disable_debug_exceptions 45 46 .globl read_daif 47 .globl write_daif 48 49 .globl read_spsr 50 .globl read_spsr_el1 51 .globl read_spsr_el2 52 .globl read_spsr_el3 53 54 .globl write_spsr 55 .globl write_spsr_el1 56 .globl write_spsr_el2 57 .globl write_spsr_el3 58 59 .globl read_elr 60 .globl read_elr_el1 61 .globl read_elr_el2 62 .globl read_elr_el3 63 64 .globl write_elr 65 .globl write_elr_el1 66 .globl write_elr_el2 67 .globl write_elr_el3 68 69 .globl get_afflvl_shift 70 .globl mpidr_mask_lower_afflvls 71 .globl dsb 72 .globl isb 73 .globl sev 74 .globl wfe 75 .globl wfi 76 .globl eret 77 .globl smc 78 79 .globl zeromem16 80 .globl memcpy16 81 82 83func get_afflvl_shift 84 cmp x0, #3 85 cinc x0, x0, eq 86 mov x1, #MPIDR_AFFLVL_SHIFT 87 lsl x0, x0, x1 88 ret 89 90func mpidr_mask_lower_afflvls 91 cmp x1, #3 92 cinc x1, x1, eq 93 mov x2, #MPIDR_AFFLVL_SHIFT 94 lsl x2, x1, x2 95 lsr x0, x0, x2 96 lsl x0, x0, x2 97 ret 98 99 /* ----------------------------------------------------- 100 * Asynchronous exception manipulation accessors 101 * ----------------------------------------------------- 102 */ 103func enable_irq 104 msr daifclr, #DAIF_IRQ_BIT 105 ret 106 107 108func enable_fiq 109 msr daifclr, #DAIF_FIQ_BIT 110 ret 111 112 113func enable_serror 114 msr daifclr, #DAIF_ABT_BIT 115 ret 116 117 118func enable_debug_exceptions 119 msr daifclr, #DAIF_DBG_BIT 120 ret 121 122 123func disable_irq 124 msr daifset, #DAIF_IRQ_BIT 125 ret 126 127 128func disable_fiq 129 msr daifset, #DAIF_FIQ_BIT 130 ret 131 132 133func disable_serror 134 msr daifset, #DAIF_ABT_BIT 135 ret 136 137 138func disable_debug_exceptions 139 msr daifset, #DAIF_DBG_BIT 140 ret 141 142 143func read_daif 144 mrs x0, daif 145 ret 146 147 148func write_daif 149 
msr daif, x0 150 ret 151 152 153func read_spsr 154 mrs x0, CurrentEl 155 cmp x0, #(MODE_EL1 << MODE_EL_SHIFT) 156 b.eq read_spsr_el1 157 cmp x0, #(MODE_EL2 << MODE_EL_SHIFT) 158 b.eq read_spsr_el2 159 cmp x0, #(MODE_EL3 << MODE_EL_SHIFT) 160 b.eq read_spsr_el3 161 162 163func read_spsr_el1 164 mrs x0, spsr_el1 165 ret 166 167 168func read_spsr_el2 169 mrs x0, spsr_el2 170 ret 171 172 173func read_spsr_el3 174 mrs x0, spsr_el3 175 ret 176 177 178func write_spsr 179 mrs x1, CurrentEl 180 cmp x1, #(MODE_EL1 << MODE_EL_SHIFT) 181 b.eq write_spsr_el1 182 cmp x1, #(MODE_EL2 << MODE_EL_SHIFT) 183 b.eq write_spsr_el2 184 cmp x1, #(MODE_EL3 << MODE_EL_SHIFT) 185 b.eq write_spsr_el3 186 187 188func write_spsr_el1 189 msr spsr_el1, x0 190 isb 191 ret 192 193 194func write_spsr_el2 195 msr spsr_el2, x0 196 isb 197 ret 198 199 200func write_spsr_el3 201 msr spsr_el3, x0 202 isb 203 ret 204 205 206func read_elr 207 mrs x0, CurrentEl 208 cmp x0, #(MODE_EL1 << MODE_EL_SHIFT) 209 b.eq read_elr_el1 210 cmp x0, #(MODE_EL2 << MODE_EL_SHIFT) 211 b.eq read_elr_el2 212 cmp x0, #(MODE_EL3 << MODE_EL_SHIFT) 213 b.eq read_elr_el3 214 215 216func read_elr_el1 217 mrs x0, elr_el1 218 ret 219 220 221func read_elr_el2 222 mrs x0, elr_el2 223 ret 224 225 226func read_elr_el3 227 mrs x0, elr_el3 228 ret 229 230 231func write_elr 232 mrs x1, CurrentEl 233 cmp x1, #(MODE_EL1 << MODE_EL_SHIFT) 234 b.eq write_elr_el1 235 cmp x1, #(MODE_EL2 << MODE_EL_SHIFT) 236 b.eq write_elr_el2 237 cmp x1, #(MODE_EL3 << MODE_EL_SHIFT) 238 b.eq write_elr_el3 239 240 241func write_elr_el1 242 msr elr_el1, x0 243 isb 244 ret 245 246 247func write_elr_el2 248 msr elr_el2, x0 249 isb 250 ret 251 252 253func write_elr_el3 254 msr elr_el3, x0 255 isb 256 ret 257 258 259func dsb 260 dsb sy 261 ret 262 263 264func isb 265 isb 266 ret 267 268 269func sev 270 sev 271 ret 272 273 274func wfe 275 wfe 276 ret 277 278 279func wfi 280 wfi 281 ret 282 283 284func eret 285 eret 286 287 288func smc 289 smc #0 290 291/* 
----------------------------------------------------------------------- 292 * void zeromem16(void *mem, unsigned int length); 293 * 294 * Initialise a memory region to 0. 295 * The memory address must be 16-byte aligned. 296 * ----------------------------------------------------------------------- 297 */ 298func zeromem16 299 add x2, x0, x1 300/* zero 16 bytes at a time */ 301z_loop16: 302 sub x3, x2, x0 303 cmp x3, #16 304 b.lt z_loop1 305 stp xzr, xzr, [x0], #16 306 b z_loop16 307/* zero byte per byte */ 308z_loop1: 309 cmp x0, x2 310 b.eq z_end 311 strb wzr, [x0], #1 312 b z_loop1 313z_end: ret 314 315 316/* -------------------------------------------------------------------------- 317 * void memcpy16(void *dest, const void *src, unsigned int length) 318 * 319 * Copy length bytes from memory area src to memory area dest. 320 * The memory areas should not overlap. 321 * Destination and source addresses must be 16-byte aligned. 322 * -------------------------------------------------------------------------- 323 */ 324func memcpy16 325/* copy 16 bytes at a time */ 326m_loop16: 327 cmp x2, #16 328 b.lt m_loop1 329 ldp x3, x4, [x1], #16 330 stp x3, x4, [x0], #16 331 sub x2, x2, #16 332 b m_loop16 333/* copy byte per byte */ 334m_loop1: 335 cbz x2, m_end 336 ldrb w3, [x1], #1 337 strb w3, [x0], #1 338 subs x2, x2, #1 339 b.ne m_loop1 340m_end: ret 341