/*
 * Copyright (c) 2025, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <stdbool.h>
#include <stdint.h>

#include <lib/spinlock.h>

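/*
 * All three functions are built in A32 state via the target("arm")
 * attribute: the conditionally executed instructions in the asm blocks
 * below (wfene, strexeq, cmpeq, moveq, movne) are not valid in Thumb
 * without IT blocks, which inline asm does not emit.
 */

/*
 * Acquire the lock, spinning until it is taken. LDREX reads the lock
 * word and opens the exclusive monitor; if the word is non-zero the
 * lock is held and WFE lets the PE sleep until the holder's release
 * generates an event. Otherwise STREX attempts to claim the lock, and
 * the loop retries if another PE raced in between. The trailing DMB
 * orders the acquisition before accesses inside the critical section.
 */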
void __attribute__((target("arm"))) spin_lock(spinlock_t *lock)
{
	volatile uint32_t *dst = &(lock->lock);
	uint32_t src = 1;
	uint32_t tmp;

	__asm__ volatile (
	"1:\n"
	"	ldrex	%[tmp], [%[dst]]\n"	/* read lock word, open monitor */
	"	cmp	%[tmp], #0\n"		/* already held? */
	"	wfene\n"			/* held: wait for an event */
	"	strexeq	%[tmp], %[src], [%[dst]]\n" /* free: try to claim it */
	"	cmpeq	%[tmp], #0\n"		/* strex result: 0 on success */
	"	bne	1b\n"			/* retry on contention or failure */
	"	dmb\n"
	: "+m" (*dst), [tmp] "=&r" (tmp)
	: [src] "r" (src), [dst] "r" (dst)
	/* CMP/STREX write the flags; a lock acquire must also act as a
	 * compiler barrier for the data it protects. */
	: "cc", "memory");
}

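/*
 * Release the lock by storing 0. On ARMv8-A the store also takes the
 * global monitor from Exclusive Access back to Open Access state,
 * which by itself generates the wake-up event for PEs waiting in WFE,
 * so only the ARMv7 path needs an explicit SEV.
 */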
void __attribute__((target("arm"))) spin_unlock(spinlock_t *lock)
{
	volatile uint32_t *dst = &(lock->lock);
	uint32_t val = 0;

	/*
	 * According to the ARMv8-A Architecture Reference Manual, "when the
	 * global monitor for a PE changes from Exclusive Access state to Open
	 * Access state, an event is generated". This applies to both AArch32
	 * and AArch64 modes of ARMv8-A. As a result, no explicit SEV with
	 * unlock is required.
	 */
	__asm__ volatile (
	/* ARMv7 does not support the stl (store-release) instruction */
#if ARM_ARCH_MAJOR == 7
	"dmb\n"				/* order critical section before release */
	"str %[val], [%[dst]]\n"
	"dsb\n"
	"sev\n"				/* wake any PE waiting in WFE */
#else
	"stl %[val], [%[dst]]\n"	/* store-release: ordering and store in one */
#endif
	: "=m" (*dst)
	: [val] "r" (val), [dst] "r" (dst)
	: "memory");
}

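/*
 * Try to acquire the lock exactly once, without spinning or waiting.
 * Returns true if the lock was taken, false if it was already held or
 * the exclusive store lost a race with another PE.
 */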
bool __attribute__((target("arm"))) spin_trylock(spinlock_t *lock)
{
	volatile uint32_t *dst = &(lock->lock);
	uint32_t src = 1;
	uint32_t tmp;
	bool out;

	__asm__ volatile (
	"ldrex %[tmp], [%[dst]]\n"
	"cmp %[tmp], #0\n"			/* already held? */
	"strexeq %[tmp], %[src], [%[dst]]\n"	/* free: try to claim it */
	"cmpeq %[tmp], #0\n"			/* strex result: 0 on success */
	"dmb\n"
	"moveq %[out], #1\n"
	"movne %[out], #0\n"
	: "+m" (*dst), [tmp] "=&r" (tmp), [out] "=r" (out)
	: [src] "r" (src), [dst] "r" (dst)
	: "cc", "memory");

	return out;
}

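/*
 * Usage sketch (illustrative only; `stats_lock` and `stats_counter`
 * are hypothetical names, not part of this file): the usual pairing
 * of the primitives around a shared counter.
 *
 *	static spinlock_t stats_lock;
 *	static uint32_t stats_counter;
 *
 *	void stats_bump(void)
 *	{
 *		spin_lock(&stats_lock);
 *		stats_counter++;
 *		spin_unlock(&stats_lock);
 *	}
 *
 * spin_trylock() fits where blocking is not acceptable, e.g. a caller
 * that skips the update when the lock is contended:
 *
 *	if (spin_trylock(&stats_lock)) {
 *		stats_counter++;
 *		spin_unlock(&stats_lock);
 *	}
 */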