xref: /rk3399_ARM-atf/lib/locks/exclusive/aarch64/spinlock.c (revision 0607fb7f6bd9480eea8e989700b0fd5bc7f79148)
/*
 * Copyright (c) 2025-2026, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <arch_features.h>
#include <lib/spinlock.h>

/*
 * Performs a compare-and-swap of 0 -> 1 with acquire semantics (CASA). If the
 * lock is already held, uses LDXR/WFE to wait efficiently until it is
 * released.
 */
static void spin_lock_atomic(volatile uint32_t *dst)
{
	uint32_t src = 1;
	uint32_t tmp;

	__asm__ volatile (
	".arch_extension lse\n"
	"1:	mov	%w[tmp], wzr\n"
	"2:	casa	%w[tmp], %w[src], [%[dst]]\n"
	"	cbz	%w[tmp], 3f\n"
	"	ldxr	%w[tmp], [%[dst]]\n"
	"	cbz	%w[tmp], 2b\n"
	"	wfe\n"
	"	b	1b\n"
	"3:\n"
	: "+m" (*dst), [tmp] "=&r" (tmp), [src] "+r" (src)
	: [dst] "r" (dst));
}

/*
 * Uses the load-acquire exclusive (LDAXR) and store-exclusive (STXR)
 * instruction pair, waiting in WFE while the lock is held.
 */
static void spin_lock_excl(volatile uint32_t *dst)
{
	uint32_t src = 1;
	uint32_t tmp;

	__asm__ volatile (
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w[tmp], [%[dst]]\n"
	"	cbnz	%w[tmp], 1b\n"
	"	stxr	%w[tmp], %w[src], [%[dst]]\n"
	"	cbnz	%w[tmp], 2b\n"
	: "+m" (*dst), [tmp] "=&r" (tmp), [src] "+r" (src)
	: [dst] "r" (dst));
}

void spin_lock(spinlock_t *lock)
{
	volatile uint32_t *dst = &(lock->lock);

	if (is_feat_lse_supported()) {
		spin_lock_atomic(dst);
	} else {
		spin_lock_excl(dst);
	}
}

/*
 * Use a store-release (STLR) to unconditionally clear the spinlock variable.
 * The store generates an event for all cores waiting in WFE while the address
 * is monitored by the global monitor.
 */
void spin_unlock(spinlock_t *lock)
{
	volatile uint32_t *dst = &(lock->lock);

	__asm__ volatile (
	"stlr	wzr, [%[dst]]"
	: "=m" (*dst)
	: [dst] "r" (dst));
}
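
/*
 * Usage sketch, for illustration only: protect shared state by bracketing the
 * critical section with spin_lock()/spin_unlock(). The lock, counter and
 * function below are hypothetical and not part of this file.
 */
static spinlock_t demo_lock;
static unsigned int demo_counter;

static void demo_increment(void)
{
	spin_lock(&demo_lock);
	demo_counter++;		/* critical section */
	spin_unlock(&demo_lock);
}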

static bool spin_trylock_atomic(volatile uint32_t *dst)
{
	uint32_t src = 1;
	uint32_t tmp = 0;
	bool out;

	__asm__ volatile (
	".arch_extension lse\n"
	"casa	%w[tmp], %w[src], [%[dst]]\n"
	"eor	%w[out], %w[tmp], #1\n" /* convert the result to bool */
	: "+m" (*dst), [tmp] "+r" (tmp), [out] "=r" (out)
	: [src] "r" (src), [dst] "r" (dst));

	return out;
}

static bool spin_trylock_excl(volatile uint32_t *dst)
{
	uint32_t src = 1;
	uint32_t ret;

	/*
	 * Loop until we either take the lock or observe that it is already
	 * held. The exclusive store can fail spuriously because of contention
	 * on the monitor, not only because the lock is held, so a failed
	 * store alone is not a reason to give up.
	 */
	while (1) {
		__asm__ volatile (
		"ldaxr	%w[ret], [%[dst]]\n"
		: [ret] "=r" (ret)
		: "m" (*dst), [dst] "r" (dst));

		/* 1 means lock is held */
		if (ret != 0) {
			return false;
		}

		__asm__ volatile (
		"stxr	%w[ret], %w[src], [%[dst]]\n"
		: "+m" (*dst), [ret] "=&r" (ret)
		: [src] "r" (src), [dst] "r" (dst));

		if (ret == 0) {
			return true;
		}
	}
}

/*
 * Attempts to acquire the spinlock once without spinning. If the lock is free
 * (0), stores 1 to acquire it and returns true; otherwise returns false.
 */
bool spin_trylock(spinlock_t *lock)
{
	volatile uint32_t *dst = &(lock->lock);

	if (is_feat_lse_supported()) {
		return spin_trylock_atomic(dst);
	} else {
		return spin_trylock_excl(dst);
	}
}
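
/*
 * Usage sketch, for illustration only: take the lock if it is free, otherwise
 * back off instead of spinning. The lock object and function below are
 * hypothetical and not part of this file.
 */
static spinlock_t demo_stats_lock;

static bool demo_try_update(void)
{
	if (!spin_trylock(&demo_stats_lock)) {
		/* Lock is contended: let the caller retry later. */
		return false;
	}

	/* ... update state protected by demo_stats_lock ... */

	spin_unlock(&demo_stats_lock);
	return true;
}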

#if USE_SPINLOCK_CAS
/*
 * Acquire bitlock using atomic bit set on byte. If the original read value
 * has the bit set, use load exclusive semantics to monitor the address and
 * enter WFE.
 */
void bit_lock(bitlock_t *lock, uint8_t mask)
{
	volatile uint8_t *dst = &(lock->lock);
	uint32_t tmp;

	/* there is no exclusive fallback */
	assert(is_feat_lse_supported());

	__asm__ volatile (
	"1:	ldsetab	%w[mask], %w[tmp], [%[dst]]\n"
	"	tst	%w[tmp], %w[mask]\n"
	"	b.eq	2f\n"
	"	ldxrb	%w[tmp], [%[dst]]\n"
	"	tst	%w[tmp], %w[mask]\n"
	"	b.eq	1b\n"
	"	wfe\n"
	"	b	1b\n"
	"2:\n"
	: "+m" (*dst), [tmp] "=&r" (tmp)
	: [mask] "r" (mask), [dst] "r" (dst)
	: "cc");
}

/*
 * Use an atomic bit clear with store-release to unconditionally clear the
 * bitlock variable. The store generates an event for all cores waiting in WFE
 * while the address is monitored by the global monitor.
 */
void bit_unlock(bitlock_t *lock, uint8_t mask)
{
	volatile uint8_t *dst = &(lock->lock);

	/* there is no exclusive fallback */
	assert(is_feat_lse_supported());

	__asm__ volatile (
	"stclrlb	%w[mask], [%[dst]]"
	: "=m" (*dst)
	: [mask] "r" (mask), [dst] "r" (dst));
}
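
/*
 * Usage sketch, for illustration only: a bitlock packs several independent
 * single-bit locks into one byte, selected by a one-bit mask. The lock object,
 * mask and function below are hypothetical and not part of this file.
 */
static bitlock_t demo_bitlock;

static void demo_bit_locked_op(void)
{
	const uint8_t demo_mask = (uint8_t)(1U << 0);

	bit_lock(&demo_bitlock, demo_mask);
	/* ... critical section protected by bit 0 of demo_bitlock ... */
	bit_unlock(&demo_bitlock, demo_mask);
}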
#endif