/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * A stand-alone ticket spinlock implementation for use by the non-VHE
 * KVM hypervisor code running at EL2.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 *
 * Heavily based on the implementation removed by c11090474d70 which was:
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ARM64_KVM_NVHE_SPINLOCK_H__
#define __ARM64_KVM_NVHE_SPINLOCK_H__

#include <asm/alternative.h>
#include <asm/lse.h>

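/*
 * Ticket lock: the 32-bit lock word holds two 16-bit tickets. 'next' is
 * the ticket handed out to the next locker and 'owner' is the ticket
 * currently holding the lock; the lock is free when owner == next. The
 * halves are swapped for big-endian builds so that, once the word is
 * loaded into a register, 'next' always sits in the upper 16 bits and
 * 'owner' in the lower 16 bits, matching the shifts in the asm below.
 */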
typedef union hyp_spinlock {
	u32	__val;
	struct {
#ifdef __AARCH64EB__
		u16 next, owner;
#else
		u16 owner, next;
#endif
	};
} hyp_spinlock_t;

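/* Reset both tickets to zero, i.e. start in the unlocked state. */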
#define hyp_spin_lock_init(l)						\
do {									\
	*(l) = (hyp_spinlock_t){ .__val = 0 };				\
} while (0)

static inline void hyp_spin_lock(hyp_spinlock_t *lock)
{
	u32 tmp;
	hyp_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, #(1 << 16)\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, #(1 << 16)\n"
"	ldadda	%w2, %w0, %3\n"
	__nops(3))

	/* Did we get the lock? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner)
	: "memory");
}

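/*
 * Release the lock by handing it to the next ticket: a store-release
 * increment of 'owner' publishes the critical section and clears the
 * exclusive monitor armed by the waiters' ldaxrh, generating the event
 * that wakes any CPU parked in WFE in hyp_spin_lock().
 */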
static inline void hyp_spin_unlock(hyp_spinlock_t *lock)
{
	u64 tmp;

	asm volatile(
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	ldrh	%w1, %0\n"
	"	add	%w1, %w1, #1\n"
	"	stlrh	%w1, %0",
	/* LSE atomics */
	"	mov	%w1, #1\n"
	"	staddlh	%w1, %0\n"
	__nops(1))
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}

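/*
 * Illustrative usage only (not part of this API): a minimal sketch of a
 * hypothetical hyp data structure protected by one of these locks. The
 * names below ('example_table', 'example_table_lock', hyp_example_insert())
 * are made up for the sake of the example.
 *
 *	static hyp_spinlock_t example_table_lock;
 *	static u64 example_table[16];
 *
 *	static void hyp_example_init(void)
 *	{
 *		hyp_spin_lock_init(&example_table_lock);
 *	}
 *
 *	static void hyp_example_insert(int idx, u64 val)
 *	{
 *		hyp_spin_lock(&example_table_lock);
 *		example_table[idx] = val;	// critical section
 *		hyp_spin_unlock(&example_table_lock);
 *	}
 */
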
#endif /* __ARM64_KVM_NVHE_SPINLOCK_H__ */