// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2021, Linaro Limited
 */

#include <bitstring.h>
#include <drivers/gic.h>
#include <kernel/interrupt.h>
#include <kernel/mutex.h>
#include <kernel/notif.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <optee_rpc_cmd.h>
#include <sm/optee_smc.h>
#include <types_ext.h>

#if defined(CFG_CORE_ASYNC_NOTIF)
static struct mutex notif_mutex = MUTEX_INITIALIZER;
static unsigned int notif_lock = SPINLOCK_UNLOCK;

SLIST_HEAD(notif_driver_head, notif_driver);
static struct notif_driver_head notif_driver_head =
	SLIST_HEAD_INITIALIZER(&notif_driver_head);

/* Asynchronous notification values pending retrieval */
static bitstr_t bit_decl(notif_values, NOTIF_ASYNC_VALUE_MAX + 1);
/* Asynchronous notification values currently allocated */
static bitstr_t bit_decl(notif_alloc_values, NOTIF_ASYNC_VALUE_MAX + 1);
static bool notif_started;

TEE_Result notif_alloc_async_value(uint32_t *val)
{
	static bool alloc_values_inited;
	uint32_t old_itr_status = 0;
	int bit = 0;

	old_itr_status = cpu_spin_lock_xsave(&notif_lock);

	/* NOTIF_VALUE_DO_BOTTOM_HALF is reserved and cannot be allocated */
	if (!alloc_values_inited) {
		bit_set(notif_alloc_values, NOTIF_VALUE_DO_BOTTOM_HALF);
		alloc_values_inited = true;
	}

	bit_ffc(notif_alloc_values, (int)NOTIF_ASYNC_VALUE_MAX + 1, &bit);
	if (bit >= 0) {
		*val = bit;
		bit_set(notif_alloc_values, bit);
	}

	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);

	if (bit < 0)
		return TEE_ERROR_OUT_OF_MEMORY;

	return TEE_SUCCESS;
}

void notif_free_async_value(uint32_t val)
{
	uint32_t old_itr_status = 0;

	old_itr_status = cpu_spin_lock_xsave(&notif_lock);

	assert(val < NOTIF_ASYNC_VALUE_MAX);
	assert(bit_test(notif_alloc_values, val));
	bit_clear(notif_alloc_values, val);

	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
}

uint32_t notif_get_value(bool *value_valid, bool *value_pending)
{
	uint32_t old_itr_status = 0;
	uint32_t res = 0;
	int bit = 0;

	old_itr_status = cpu_spin_lock_xsave(&notif_lock);

	/* Retrieve and clear the lowest pending value, if any */
	bit_ffs(notif_values, (int)NOTIF_ASYNC_VALUE_MAX + 1, &bit);
	*value_valid = (bit >= 0);
	if (!*value_valid) {
		*value_pending = false;
		goto out;
	}

	res = bit;
	bit_clear(notif_values, res);
	/* Report whether more values remain pending */
	bit_ffs(notif_values, (int)NOTIF_ASYNC_VALUE_MAX + 1, &bit);
	*value_pending = (bit >= 0);
out:
	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);

	return res;
}

void notif_send_async(uint32_t value)
{
	uint32_t old_itr_status = 0;

	COMPILE_TIME_ASSERT(NOTIF_VALUE_DO_BOTTOM_HALF ==
			    OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF);
	COMPILE_TIME_ASSERT(CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE);

	assert(value <= NOTIF_ASYNC_VALUE_MAX);
	old_itr_status = cpu_spin_lock_xsave(&notif_lock);

	DMSG("0x%"PRIx32, value);
	bit_set(notif_values, value);
	itr_raise_pi(CFG_CORE_ASYNC_NOTIF_GIC_INTID);

	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
}

bool notif_async_is_started(void)
{
	uint32_t old_itr_status = 0;
	bool ret = false;

	old_itr_status = cpu_spin_lock_xsave(&notif_lock);
	ret = notif_started;
	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);

	return ret;
}

void notif_register_driver(struct notif_driver *ndrv)
{
	uint32_t old_itr_status = 0;

	old_itr_status = cpu_spin_lock_xsave(&notif_lock);

	SLIST_INSERT_HEAD(&notif_driver_head, ndrv, link);

	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
}

void notif_unregister_driver(struct notif_driver *ndrv)
{
	uint32_t old_itr_status = 0;

	old_itr_status = cpu_spin_lock_xsave(&notif_lock);

	SLIST_REMOVE(&notif_driver_head, ndrv, notif_driver, link);

	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
}

void notif_deliver_atomic_event(enum notif_event ev)
{
	uint32_t old_itr_status = 0;
	struct notif_driver *nd = NULL;

	assert(ev == NOTIF_EVENT_STARTED);

	old_itr_status = cpu_spin_lock_xsave(&notif_lock);

	if (notif_started) {
		DMSG("Already started");
		goto out;
	}
	notif_started = true;

	SLIST_FOREACH(nd, &notif_driver_head, link)
		if (nd->atomic_cb)
			nd->atomic_cb(nd, ev);

out:
	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
}

void notif_deliver_event(enum notif_event ev)
{
	uint32_t old_itr_status = 0;
	struct notif_driver *nd = NULL;
	struct notif_driver *nd_tmp = NULL;

	assert(ev == NOTIF_EVENT_DO_BOTTOM_HALF || ev == NOTIF_EVENT_STOPPED);

	/* Serialize all yielding notifications */
	mutex_lock(&notif_mutex);
	old_itr_status = cpu_spin_lock_xsave(&notif_lock);

	if (!notif_started) {
		DMSG("Not started ev %d", (int)ev);
		goto out;
	}

	if (ev == NOTIF_EVENT_STOPPED)
		notif_started = false;

	SLIST_FOREACH_SAFE(nd, &notif_driver_head, link, nd_tmp) {
		/* The spinlock is dropped while calling the yielding callback */
		cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);

		if (nd->yielding_cb)
			nd->yielding_cb(nd, ev);

		old_itr_status = cpu_spin_lock_xsave(&notif_lock);

		if (ev == NOTIF_EVENT_STOPPED && notif_started) {
			DMSG("Started again while stopping");
			goto out;
		}
	}

out:
	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
	mutex_unlock(&notif_mutex);
}
#endif /*CFG_CORE_ASYNC_NOTIF*/

/* Forward a notification request to the normal world over RPC */
static TEE_Result notif_rpc(uint32_t func, uint32_t value)
{
	struct thread_param params = THREAD_PARAM_VALUE(IN, func, value, 0);

	return thread_rpc_cmd(OPTEE_RPC_CMD_NOTIFICATION, 1, &params);
}

TEE_Result notif_wait(uint32_t value)
{
	return notif_rpc(OPTEE_RPC_NOTIFICATION_WAIT, value);
}

TEE_Result notif_send_sync(uint32_t value)
{
	return notif_rpc(OPTEE_RPC_NOTIFICATION_SEND, value);
}
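
/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * hypothetical consumer could hook into this framework. The driver name,
 * callback bodies and init function below are assumptions; only
 * struct notif_driver, notif_register_driver() and the callback call
 * signatures are taken from the code above.
 *
 *	static void example_atomic_cb(struct notif_driver *ndrv __unused,
 *				      enum notif_event ev)
 *	{
 *		// Called with the notif spinlock held, must not sleep
 *		if (ev == NOTIF_EVENT_STARTED)
 *			DMSG("Async notifications available");
 *	}
 *
 *	static void example_yielding_cb(struct notif_driver *ndrv __unused,
 *					enum notif_event ev)
 *	{
 *		// Called without the spinlock held, may use mutexes etc.
 *		if (ev == NOTIF_EVENT_DO_BOTTOM_HALF)
 *			DMSG("Handle deferred work here");
 *	}
 *
 *	static struct notif_driver example_notif_driver = {
 *		.atomic_cb = example_atomic_cb,
 *		.yielding_cb = example_yielding_cb,
 *	};
 *
 *	static TEE_Result example_init(void)
 *	{
 *		notif_register_driver(&example_notif_driver);
 *		return TEE_SUCCESS;
 *	}
 */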