xref: /optee_os/core/kernel/notif.c (revision 9f34db38245c9b3a4e6e7e63eb78a75e23ab2da3)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2021-2024, Linaro Limited
4  */
5 
6 #include <initcall.h>
7 #include <kernel/mutex.h>
8 #include <kernel/notif.h>
9 #include <kernel/panic.h>
10 #include <kernel/spinlock.h>
11 #include <kernel/thread.h>
12 #include <kernel/virtualization.h>
13 #include <mm/core_memprot.h>
14 #include <optee_rpc_cmd.h>
15 #include <types_ext.h>
16 
17 #if defined(CFG_CORE_ASYNC_NOTIF)
/* Per-guest asynchronous notification state */
struct notif_data {
	bool notif_started;	/* true once NOTIF_EVENT_STARTED has been delivered */
};
21 
/* Serializes yielding (bottom-half) notification delivery */
static struct mutex notif_mutex = MUTEX_INITIALIZER;
/* Protects notif_started flags and the driver list; shared across guests */
static unsigned int notif_lock __nex_data = SPINLOCK_UNLOCK;

/* Used when CFG_NS_VIRTUALIZATION is disabled (single "guest") */
static struct notif_data default_notif_data;
/* Slot id for per-guest struct notif_data, see nex_init_notif() */
static unsigned int notif_data_id __nex_bss;

/* List of registered notification drivers, guarded by notif_lock */
SLIST_HEAD(notif_driver_head, notif_driver);
static struct notif_driver_head notif_driver_head __nex_data =
	SLIST_HEAD_INITIALIZER(&notif_driver_head);
31 
32 static struct notif_data *get_notif_data(struct guest_partition *prtn)
33 {
34 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
35 		assert(prtn);
36 		return virt_get_guest_spec_data(prtn, notif_data_id);
37 	}
38 	return &default_notif_data;
39 }
40 
41 bool notif_async_is_started(uint16_t guest_id)
42 {
43 	struct guest_partition *prtn = virt_get_guest(guest_id);
44 	uint32_t old_itr_status = 0;
45 	bool ret = false;
46 
47 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) || prtn) {
48 		struct notif_data *ndata = get_notif_data(prtn);
49 
50 		old_itr_status = cpu_spin_lock_xsave(&notif_lock);
51 		ret = ndata->notif_started;
52 		cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
53 	}
54 
55 	virt_put_guest(prtn);
56 	return ret;
57 }
58 
59 void notif_register_driver(struct notif_driver *ndrv)
60 {
61 	uint32_t old_itr_status = 0;
62 
63 	assert(is_nexus(ndrv) && is_unpaged(ndrv->atomic_cb));
64 
65 	old_itr_status = cpu_spin_lock_xsave(&notif_lock);
66 
67 	SLIST_INSERT_HEAD(&notif_driver_head, ndrv, link);
68 
69 	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
70 }
71 
72 void notif_unregister_driver(struct notif_driver *ndrv)
73 {
74 	uint32_t old_itr_status = 0;
75 
76 	old_itr_status = cpu_spin_lock_xsave(&notif_lock);
77 
78 	SLIST_REMOVE(&notif_driver_head, ndrv, notif_driver, link);
79 
80 	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
81 }
82 
/*
 * Deliver an atomic notification event to every registered driver.
 *
 * Accepts only NOTIF_EVENT_STARTED and NOTIF_EVENT_SHUTDOWN; any other
 * event panics. The drivers' atomic_cb callbacks are invoked with
 * notif_lock held, which is why notif_register_driver() requires them to
 * be unpaged.
 *
 * NOTE(review): if @guest_id does not resolve to a partition under
 * CFG_NS_VIRTUALIZATION, get_notif_data() asserts on the NULL prtn —
 * presumably callers only pass valid guest ids; confirm against callers.
 */
void notif_deliver_atomic_event(enum notif_event ev, uint16_t guest_id)
{
	struct guest_partition *prtn = virt_get_guest(guest_id);
	struct notif_data *ndata = get_notif_data(prtn);
	struct notif_driver *nd = NULL;
	uint32_t old_itr_status = 0;

	old_itr_status = cpu_spin_lock_xsave(&notif_lock);

	switch (ev) {
	case NOTIF_EVENT_STARTED:
		/* Fan out STARTED only once; repeats are silently ignored */
		if (ndata->notif_started) {
			DMSG("Already started");
			goto out;
		}
		ndata->notif_started = true;
		break;
	case NOTIF_EVENT_SHUTDOWN:
		break;
	default:
		EMSG("Unknown event %d", (int)ev);
		panic();
	}

	/* Callbacks run under notif_lock, so the list cannot change here */
	SLIST_FOREACH(nd, &notif_driver_head, link)
		if (nd->atomic_cb)
			nd->atomic_cb(nd, ev, guest_id);

out:
	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
	virt_put_guest(prtn);
}
115 
/*
 * Deliver a yielding notification event (bottom half or stopped) for the
 * current guest.
 *
 * notif_mutex serializes all yielding deliveries; notif_lock guards the
 * started flag and the driver list. The spinlock is dropped around each
 * yielding_cb since the callback may sleep, then reacquired to continue
 * the walk — hence SLIST_FOREACH_SAFE, which tolerates the current entry
 * being unlinked while the lock was released.
 */
void notif_deliver_event(enum notif_event ev)
{
	struct guest_partition *prtn = virt_get_current_guest();
	struct notif_data *ndata = get_notif_data(prtn);
	uint32_t old_itr_status = 0;
	struct notif_driver *nd = NULL;
	struct notif_driver *nd_tmp = NULL;

	assert(ev == NOTIF_EVENT_DO_BOTTOM_HALF || ev == NOTIF_EVENT_STOPPED);

	/* Serialize all yielding notifications */
	mutex_lock(&notif_mutex);
	old_itr_status = cpu_spin_lock_xsave(&notif_lock);

	/* Nothing to deliver before NOTIF_EVENT_STARTED has been seen */
	if (!ndata || !ndata->notif_started) {
		DMSG("Not started ev %d", (int)ev);
		goto out;
	}

	if (ev == NOTIF_EVENT_STOPPED)
		ndata->notif_started = false;

	SLIST_FOREACH_SAFE(nd, &notif_driver_head, link, nd_tmp) {
		/* Drop the spinlock: yielding callbacks may sleep */
		cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);

		if (nd->yielding_cb)
			nd->yielding_cb(nd, ev);

		old_itr_status = cpu_spin_lock_xsave(&notif_lock);

		/* A restart while we were delivering STOPPED aborts the walk */
		if (ev == NOTIF_EVENT_STOPPED && ndata->notif_started) {
			DMSG("Started again while stopping");
			goto out;
		}
	}

out:
	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
	mutex_unlock(&notif_mutex);
	virt_put_guest(prtn);
}
157 
158 #ifdef CFG_NS_VIRTUALIZATION
159 static TEE_Result nex_init_notif(void)
160 {
161 	return virt_add_guest_spec_data(&notif_data_id,
162 					sizeof(struct notif_data), NULL);
163 }
164 nex_early_init(nex_init_notif);
165 #endif
166 
167 #endif /*CFG_CORE_ASYNC_NOTIF*/
168 
169 static TEE_Result notif_rpc(uint32_t func, uint32_t value1, uint32_t value2)
170 {
171 	struct thread_param params =
172 		THREAD_PARAM_VALUE(IN, func, value1, value2);
173 
174 	return thread_rpc_cmd(OPTEE_RPC_CMD_NOTIFICATION, 1, &params);
175 }
176 
177 TEE_Result notif_wait(uint32_t value)
178 {
179 	return notif_rpc(OPTEE_RPC_NOTIFICATION_WAIT, value, 0);
180 }
181 
182 TEE_Result notif_send_sync(uint32_t value)
183 {
184 	return notif_rpc(OPTEE_RPC_NOTIFICATION_SEND, value, 0);
185 }
186 
187 TEE_Result notif_wait_timeout(uint32_t value, uint32_t timeout_ms)
188 {
189 	return notif_rpc(OPTEE_RPC_NOTIFICATION_WAIT, value, timeout_ms);
190 }
191 
192