xref: /optee_os/core/kernel/notif.c (revision 25675979615c01f3c6bfbe105f53e07e939dd739)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2021-2023, Linaro Limited
4  */
5 
6 #include <kernel/mutex.h>
7 #include <kernel/notif.h>
8 #include <kernel/spinlock.h>
9 #include <kernel/thread.h>
10 #include <mm/core_memprot.h>
11 #include <optee_rpc_cmd.h>
12 #include <types_ext.h>
13 
14 #if defined(CFG_CORE_ASYNC_NOTIF)
15 static struct mutex notif_mutex = MUTEX_INITIALIZER;
16 static unsigned int notif_lock __nex_data = SPINLOCK_UNLOCK;
17 static bool notif_started;
18 
19 SLIST_HEAD(notif_driver_head, notif_driver);
20 static struct notif_driver_head notif_driver_head __nex_data =
21 	SLIST_HEAD_INITIALIZER(&notif_driver_head);
22 
23 
24 bool notif_async_is_started(void)
25 {
26 	uint32_t old_itr_status = 0;
27 	bool ret = false;
28 
29 	old_itr_status = cpu_spin_lock_xsave(&notif_lock);
30 	ret = notif_started;
31 	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
32 
33 	return ret;
34 }
35 
36 void notif_register_driver(struct notif_driver *ndrv)
37 {
38 	uint32_t old_itr_status = 0;
39 
40 	assert(is_nexus(ndrv) && is_unpaged(ndrv->atomic_cb));
41 
42 	old_itr_status = cpu_spin_lock_xsave(&notif_lock);
43 
44 	SLIST_INSERT_HEAD(&notif_driver_head, ndrv, link);
45 
46 	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
47 }
48 
49 void notif_unregister_driver(struct notif_driver *ndrv)
50 {
51 	uint32_t old_itr_status = 0;
52 
53 	old_itr_status = cpu_spin_lock_xsave(&notif_lock);
54 
55 	SLIST_REMOVE(&notif_driver_head, ndrv, notif_driver, link);
56 
57 	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
58 }
59 
60 void notif_deliver_atomic_event(enum notif_event ev)
61 {
62 	uint32_t old_itr_status = 0;
63 	struct notif_driver *nd = NULL;
64 
65 	assert(ev == NOTIF_EVENT_STARTED);
66 
67 	old_itr_status = cpu_spin_lock_xsave(&notif_lock);
68 
69 	if (notif_started) {
70 		DMSG("Already started");
71 		goto out;
72 	}
73 	notif_started = true;
74 
75 	SLIST_FOREACH(nd, &notif_driver_head, link)
76 		if (nd->atomic_cb)
77 			nd->atomic_cb(nd, ev);
78 
79 out:
80 	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
81 }
82 
/*
 * Deliver a yielding event (NOTIF_EVENT_DO_BOTTOM_HALF or
 * NOTIF_EVENT_STOPPED) to all registered drivers' yielding callbacks.
 *
 * Yielding callbacks may sleep, so the spinlock is dropped around each
 * callback invocation; notif_mutex serializes concurrent callers so the
 * list walk itself is stable while the spinlock is released.
 */
void notif_deliver_event(enum notif_event ev)
{
	uint32_t old_itr_status = 0;
	struct notif_driver *nd = NULL;
	struct notif_driver *nd_tmp = NULL;

	assert(ev == NOTIF_EVENT_DO_BOTTOM_HALF || ev == NOTIF_EVENT_STOPPED);

	/* Serialize all yielding notifications */
	mutex_lock(&notif_mutex);
	old_itr_status = cpu_spin_lock_xsave(&notif_lock);

	if (!notif_started) {
		DMSG("Not started ev %d", (int)ev);
		goto out;
	}

	/* Mark stopped before the callbacks so a restart can be detected below */
	if (ev == NOTIF_EVENT_STOPPED)
		notif_started = false;

	/* SAFE variant: nd_tmp keeps the walk valid while the lock is dropped */
	SLIST_FOREACH_SAFE(nd, &notif_driver_head, link, nd_tmp) {
		/* Release the spinlock: yielding callbacks may block */
		cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);

		if (nd->yielding_cb)
			nd->yielding_cb(nd, ev);

		old_itr_status = cpu_spin_lock_xsave(&notif_lock);

		/*
		 * If a NOTIF_EVENT_STARTED arrived while the lock was
		 * dropped, abandon the stop sequence.
		 */
		if (ev == NOTIF_EVENT_STOPPED && notif_started) {
			DMSG("Started again while stopping");
			goto out;
		}
	}

out:
	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
	mutex_unlock(&notif_mutex);
}
121 #endif /*CFG_CORE_ASYNC_NOTIF*/
122 
123 static TEE_Result notif_rpc(uint32_t func, uint32_t value1, uint32_t value2)
124 {
125 	struct thread_param params =
126 		THREAD_PARAM_VALUE(IN, func, value1, value2);
127 
128 	return thread_rpc_cmd(OPTEE_RPC_CMD_NOTIFICATION, 1, &params);
129 }
130 
131 TEE_Result notif_wait(uint32_t value)
132 {
133 	return notif_rpc(OPTEE_RPC_NOTIFICATION_WAIT, value, 0);
134 }
135 
136 TEE_Result notif_send_sync(uint32_t value)
137 {
138 	return notif_rpc(OPTEE_RPC_NOTIFICATION_SEND, value, 0);
139 }
140 
141 TEE_Result notif_wait_timeout(uint32_t value, uint32_t timeout_ms)
142 {
143 	return notif_rpc(OPTEE_RPC_NOTIFICATION_WAIT, value, timeout_ms);
144 }
145