// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2024, Linaro Limited
 */

#include <assert.h>
#include <kernel/callout.h>
#include <kernel/misc.h>
#include <kernel/spinlock.h>
#include <mm/core_memprot.h>

TAILQ_HEAD(callout_head, callout);

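/*
 * callout_lock protects callout_head, the list of queued callouts kept
 * sorted on expiry time. callout_sched_lock protects callout_sched_core,
 * the core expected to take the next timer interrupt when the registered
 * timer is per-CPU.
 */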
static unsigned int callout_sched_lock __nex_data = SPINLOCK_UNLOCK;
static size_t callout_sched_core __nex_bss;
static unsigned int callout_lock __nex_data = SPINLOCK_UNLOCK;
static const struct callout_timer_desc *callout_desc __nex_bss;
static struct callout_head callout_head __nex_data =
	TAILQ_HEAD_INITIALIZER(callout_head);

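/*
 * Insert @co into callout_head, keeping the list sorted on ascending
 * expiry_value so the head is always the next callout to expire. Must be
 * called with callout_lock held.
 */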
static void insert_callout(struct callout *co)
{
	struct callout *co2 = NULL;

	TAILQ_FOREACH(co2, &callout_head, link) {
		if (co->expiry_value < co2->expiry_value) {
			TAILQ_INSERT_BEFORE(co2, co, link);
			return;
		}
	}

	TAILQ_INSERT_TAIL(&callout_head, co, link);
}

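/*
 * Program the timer for the earliest queued callout, or disable the
 * timeout when the list is empty. Must be called with callout_lock held.
 */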
static void schedule_next_timeout(void)
{
	const struct callout_timer_desc *desc = callout_desc;
	struct callout *co = TAILQ_FIRST(&callout_head);

	if (co)
		desc->set_next_timeout(desc, co->expiry_value);
	else
		desc->disable_timeout(desc);

	if (desc->is_per_cpu) {
		/*
		 * Remember which core is supposed to receive the next
		 * timer interrupt. This will not disable timers on other
		 * CPUs; instead, their interrupts will be ignored as
		 * spurious in callout_service_cb().
		 */
		cpu_spin_lock(&callout_sched_lock);
		callout_sched_core = get_core_pos();
		cpu_spin_unlock(&callout_sched_lock);
	}
}

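/* Return true if @co is queued in callout_head, false otherwise. */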
static bool callout_is_active(struct callout *co)
{
	struct callout *co2 = NULL;

	TAILQ_FOREACH(co2, &callout_head, link)
		if (co2 == co)
			return true;

	return false;
}

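/*
 * Remove @co from the callout list, if queued, and reprogram the timer
 * for the remaining callouts. Removing a callout that isn't active is a
 * no-op.
 */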
void callout_rem(struct callout *co)
{
	uint32_t state = 0;

	state = cpu_spin_lock_xsave(&callout_lock);

	if (callout_is_active(co)) {
		TAILQ_REMOVE(&callout_head, co, link);
		schedule_next_timeout();
	}

	cpu_spin_unlock_xrestore(&callout_lock, state);
}

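/*
 * Arm @co to call @callback in @ms milliseconds. @co must reside in
 * nexus memory and must not already be queued, and @callback must be
 * unpaged. The callback runs from callout_service_cb(); returning true
 * re-arms the callout with its current period, returning false drops it.
 *
 * callout_add() may be called before callout_service_init(); the period
 * is then kept in milliseconds and converted to ticks once the timer
 * descriptor is registered.
 *
 * Illustrative sketch of a periodic user (hypothetical driver code, not
 * part of this file):
 *
 *	static struct callout my_callout __nex_bss;
 *
 *	static bool my_tick(struct callout *co __unused)
 *	{
 *		// Do the periodic work here.
 *		return true;	// Keep firing every 10 ms
 *	}
 *
 *	callout_add(&my_callout, my_tick, 10);
 */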
void callout_add(struct callout *co, bool (*callback)(struct callout *co),
		 uint32_t ms)
{
	const struct callout_timer_desc *desc = callout_desc;
	uint32_t state = 0;

	state = cpu_spin_lock_xsave(&callout_lock);

	assert(is_nexus(co) && !callout_is_active(co) && is_unpaged(callback));
	*co = (struct callout){ .callback = callback, };

	if (desc) {
		co->period = desc->ms_to_ticks(desc, ms);
		co->expiry_value = desc->get_now(desc) + co->period;
	} else {
		/* This will be converted to ticks in callout_service_init(). */
		co->period = ms;
	}

	insert_callout(co);
	if (desc && co == TAILQ_FIRST(&callout_head))
		schedule_next_timeout();

	cpu_spin_unlock_xrestore(&callout_lock, state);
}

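/*
 * Update the period of @co to @ms milliseconds, converted to ticks.
 * Note that no lock is taken here: this is meant to be called from a
 * callout callback, where callout_lock is already held, and takes effect
 * when the callout is re-armed.
 */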
void callout_set_next_timeout(struct callout *co, uint32_t ms)
{
	co->period = callout_desc->ms_to_ticks(callout_desc, ms);
}

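/*
 * Register the timer descriptor @desc for the callout service. Callouts
 * added before this point have their periods converted from milliseconds
 * to ticks and get an expiry time relative to the current counter value,
 * then the timer is programmed for the earliest expiry.
 */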
void callout_service_init(const struct callout_timer_desc *desc)
{
	struct callout_head tmp_head = TAILQ_HEAD_INITIALIZER(tmp_head);
	struct callout *co = NULL;
	uint32_t state = 0;
	uint64_t now = 0;

	state = cpu_spin_lock_xsave(&callout_lock);

	assert(!callout_desc);
	assert(is_nexus(desc) && is_unpaged(desc->disable_timeout) &&
	       is_unpaged(desc->set_next_timeout) &&
	       is_unpaged(desc->ms_to_ticks) && is_unpaged(desc->get_now));

	callout_desc = desc;
	now = desc->get_now(desc);

	TAILQ_CONCAT(&tmp_head, &callout_head, link);
	while (!TAILQ_EMPTY(&tmp_head)) {
		co = TAILQ_FIRST(&tmp_head);
		TAILQ_REMOVE(&tmp_head, co, link);

		/*
		 * Periods set before the timer descriptor was registered
		 * are in milliseconds since the timer frequency wasn't
		 * available at that point, so convert them to ticks now.
		 */
		co->period = desc->ms_to_ticks(desc, co->period);
		co->expiry_value = now + co->period;
		insert_callout(co);
	}
	schedule_next_timeout();

	cpu_spin_unlock_xrestore(&callout_lock, state);
}

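/*
 * Called from the interrupt handler of the timer driving the callout
 * service. For per-CPU timers, only the core recorded by
 * schedule_next_timeout() proceeds; other cores disable their local
 * timeout and return. Expired callouts have their callbacks invoked with
 * callout_lock held; a callback returning true is re-armed one period
 * later, otherwise the callout stays removed. Finally the timer is
 * reprogrammed for the next expiry.
 */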
void callout_service_cb(void)
{
	const struct callout_timer_desc *desc = callout_desc;
	struct callout *co = NULL;
	uint64_t now = 0;

	if (desc->is_per_cpu) {
		bool do_callout = false;

		/*
		 * schedule_next_timeout() saves the core it was last
		 * called on. If there's a mismatch here it means that
		 * another core has been scheduled for the next callout, so
		 * there's no work to be done for this core and we can
		 * disable the timeout on this CPU.
		 */
		cpu_spin_lock(&callout_sched_lock);
		do_callout = (get_core_pos() == callout_sched_core);
		if (!do_callout)
			desc->disable_timeout(desc);
		cpu_spin_unlock(&callout_sched_lock);
		if (!do_callout)
			return;
	}

	cpu_spin_lock(&callout_lock);

	now = desc->get_now(desc);
	while (!TAILQ_EMPTY(&callout_head)) {
		co = TAILQ_FIRST(&callout_head);
		if (co->expiry_value > now)
			break;

		TAILQ_REMOVE(&callout_head, co, link);

		if (co->callback(co)) {
			co->expiry_value += co->period;
			insert_callout(co);
		}
	}
	schedule_next_timeout();

	cpu_spin_unlock(&callout_lock);
}