// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2024, Linaro Limited
 */

#include <assert.h>
#include <kernel/callout.h>
#include <kernel/misc.h>
#include <kernel/spinlock.h>
#include <mm/core_memprot.h>

TAILQ_HEAD(callout_head, callout);

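/*
 * callout_lock protects the callout queue, while callout_sched_lock
 * protects callout_sched_core, which records the core expected to take
 * the next timer interrupt when the timer is per-CPU.
 */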
static unsigned int callout_sched_lock __nex_data = SPINLOCK_UNLOCK;
static size_t callout_sched_core __nex_bss;
static unsigned int callout_lock __nex_data = SPINLOCK_UNLOCK;
static const struct callout_timer_desc *callout_desc __nex_bss;
static struct callout_head callout_head __nex_data =
	TAILQ_HEAD_INITIALIZER(callout_head);

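/*
 * Insert @co into the queue, which is kept sorted on expiry_value so
 * that the head is always the next callout to expire. Called with
 * callout_lock held.
 */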
static void insert_callout(struct callout *co)
{
	struct callout *co2 = NULL;

	TAILQ_FOREACH(co2, &callout_head, link) {
		if (co->expiry_value < co2->expiry_value) {
			TAILQ_INSERT_BEFORE(co2, co, link);
			return;
		}
	}

	TAILQ_INSERT_TAIL(&callout_head, co, link);
}

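/*
 * Program the timer with the expiry value of the earliest callout, or
 * disable the timeout when the queue is empty. Called with callout_lock
 * held.
 */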
static void schedule_next_timeout(void)
{
	const struct callout_timer_desc *desc = callout_desc;
	struct callout *co = TAILQ_FIRST(&callout_head);

	if (co)
		desc->set_next_timeout(desc, co->expiry_value);
	else
		desc->disable_timeout(desc);

	if (desc->is_per_cpu) {
		/*
		 * Remember which core is supposed to receive the next
		 * timer interrupt. This does not disable the timers on
		 * the other CPUs; their interrupts are instead ignored
		 * as spurious calls in callout_service_cb().
		 */
		cpu_spin_lock(&callout_sched_lock);
		callout_sched_core = get_core_pos();
		cpu_spin_unlock(&callout_sched_lock);
	}
}

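/*
 * Return true if @co is currently linked into the queue. Called with
 * callout_lock held.
 */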
static bool callout_is_active(struct callout *co)
{
	struct callout *co2 = NULL;

	TAILQ_FOREACH(co2, &callout_head, link)
		if (co2 == co)
			return true;

	return false;
}

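/*
 * Remove @co from the queue if it's active and reprogram the timer,
 * since @co may have been the callout the next timeout was set for.
 */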
void callout_rem(struct callout *co)
{
	uint32_t state = 0;

	state = cpu_spin_lock_xsave(&callout_lock);

	if (callout_is_active(co)) {
		TAILQ_REMOVE(&callout_head, co, link);
		schedule_next_timeout();
	}

	cpu_spin_unlock_xrestore(&callout_lock, state);
}

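/*
 * Add @co with a period of @ms milliseconds. If the timer descriptor
 * isn't registered yet, the period is kept in milliseconds and converted
 * to ticks in callout_service_init().
 */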
void callout_add(struct callout *co, bool (*callback)(struct callout *co),
		 uint32_t ms)
{
	const struct callout_timer_desc *desc = callout_desc;
	uint32_t state = 0;

	state = cpu_spin_lock_xsave(&callout_lock);

	assert(is_nexus(co) && !callout_is_active(co));
	*co = (struct callout){ .callback = callback, };

	if (desc) {
		co->period = desc->ms_to_ticks(desc, ms);
		co->expiry_value = desc->get_now(desc) + co->period;
	} else {
		/* This will be converted to ticks in callout_service_init(). */
		co->period = ms;
	}

	insert_callout(co);
	if (desc && co == TAILQ_FIRST(&callout_head))
		schedule_next_timeout();

	cpu_spin_unlock_xrestore(&callout_lock, state);
}

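/*
 * Set a new period for @co. The new period takes effect the next time
 * the callout is rearmed, that is, when its callback returns true in
 * callout_service_cb().
 */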
void callout_set_next_timeout(struct callout *co, uint32_t ms)
{
	co->period = callout_desc->ms_to_ticks(callout_desc, ms);
}

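/*
 * Register the timer driver @desc and start servicing callouts. Callouts
 * added before this point have their periods stored in milliseconds, so
 * they are converted to ticks and reinserted in expiry order against the
 * current counter value.
 */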
void callout_service_init(const struct callout_timer_desc *desc)
{
	struct callout_head tmp_head = TAILQ_HEAD_INITIALIZER(tmp_head);
	struct callout *co = NULL;
	uint32_t state = 0;
	uint64_t now = 0;

	state = cpu_spin_lock_xsave(&callout_lock);

	assert(!callout_desc);
	assert(is_nexus(desc) && is_unpaged(desc->disable_timeout) &&
	       is_unpaged(desc->set_next_timeout) &&
	       is_unpaged(desc->ms_to_ticks) && is_unpaged(desc->get_now));

	callout_desc = desc;
	now = desc->get_now(desc);

	TAILQ_CONCAT(&tmp_head, &callout_head, link);
	while (!TAILQ_EMPTY(&tmp_head)) {
		co = TAILQ_FIRST(&tmp_head);
		TAILQ_REMOVE(&tmp_head, co, link);

		/*
		 * Periods set before the timer descriptor was registered
		 * are in milliseconds since the frequency of the timer
		 * wasn't available at that point, so convert them to
		 * ticks now.
		 */
		co->period = desc->ms_to_ticks(desc, co->period);
		co->expiry_value = now + co->period;
		insert_callout(co);
	}
	schedule_next_timeout();

	cpu_spin_unlock_xrestore(&callout_lock, state);
}

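/*
 * Called from the timer interrupt handler. Expires each callout whose
 * expiry time has passed, rearming it one period later if its callback
 * returns true, and finally programs the timer for the next timeout.
 */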
void callout_service_cb(void)
{
	const struct callout_timer_desc *desc = callout_desc;
	struct callout *co = NULL;
	uint64_t now = 0;

	if (desc->is_per_cpu) {
		bool do_callout = false;

		/*
		 * schedule_next_timeout() saves the core it was last
		 * called on. If there's a mismatch here it means that
		 * another core has been scheduled for the next callout, so
		 * there's no work to be done for this core.
		 */
		cpu_spin_lock(&callout_sched_lock);
		do_callout = (get_core_pos() == callout_sched_core);
		cpu_spin_unlock(&callout_sched_lock);
		if (!do_callout)
			return;
	}

	cpu_spin_lock(&callout_lock);

	now = desc->get_now(desc);
	while (!TAILQ_EMPTY(&callout_head)) {
		co = TAILQ_FIRST(&callout_head);
		if (co->expiry_value > now)
			break;

		TAILQ_REMOVE(&callout_head, co, link);

		if (co->callback(co)) {
			co->expiry_value += co->period;
			insert_callout(co);
		}
	}
	schedule_next_timeout();

	cpu_spin_unlock(&callout_lock);
}