// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2024, Linaro Limited
 */

#include <assert.h>
#include <kernel/callout.h>
#include <kernel/misc.h>
#include <kernel/spinlock.h>
#include <mm/core_memprot.h>

TAILQ_HEAD(callout_head, callout);

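/*
 * callout_lock protects the list of pending callouts while
 * callout_sched_lock protects callout_sched_core, the core that last
 * programmed the next timeout on a per-CPU timer.
 */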
static unsigned int callout_sched_lock __nex_data = SPINLOCK_UNLOCK;
static size_t callout_sched_core __nex_bss;
static unsigned int callout_lock __nex_data = SPINLOCK_UNLOCK;
static const struct callout_timer_desc *callout_desc __nex_bss;
static struct callout_head callout_head __nex_data =
	TAILQ_HEAD_INITIALIZER(callout_head);

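/* Insert @co into the list of pending callouts, sorted by expiry time. */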
static void insert_callout(struct callout *co)
{
	struct callout *co2 = NULL;

	TAILQ_FOREACH(co2, &callout_head, link) {
		if (co->expiry_value < co2->expiry_value) {
			TAILQ_INSERT_BEFORE(co2, co, link);
			return;
		}
	}

	TAILQ_INSERT_TAIL(&callout_head, co, link);
}

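/*
 * Program the timer for the earliest pending callout, or disable the
 * timeout if no callouts are pending. Must be called with callout_lock
 * held.
 */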
static void schedule_next_timeout(void)
{
	const struct callout_timer_desc *desc = callout_desc;
	struct callout *co = TAILQ_FIRST(&callout_head);

	if (co)
		desc->set_next_timeout(desc, co->expiry_value);
	else
		desc->disable_timeout(desc);

	if (desc->is_per_cpu) {
		/*
		 * Remember which core is supposed to receive the next
		 * timer interrupt. This will not disable timers on other
		 * CPUs, instead their interrupts will be ignored as
		 * spurious calls.
		 */
		cpu_spin_lock(&callout_sched_lock);
		callout_sched_core = get_core_pos();
		cpu_spin_unlock(&callout_sched_lock);
	}
}

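/* Return true if @co currently is in the list of pending callouts. */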
static bool callout_is_active(struct callout *co)
{
	struct callout *co2 = NULL;

	TAILQ_FOREACH(co2, &callout_head, link)
		if (co2 == co)
			return true;

	return false;
}

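/*
 * Remove @co from the list of pending callouts, if queued, and
 * reschedule the timer for the remaining callouts.
 */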
void callout_rem(struct callout *co)
{
	uint32_t state = 0;

	state = cpu_spin_lock_xsave(&callout_lock);

	if (callout_is_active(co)) {
		TAILQ_REMOVE(&callout_head, co, link);
		schedule_next_timeout();
	}

	cpu_spin_unlock_xrestore(&callout_lock, state);
}

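/*
 * Add @co to be called back via @callback each @ms milliseconds. @co must
 * reside in nexus memory and @callback must be unpaged since they are
 * used from the timer service callback. Until the timer descriptor is
 * registered the period is kept in milliseconds and converted to ticks
 * in callout_service_init().
 */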
void callout_add(struct callout *co, bool (*callback)(struct callout *co),
		 uint32_t ms)
{
	const struct callout_timer_desc *desc = callout_desc;
	uint32_t state = 0;

	state = cpu_spin_lock_xsave(&callout_lock);

	assert(is_nexus(co) && !callout_is_active(co) && is_unpaged(callback));
	*co = (struct callout){ .callback = callback, };

	if (desc) {
		co->period = desc->ms_to_ticks(desc, ms);
		co->expiry_value = desc->get_now(desc) + co->period;
	} else {
		/* This will be converted to ticks in callout_service_init(). */
		co->period = ms;
	}

	insert_callout(co);
	if (desc && co == TAILQ_FIRST(&callout_head))
		schedule_next_timeout();

	cpu_spin_unlock_xrestore(&callout_lock, state);
}

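/*
 * Set a new period for @co, assumed to be called from the callout
 * callback where the timer descriptor is known to be registered.
 */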
void callout_set_next_timeout(struct callout *co, uint32_t ms)
{
	co->period = callout_desc->ms_to_ticks(callout_desc, ms);
}

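/*
 * Register the timer descriptor, convert periods of callouts added
 * before this point from milliseconds to ticks, and program the first
 * timeout.
 */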
void callout_service_init(const struct callout_timer_desc *desc)
{
	struct callout_head tmp_head = TAILQ_HEAD_INITIALIZER(tmp_head);
	struct callout *co = NULL;
	uint32_t state = 0;
	uint64_t now = 0;

	state = cpu_spin_lock_xsave(&callout_lock);

	assert(!callout_desc);
	assert(is_nexus(desc) && is_unpaged(desc->disable_timeout) &&
	       is_unpaged(desc->set_next_timeout) &&
	       is_unpaged(desc->ms_to_ticks) && is_unpaged(desc->get_now));

	callout_desc = desc;
	now = desc->get_now(desc);

	TAILQ_CONCAT(&tmp_head, &callout_head, link);
	while (!TAILQ_EMPTY(&tmp_head)) {
		co = TAILQ_FIRST(&tmp_head);
		TAILQ_REMOVE(&tmp_head, co, link);

		/*
		 * Periods set before the timer descriptor was registered
		 * are in milliseconds since the frequency of the timer
		 * isn't available at that point, so convert them to
		 * ticks now.
		 */
		co->period = desc->ms_to_ticks(desc, co->period);
		co->expiry_value = now + co->period;
		insert_callout(co);
	}
	schedule_next_timeout();

	cpu_spin_unlock_xrestore(&callout_lock, state);
}

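/*
 * Called by the timer driver when the timeout fires: run the callbacks
 * of all expired callouts, requeue those whose callback returns true,
 * and program the timer for the next pending callout.
 */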
void callout_service_cb(void)
{
	const struct callout_timer_desc *desc = callout_desc;
	struct callout *co = NULL;
	uint64_t now = 0;

	if (desc->is_per_cpu) {
		bool do_callout = false;

		/*
		 * schedule_next_timeout() saves the core it was last
		 * called on. If there's a mismatch here it means that
		 * another core has been scheduled for the next callout, so
		 * there's no work to be done for this core and we can
		 * disable the timeout on this CPU.
		 */
		cpu_spin_lock(&callout_sched_lock);
		do_callout = (get_core_pos() == callout_sched_core);
		if (!do_callout)
			desc->disable_timeout(desc);
		cpu_spin_unlock(&callout_sched_lock);
		if (!do_callout)
			return;
	}

	cpu_spin_lock(&callout_lock);

	now = desc->get_now(desc);
	while (!TAILQ_EMPTY(&callout_head)) {
		co = TAILQ_FIRST(&callout_head);
		if (co->expiry_value > now)
			break;

		TAILQ_REMOVE(&callout_head, co, link);

		if (co->callback(co)) {
			co->expiry_value += co->period;
			insert_callout(co);
		}
	}
	schedule_next_timeout();

	cpu_spin_unlock(&callout_lock);
}