xref: /optee_os/core/drivers/plic.c (revision 45fecab081173ef58b1cb14b6ddf6892b0b9d3f6)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright 2022-2023 NXP
4  */
5 
6 #include <assert.h>
7 #include <config.h>
8 #include <drivers/plic.h>
9 #include <io.h>
10 #include <kernel/dt.h>
11 #include <kernel/interrupt.h>
12 #include <kernel/panic.h>
13 #include <mm/core_memprot.h>
14 #include <mm/core_mmu.h>
15 #include <trace.h>
16 
/*
 * PLIC register map, as offsets from the PLIC base:
 * - priority: one 32-bit register per interrupt source
 * - pending: one bit per source, 32 sources packed per 32-bit word
 * - enable: one bit per source per context, one word per 32 sources
 * - threshold and claim/complete: per-context registers
 */
#define PLIC_PRIORITY_OFFSET		0
#define PLIC_PENDING_OFFSET		0x1000
#define PLIC_ENABLE_OFFSET		0x2000
#define PLIC_THRESHOLD_OFFSET		0x200000
#define PLIC_CLAIM_OFFSET		0x200004

/* Per-source register strides, expressed as left-shift amounts */
#define PLIC_PRIORITY_SHIFT_PER_SOURCE	U(2)
#define PLIC_PENDING_SHIFT_PER_SOURCE	U(0)

/* Per-context register strides, expressed as left-shift amounts */
#define PLIC_ENABLE_SHIFT_PER_TARGET	U(7)
#define PLIC_THRESHOLD_SHIFT_PER_TARGET	U(12)
#define PLIC_CLAIM_SHIFT_PER_TARGET	U(12)

/* Address of the priority register of @source */
#define PLIC_PRIORITY(base, source) \
		((base) + PLIC_PRIORITY_OFFSET + \
		SHIFT_U32(source, PLIC_PRIORITY_SHIFT_PER_SOURCE) \
	)
/* Address of the pending word holding @source's pending bit */
#define PLIC_PENDING(base, source) \
		((base) + PLIC_PENDING_OFFSET + \
		(4 * ((source) / 32)) \
	)
/* Address of the enable word holding @source's enable bit for @context */
#define PLIC_ENABLE(base, source, context) \
		((base) + PLIC_ENABLE_OFFSET + \
		SHIFT_U32(context, PLIC_ENABLE_SHIFT_PER_TARGET) +\
		(4 * ((source) / 32)) \
	)
/* Address of @context's priority threshold register */
#define PLIC_THRESHOLD(base, context) \
		((base) + PLIC_THRESHOLD_OFFSET + \
		SHIFT_U32(context, PLIC_THRESHOLD_SHIFT_PER_TARGET) \
	)
/* Address of @context's claim/complete register */
#define PLIC_COMPLETE(base, context) \
		((base) + PLIC_CLAIM_OFFSET + \
		SHIFT_U32(context, PLIC_CLAIM_SHIFT_PER_TARGET) \
	)
/*
 * Claim and complete share one register: reading it claims an interrupt
 * (see plic_claim_interrupt()), writing it completes one
 * (see plic_complete_interrupt()).
 */
#define PLIC_CLAIM(base, context) PLIC_COMPLETE(base, context)
52 
/* Map the PLIC MMIO range as secure device memory */
register_phys_mem_pgdir(MEM_AREA_IO_SEC, PLIC_BASE, PLIC_REG_SIZE);

/* Driver state for one PLIC instance */
struct plic_data {
	vaddr_t plic_base;	/* Virtual base address of the PLIC registers */
	size_t max_it;		/* Highest supported interrupt source number */
	struct itr_chip chip;	/* Chip handle exposed to the itr framework */
};

/* Single PLIC instance, registered as main chip in plic_init() */
static struct plic_data plic_data __nex_bss;
62 
63 /*
64  * We assume that each hart has M-mode and S-mode, so the contexts look like:
65  * PLIC context 0 is hart 0 M-mode
66  * PLIC context 1 is hart 0 S-mode
67  * PLIC context 2 is hart 1 M-mode
68  * PLIC context 3 is hart 1 S-mode
69  * ...
70  */
71 static uint32_t plic_get_context(void)
72 {
73 	size_t hartid = get_core_pos();
74 	bool smode = IS_ENABLED(CFG_RISCV_S_MODE) ? true : false;
75 
76 	return hartid * 2 + smode;
77 }
78 
79 static bool __maybe_unused
80 plic_is_pending(struct plic_data *pd, uint32_t source)
81 {
82 	return io_read32(PLIC_PENDING(pd->plic_base, source)) &
83 	       BIT(source % 32);
84 }
85 
86 static void plic_set_pending(struct plic_data *pd, uint32_t source)
87 {
88 	io_setbits32(PLIC_PENDING(pd->plic_base, source), BIT(source % 32));
89 }
90 
91 static void plic_enable_interrupt(struct plic_data *pd, uint32_t source)
92 {
93 	uint32_t context = plic_get_context();
94 
95 	io_setbits32(PLIC_ENABLE(pd->plic_base, source, context),
96 		     BIT(source & 0x1f));
97 }
98 
99 static uint32_t __maybe_unused
100 plic_get_interrupt_enable(struct plic_data *pd, uint32_t source)
101 {
102 	uint32_t context = plic_get_context();
103 
104 	return io_read32(PLIC_ENABLE(pd->plic_base, source, context)) &
105 	       BIT(source & 0x1f);
106 }
107 
108 static void plic_disable_interrupt(struct plic_data *pd, uint32_t source)
109 {
110 	uint32_t context = plic_get_context();
111 
112 	io_clrbits32(PLIC_ENABLE(pd->plic_base, source, context),
113 		     BIT(source & 0x1f));
114 }
115 
116 static uint32_t __maybe_unused plic_get_threshold(struct plic_data *pd)
117 {
118 	uint32_t context = plic_get_context();
119 
120 	return io_read32(PLIC_THRESHOLD(pd->plic_base, context));
121 }
122 
123 static void plic_set_threshold(struct plic_data *pd, uint32_t threshold)
124 {
125 	uint32_t context = plic_get_context();
126 
127 	io_write32(PLIC_THRESHOLD(pd->plic_base, context), threshold);
128 }
129 
130 static uint32_t __maybe_unused
131 plic_get_priority(struct plic_data *pd, uint32_t source)
132 {
133 	return io_read32(PLIC_PRIORITY(pd->plic_base, source));
134 }
135 
136 static void plic_set_priority(struct plic_data *pd, uint32_t source,
137 			      uint32_t priority)
138 {
139 	io_write32(PLIC_PRIORITY(pd->plic_base, source), priority);
140 }
141 
142 static uint32_t plic_claim_interrupt(struct plic_data *pd)
143 {
144 	uint32_t context = plic_get_context();
145 
146 	return io_read32(PLIC_CLAIM(pd->plic_base, context));
147 }
148 
149 static void plic_complete_interrupt(struct plic_data *pd, uint32_t source)
150 {
151 	uint32_t context = plic_get_context();
152 
153 	io_write32(PLIC_CLAIM(pd->plic_base, context), source);
154 }
155 
156 static void plic_op_configure(struct itr_chip *chip, size_t it,
157 			      uint32_t type __unused, uint32_t prio)
158 {
159 	struct plic_data *pd = container_of(chip, struct plic_data, chip);
160 
161 	if (it > pd->max_it)
162 		panic();
163 
164 	plic_disable_interrupt(pd, it);
165 	plic_set_priority(pd, it, prio);
166 }
167 
168 static void plic_op_enable(struct itr_chip *chip, size_t it)
169 {
170 	struct plic_data *pd = container_of(chip, struct plic_data, chip);
171 
172 	if (it > pd->max_it)
173 		panic();
174 
175 	plic_enable_interrupt(pd, it);
176 }
177 
178 static void plic_op_disable(struct itr_chip *chip, size_t it)
179 {
180 	struct plic_data *pd = container_of(chip, struct plic_data, chip);
181 
182 	if (it > pd->max_it)
183 		panic();
184 
185 	plic_disable_interrupt(pd, it);
186 }
187 
188 static void plic_op_raise_pi(struct itr_chip *chip, size_t it)
189 {
190 	struct plic_data *pd = container_of(chip, struct plic_data, chip);
191 
192 	if (it > pd->max_it)
193 		panic();
194 
195 	plic_set_pending(pd, it);
196 }
197 
/*
 * itr_ops::raise_sgi handler: software-generated (inter-processor)
 * interrupts are not handled by this driver, so this is a no-op.
 */
static void plic_op_raise_sgi(struct itr_chip *chip __unused,
			      size_t it __unused, uint32_t cpu_mask __unused)
{
}
202 
/*
 * itr_ops::set_affinity handler: interrupt routing to other harts is
 * not implemented here, so this is a no-op.
 */
static void plic_op_set_affinity(struct itr_chip *chip __unused,
				 size_t it __unused, uint8_t cpu_mask __unused)
{
}
207 
/*
 * Device-tree "interrupts" property parser: not implemented yet, so
 * every lookup reports an invalid interrupt.
 */
static int plic_dt_get_irq(const uint32_t *properties __unused,
			   int count __unused, uint32_t *type __unused,
			   uint32_t *prio __unused)
{
	return DT_INFO_INVALID_INTERRUPT;
}
214 
/*
 * Report the highest interrupt source number. Currently taken from the
 * platform configuration rather than probed from the hardware.
 */
static size_t probe_max_it(vaddr_t plic_base __unused)
{
	return PLIC_NUM_SOURCES;
}
219 
/*
 * Interrupt chip operations handed to the interrupt framework.
 * mask/unmask map onto the same enable-bit handlers as enable/disable.
 */
static const struct itr_ops plic_ops = {
	.configure = plic_op_configure,
	.mask = plic_op_disable,
	.unmask = plic_op_enable,
	.enable = plic_op_enable,
	.disable = plic_op_disable,
	.raise_pi = plic_op_raise_pi,
	.raise_sgi = plic_op_raise_sgi,
	.set_affinity = plic_op_set_affinity,
};
230 
231 static void plic_init_base_addr(struct plic_data *pd, paddr_t plic_base_pa)
232 {
233 	vaddr_t plic_base = 0;
234 
235 	assert(cpu_mmu_enabled());
236 
237 	plic_base = core_mmu_get_va(plic_base_pa, MEM_AREA_IO_SEC,
238 				    PLIC_REG_SIZE);
239 	if (!plic_base)
240 		panic();
241 
242 	pd->plic_base = plic_base;
243 	pd->max_it = probe_max_it(plic_base);
244 	pd->chip.ops = &plic_ops;
245 
246 	if (IS_ENABLED(CFG_DT))
247 		pd->chip.dt_get_irq = plic_dt_get_irq;
248 }
249 
/* Per-hart PLIC setup; currently empty. */
void plic_hart_init(void)
{
	/* TODO: To be called by secondary harts */
}
254 
255 void plic_init(paddr_t plic_base_pa)
256 {
257 	struct plic_data *pd = &plic_data;
258 	size_t n = 0;
259 
260 	plic_init_base_addr(pd, plic_base_pa);
261 
262 	for (n = 0; n <= pd->max_it; n++) {
263 		plic_disable_interrupt(pd, n);
264 		plic_set_priority(pd, n, 1);
265 	}
266 
267 	plic_set_threshold(pd, 0);
268 
269 	interrupt_main_init(&plic_data.chip);
270 }
271 
272 void plic_it_handle(void)
273 {
274 	struct plic_data *pd = &plic_data;
275 	uint32_t id = plic_claim_interrupt(pd);
276 
277 	if (id > 0 && id <= pd->max_it)
278 		interrupt_call_handlers(&pd->chip, id);
279 	else
280 		DMSG("ignoring interrupt %" PRIu32, id);
281 
282 	plic_complete_interrupt(pd, id);
283 }
284