xref: /optee_os/core/drivers/plic.c (revision 4edd96e6d7a7228e907cf498b23e5b5fbdaf39a0)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright 2022-2023 NXP
4  */
5 
6 #include <assert.h>
7 #include <config.h>
8 #include <drivers/plic.h>
9 #include <io.h>
10 #include <kernel/dt.h>
11 #include <kernel/interrupt.h>
12 #include <kernel/panic.h>
13 #include <mm/core_memprot.h>
14 #include <mm/core_mmu.h>
15 #include <trace.h>
16 
/*
 * PLIC register map offsets, relative to the PLIC base address.
 * Layout follows the RISC-V PLIC specification: per-source priority
 * words, per-source pending bits, per-context enable bits, and one
 * threshold/claim-complete pair per context.
 */
#define PLIC_PRIORITY_OFFSET		0
#define PLIC_PENDING_OFFSET		0x1000
#define PLIC_ENABLE_OFFSET		0x2000
#define PLIC_THRESHOLD_OFFSET		0x200000
#define PLIC_CLAIM_OFFSET		0x200004

/* Per-source strides: 4 bytes per priority word, 1 bit per pending bit */
#define PLIC_PRIORITY_SHIFT_PER_SOURCE	U(2)
#define PLIC_PENDING_SHIFT_PER_SOURCE	U(0)

/* Per-context strides: 0x80 bytes of enable bits, 0x1000 bytes per context */
#define PLIC_ENABLE_SHIFT_PER_TARGET	U(7)
#define PLIC_THRESHOLD_SHIFT_PER_TARGET	U(12)
#define PLIC_CLAIM_SHIFT_PER_TARGET	U(12)

/* Address of the 32-bit priority register of @source */
#define PLIC_PRIORITY(base, source) \
		((base) + PLIC_PRIORITY_OFFSET + \
		SHIFT_U32(source, PLIC_PRIORITY_SHIFT_PER_SOURCE) \
	)
/* Address of the 32-bit pending word holding the bit of @source */
#define PLIC_PENDING(base, source) \
		((base) + PLIC_PENDING_OFFSET + \
		(4 * ((source) / 32)) \
	)
/* Address of the enable word for @source within @context */
#define PLIC_ENABLE(base, source, context) \
		((base) + PLIC_ENABLE_OFFSET + \
		SHIFT_U32(context, PLIC_ENABLE_SHIFT_PER_TARGET) +\
		(4 * ((source) / 32)) \
	)
/* Address of the priority threshold register of @context */
#define PLIC_THRESHOLD(base, context) \
		((base) + PLIC_THRESHOLD_OFFSET + \
		SHIFT_U32(context, PLIC_THRESHOLD_SHIFT_PER_TARGET) \
	)
/* Claim and complete share one register per context */
#define PLIC_COMPLETE(base, context) \
		((base) + PLIC_CLAIM_OFFSET + \
		SHIFT_U32(context, PLIC_CLAIM_SHIFT_PER_TARGET) \
	)
#define PLIC_CLAIM(base, context) PLIC_COMPLETE(base, context)

/* Map the PLIC MMIO range as secure device memory */
register_phys_mem_pgdir(MEM_AREA_IO_SEC, PLIC_BASE, PLIC_REG_SIZE);
54 
55 /*
56  * We assume that each hart has M-mode and S-mode, so the contexts look like:
57  * PLIC context 0 is hart 0 M-mode
58  * PLIC context 1 is hart 0 S-mode
59  * PLIC context 2 is hart 1 M-mode
60  * PLIC context 3 is hart 1 S-mode
61  * ...
62  */
63 static uint32_t plic_get_context(void)
64 {
65 	size_t hartid = get_core_pos();
66 	bool smode = IS_ENABLED(CFG_RISCV_S_MODE) ? true : false;
67 
68 	return hartid * 2 + smode;
69 }
70 
71 static bool __maybe_unused
72 plic_is_pending(struct plic_data *pd, uint32_t source)
73 {
74 	return io_read32(PLIC_PENDING(pd->plic_base, source)) &
75 	       BIT(source % 32);
76 }
77 
78 static void plic_set_pending(struct plic_data *pd, uint32_t source)
79 {
80 	io_setbits32(PLIC_PENDING(pd->plic_base, source), BIT(source % 32));
81 }
82 
83 static void plic_enable_interrupt(struct plic_data *pd, uint32_t source)
84 {
85 	uint32_t context = plic_get_context();
86 
87 	io_setbits32(PLIC_ENABLE(pd->plic_base, source, context),
88 		     BIT(source & 0x1f));
89 }
90 
91 static uint32_t __maybe_unused
92 plic_get_interrupt_enable(struct plic_data *pd, uint32_t source)
93 {
94 	uint32_t context = plic_get_context();
95 
96 	return io_read32(PLIC_ENABLE(pd->plic_base, source, context)) &
97 	       BIT(source & 0x1f);
98 }
99 
100 static void plic_disable_interrupt(struct plic_data *pd, uint32_t source)
101 {
102 	uint32_t context = plic_get_context();
103 
104 	io_clrbits32(PLIC_ENABLE(pd->plic_base, source, context),
105 		     BIT(source & 0x1f));
106 }
107 
108 static uint32_t __maybe_unused plic_get_threshold(struct plic_data *pd)
109 {
110 	uint32_t context = plic_get_context();
111 
112 	return io_read32(PLIC_THRESHOLD(pd->plic_base, context));
113 }
114 
115 static void plic_set_threshold(struct plic_data *pd, uint32_t threshold)
116 {
117 	uint32_t context = plic_get_context();
118 
119 	io_write32(PLIC_THRESHOLD(pd->plic_base, context), threshold);
120 }
121 
122 static uint32_t __maybe_unused
123 plic_get_priority(struct plic_data *pd, uint32_t source)
124 {
125 	return io_read32(PLIC_PRIORITY(pd->plic_base, source));
126 }
127 
128 static void plic_set_priority(struct plic_data *pd, uint32_t source,
129 			      uint32_t priority)
130 {
131 	io_write32(PLIC_PRIORITY(pd->plic_base, source), priority);
132 }
133 
134 static uint32_t plic_claim_interrupt(struct plic_data *pd)
135 {
136 	uint32_t context = plic_get_context();
137 
138 	return io_read32(PLIC_CLAIM(pd->plic_base, context));
139 }
140 
141 static void plic_complete_interrupt(struct plic_data *pd, uint32_t source)
142 {
143 	uint32_t context = plic_get_context();
144 
145 	io_write32(PLIC_CLAIM(pd->plic_base, context), source);
146 }
147 
148 static void plic_op_add(struct itr_chip *chip, size_t it,
149 			uint32_t type __unused,
150 			uint32_t prio)
151 {
152 	struct plic_data *pd = container_of(chip, struct plic_data, chip);
153 
154 	if (it > pd->max_it)
155 		panic();
156 
157 	plic_disable_interrupt(pd, it);
158 	plic_set_priority(pd, it, prio);
159 }
160 
161 static void plic_op_enable(struct itr_chip *chip, size_t it)
162 {
163 	struct plic_data *pd = container_of(chip, struct plic_data, chip);
164 
165 	if (it > pd->max_it)
166 		panic();
167 
168 	plic_enable_interrupt(pd, it);
169 }
170 
171 static void plic_op_disable(struct itr_chip *chip, size_t it)
172 {
173 	struct plic_data *pd = container_of(chip, struct plic_data, chip);
174 
175 	if (it > pd->max_it)
176 		panic();
177 
178 	plic_disable_interrupt(pd, it);
179 }
180 
181 static void plic_op_raise_pi(struct itr_chip *chip, size_t it)
182 {
183 	struct plic_data *pd = container_of(chip, struct plic_data, chip);
184 
185 	if (it > pd->max_it)
186 		panic();
187 
188 	plic_set_pending(pd, it);
189 }
190 
/*
 * itr_ops.raise_sgi: intentionally a no-op — the PLIC has no notion of
 * software-generated inter-processor interrupts.
 */
static void plic_op_raise_sgi(struct itr_chip *chip __unused,
			      size_t it __unused, uint8_t cpu_mask __unused)
{
}
195 
/*
 * itr_ops.set_affinity: not implemented (no-op). Routing is controlled
 * through the per-context enable registers instead.
 */
static void plic_op_set_affinity(struct itr_chip *chip __unused,
				 size_t it __unused, uint8_t cpu_mask __unused)
{
}
200 
/*
 * Device-tree interrupt specifier parsing is not implemented yet:
 * always report an invalid interrupt to the DT framework.
 */
static int plic_dt_get_irq(const uint32_t *properties __unused,
			   int count __unused, uint32_t *type __unused,
			   uint32_t *prio __unused)
{
	return DT_INFO_INVALID_INTERRUPT;
}
207 
/*
 * Return the highest supported interrupt source. The value comes from
 * the platform configuration (PLIC_NUM_SOURCES); the hardware is not
 * actually probed.
 */
static size_t probe_max_it(vaddr_t plic_base __unused)
{
	return PLIC_NUM_SOURCES;
}
212 
/* Interrupt-chip operations exposed by this PLIC driver */
static const struct itr_ops plic_ops = {
	.add = plic_op_add,
	.enable = plic_op_enable,
	.disable = plic_op_disable,
	.raise_pi = plic_op_raise_pi,
	.raise_sgi = plic_op_raise_sgi,
	.set_affinity = plic_op_set_affinity,
};
221 
222 void plic_init_base_addr(struct plic_data *pd, paddr_t plic_base_pa)
223 {
224 	vaddr_t plic_base = 0;
225 
226 	assert(cpu_mmu_enabled());
227 
228 	plic_base = core_mmu_get_va(plic_base_pa, MEM_AREA_IO_SEC,
229 				    PLIC_REG_SIZE);
230 	if (!plic_base)
231 		panic();
232 
233 	pd->plic_base = plic_base;
234 	pd->max_it = probe_max_it(plic_base);
235 	pd->chip.ops = &plic_ops;
236 
237 	if (IS_ENABLED(CFG_DT))
238 		pd->chip.dt_get_irq = plic_dt_get_irq;
239 }
240 
/*
 * Per-hart PLIC setup hook; currently empty.
 * TODO: To be called by secondary harts.
 */
void plic_hart_init(struct plic_data *pd __unused)
{
	/* TODO: To be called by secondary harts */
}
245 
246 void plic_init(struct plic_data *pd, paddr_t plic_base_pa)
247 {
248 	size_t n = 0;
249 
250 	plic_init_base_addr(pd, plic_base_pa);
251 
252 	for (n = 0; n <= pd->max_it; n++) {
253 		plic_disable_interrupt(pd, n);
254 		plic_set_priority(pd, n, 1);
255 	}
256 
257 	plic_set_threshold(pd, 0);
258 }
259 
260 void plic_it_handle(struct plic_data *pd)
261 {
262 	uint32_t id = plic_claim_interrupt(pd);
263 
264 	if (id <= pd->max_it)
265 		itr_handle(id);
266 	else
267 		DMSG("ignoring interrupt %" PRIu32, id);
268 
269 	plic_complete_interrupt(pd, id);
270 }
271