1 /*
2 * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4 *
5 * SPDX-License-Identifier: BSD-3-Clause
6 */
7 #include <errno.h>
8 #include <stdbool.h>
9 #include <stddef.h>
10 #include <stdint.h>
11 #include <string.h>
12
13 #include <common/debug.h>
14 #include <common/runtime_svc.h>
15 #include <context.h>
16 #include <lib/coreboot.h>
17 #include <lib/utils_def.h>
18 #include <lib/xlat_tables/xlat_tables_v2.h>
19 #include <smccc_helpers.h>
20 #include <tools_share/uuid.h>
21
22 #include <drivers/qti/accesscontrol/accesscontrol.h>
23
24 #include <qti_plat.h>
25 #include <qti_secure_io_cfg.h>
26
27 /*
28 * SIP service - SMC function IDs for SiP Service queries
29 *
30 */
31 #define QTI_SIP_SVC_CALL_COUNT_ID U(0x0200ff00)
32 #define QTI_SIP_SVC_UID_ID U(0x0200ff01)
33 /* 0x8200ff02 is reserved*/
34 #define QTI_SIP_SVC_VERSION_ID U(0x0200ff03)
35 #define QTI_SIP_SVC_AVAILABLE_ID U(0x02000601)
36 /*
37 * Syscall's to allow Non Secure world accessing peripheral/IO memory
38 * those are secure/proteced BUT not required to be secure.
39 */
40 #define QTI_SIP_SVC_SECURE_IO_READ_ID U(0x02000501)
41 #define QTI_SIP_SVC_SECURE_IO_WRITE_ID U(0x02000502)
42
43 /*
44 * Syscall's to assigns a list of intermediate PAs from a
45 * source Virtual Machine (VM) to a destination VM.
46 */
47 #define QTI_SIP_SVC_MEM_ASSIGN_ID U(0x02000C16)
48
49 #define QTI_SIP_SVC_SECURE_IO_READ_PARAM_ID U(0x1)
50 #define QTI_SIP_SVC_SECURE_IO_WRITE_PARAM_ID U(0x2)
51 #define QTI_SIP_SVC_MEM_ASSIGN_PARAM_ID U(0x1117)
52
53 #define QTI_SIP_SVC_CALL_COUNT U(0x3)
54 #define QTI_SIP_SVC_VERSION_MAJOR U(0x0)
55 #define QTI_SIP_SVC_VERSION_MINOR U(0x0)
56
57 #define QTI_VM_LAST U(44)
58 #define SIZE4K U(0x1000)
59 #define QTI_VM_MAX_LIST_SIZE U(0x20)
60
61 #define FUNCID_OEN_NUM_MASK ((FUNCID_OEN_MASK << FUNCID_OEN_SHIFT)\
62 |(FUNCID_NUM_MASK << FUNCID_NUM_SHIFT))
63
/*
 * Staging area for one MEM_ASSIGN request: the three parameter lists are
 * copied here from non-secure memory before validation and use.
 */
struct qti_mmap_params {
	qti_accesscontrol_mem_t mem[QTI_VM_MAX_LIST_SIZE];	/* IPA regions to reassign */
	u_register_t mem_cnt;	/* number of valid entries in mem[] */
	qti_accesscontrol_perm_t dst[QTI_VM_LAST];	/* destination VMs + permissions */
	u_register_t dst_cnt;	/* number of valid entries in dst[] */
	uint32_t src[QTI_VM_LAST];	/* source VM ids */
	u_register_t src_cnt;	/* number of valid entries in src[] */
};
72
/* SiP service status codes returned to the caller in x0. */
enum {
	QTI_SIP_SUCCESS = 0,
	QTI_SIP_NOT_SUPPORTED = -1,
	QTI_SIP_PREEMPTED = -2,
	QTI_SIP_INVALID_PARAM = -3,
};
79
/* QTI SiP Service UUID, returned by QTI_SIP_SVC_UID_ID. */
DEFINE_SVC_UUID2(qti_sip_svc_uid,
		 0x43864748, 0x217f, 0x41ad, 0xaa, 0x5a,
		 0xba, 0xe7, 0x0f, 0xa5, 0x52, 0xaf);
84
/*
 * Check whether 'addr' is one of the registers the non-secure world is
 * explicitly allowed to touch via the SECURE_IO read/write syscalls.
 * Returns true only on an exact match against the platform allow-list.
 */
static bool qti_is_secure_io_access_allowed(u_register_t addr)
{
	size_t idx;

	for (idx = 0U; idx < ARRAY_SIZE(qti_secure_io_allowed_regs); idx++) {
		if (qti_secure_io_allowed_regs[idx] == (uintptr_t)addr) {
			return true;
		}
	}

	return false;
}
97
/*
 * Report whether 'smc_fid' names a syscall implemented by this SiP
 * service.  Used by the QTI_SIP_SVC_AVAILABLE_ID query.
 */
static bool qti_check_syscall_availability(u_register_t smc_fid)
{
	static const u_register_t supported_fids[] = {
		QTI_SIP_SVC_CALL_COUNT_ID,
		QTI_SIP_SVC_UID_ID,
		QTI_SIP_SVC_VERSION_ID,
		QTI_SIP_SVC_AVAILABLE_ID,
		QTI_SIP_SVC_SECURE_IO_READ_ID,
		QTI_SIP_SVC_SECURE_IO_WRITE_ID,
		QTI_SIP_SVC_MEM_ASSIGN_ID,
	};
	size_t i;

	for (i = 0U; i < ARRAY_SIZE(supported_fids); i++) {
		if (smc_fid == supported_fids[i]) {
			return true;
		}
	}

	return false;
}
113
/*
 * Validate a MEM_ASSIGN request after its parameter lists have been copied
 * into secure memory.  Checks that:
 *  - all list pointers are non-NULL and all counts are non-zero and within
 *    their respective bounds (VM lists < QTI_VM_LAST entries, mappings
 *    <= QTI_VM_MAX_LIST_SIZE entries),
 *  - every region's address and size are 4KB aligned, the size is non-zero,
 *    and address + size does not wrap around,
 *  - (coreboot builds only) every region lies entirely within CB_MEM_RAM or
 *    CB_MEM_RESERVED memory,
 *  - every source and destination VM id is below QTI_VM_LAST.
 *
 * Returns true when every check passes, false (with an ERROR log) otherwise.
 */
static bool qti_mem_assign_validate_param(qti_accesscontrol_mem_t *mem_info,
					  u_register_t u_num_mappings,
					  uint32_t *src_vm_list,
					  u_register_t src_vm_list_cnt,
					  qti_accesscontrol_perm_t *dst_vm_list,
					  u_register_t dst_vm_list_cnt)
{
	u_register_t end;
	int i;

	if ((src_vm_list == NULL) || (dst_vm_list == NULL)
	    || (mem_info == NULL) || (src_vm_list_cnt == 0)
	    || (src_vm_list_cnt >= QTI_VM_LAST) || (dst_vm_list_cnt == 0)
	    || (dst_vm_list_cnt >= QTI_VM_LAST) || (u_num_mappings == 0)
	    || u_num_mappings > QTI_VM_MAX_LIST_SIZE) {
		ERROR("vm count is 0 or more then QTI_VM_LAST or empty list\n");
		ERROR("src_vm_list %p dst_vm_list %p mem_info %p src_vm_list_cnt %u dst_vm_list_cnt %u u_num_mappings %u\n",
		      src_vm_list, dst_vm_list, mem_info,
		      (unsigned int)src_vm_list_cnt,
		      (unsigned int)dst_vm_list_cnt,
		      (unsigned int)u_num_mappings);
		return false;
	}

	/* Per-region alignment and overflow checks. */
	for (i = 0; i < u_num_mappings; i++) {
		if (((mem_info[i].mem_addr & (SIZE4K - 1)) != 0)
		    || (mem_info[i].mem_size == 0)
		    || ((mem_info[i].mem_size & (SIZE4K - 1)) != 0)) {
			ERROR("mem_info passed buffer 0x%x or size 0x%x is not 4k aligned\n",
			      (unsigned int)mem_info[i].mem_addr,
			      (unsigned int)mem_info[i].mem_size);
			return false;
		}

		/* Reject regions that wrap around the address space. */
		if (add_overflow(mem_info[i].mem_addr, mem_info[i].mem_size,
				 &end) != 0) {
			ERROR("overflow in mem_addr 0x%x add mem_size 0x%x\n",
			      (unsigned int)mem_info[i].mem_addr,
			      (unsigned int)mem_info[i].mem_size);
			return false;
		}

#if COREBOOT == 1
		/* Only RAM/RESERVED regions from the coreboot memory map
		 * may be reassigned between VMs.
		 */
		coreboot_memory_t mem_type = coreboot_get_memory_type(
						mem_info[i].mem_addr,
						mem_info[i].mem_size);
		if (mem_type != CB_MEM_RAM && mem_type != CB_MEM_RESERVED) {
			ERROR("memory region not in CB MEM RAM or RESERVED area: region start 0x%x size 0x%x\n",
			      (unsigned int)mem_info[i].mem_addr,
			      (unsigned int)mem_info[i].mem_size);
			return false;
		}
#endif
	}
	/* Every source VM id must be a known VM. */
	for (i = 0; i < src_vm_list_cnt; i++) {
		if (src_vm_list[i] >= QTI_VM_LAST) {
			ERROR("src_vm_list[%d] 0x%x is more then QTI_VM_LAST\n",
			      i, (unsigned int)src_vm_list[i]);
			return false;
		}
	}
	/* Every destination VM id must be a known VM. */
	for (i = 0; i < dst_vm_list_cnt; i++) {
		if (dst_vm_list[i].dst_vm >= QTI_VM_LAST) {
			ERROR("dst_vm_list[%d] 0x%x is more then QTI_VM_LAST\n",
			      i, (unsigned int)dst_vm_list[i].dst_vm);
			return false;
		}
	}
	return true;
}
184
/*
 * Fetch the indirect SMC arguments x5..x7 from the non-secure buffer whose
 * address arrives in *x5.  The buffer is laid out as four register-sized
 * words (32-bit for SMC32 callers, 64-bit for SMC64); it is mapped
 * read-only, the three argument words are copied out, and the mapping is
 * removed again.
 *
 * Returns 0 on success, or the (negative) map/unmap error code.
 */
static int get_indirect_args(uint32_t smccc, u_register_t *x5, u_register_t *x6,
			     u_register_t *x7)
{
	const uintptr_t buf = (uintptr_t)*x5;
	const size_t word_sz =
		(smccc == SMC_32) ? sizeof(uint32_t) : sizeof(uint64_t);
	const size_t len = word_sz * 4;
	int ret;

	ret = qti_mmap_add_dynamic_region(buf, len, MT_NS | MT_RO_DATA);
	if (ret != 0) {
		ERROR("map failed for params NS Buffer 0x%lx 0x%lx\n",
		      (unsigned long)buf, (unsigned long)len);
		return ret;
	}

	if (smccc == SMC_32) {
		const uint32_t *words = (const uint32_t *)buf;

		*x5 = words[0];
		*x6 = words[1];
		*x7 = words[2];
	} else {
		const uint64_t *words = (const uint64_t *)buf;

		*x5 = words[0];
		*x6 = words[1];
		*x7 = words[2];
	}

	ret = qti_mmap_remove_dynamic_region(buf, len);
	if (ret != 0) {
		ERROR("unmap failed for params NS Buffer 0x%lx 0x%lx\n",
		      (unsigned long)buf, (unsigned long)len);
	}

	return ret;
}
227
/*
 * Copy the MEM_ASSIGN parameter lists from non-secure memory into 'params'
 * and validate them.
 *
 * x2/x4/x6 hold the NS buffer addresses of the region list, the source VM
 * list and the destination permission list respectively; x3/x5/x7 hold the
 * byte sizes of those buffers.  One dynamic read-only mapping spanning all
 * three buffers is created for the copy and removed before returning.
 *
 * Returns 0 on success, -EINVAL on invalid parameters, or the map/unmap
 * error code.
 */
static int get_mem_params(struct qti_mmap_params *params,
			  u_register_t x2, u_register_t x3, u_register_t x4,
			  u_register_t x5, u_register_t x6, u_register_t x7)
{
	u_register_t e2, e4, e6;
	u_register_t start = 0;
	u_register_t end = 0;
	u_register_t len = 0;
	bool rc = false;
	int ret = -EINVAL;
	int ret1 = -EINVAL;

	/* Overflow check:
	 * args 2,4,6 contain buffer addresses
	 * args 3,5,7 contain buffer sizes
	 */
	if (x2 == 0 || x4 == 0 || x6 == 0) {
		return -EINVAL;
	}

	if (add_overflow(x2, x3, &e2) != 0 ||
	    add_overflow(x4, x5, &e4) != 0 ||
	    add_overflow(x6, x7, &e6) != 0) {
		ERROR("map failed for params NS Buffer2, invalid params\n");
		return -EINVAL;
	}

	/* Map one region covering all three NS buffers. */
	start = MIN(x2, x4);
	start = MIN(start, x6);
	end = MAX(e2, e4);
	end = MAX(end, e6);
	len = end - start;

	ret = qti_mmap_add_dynamic_region((uintptr_t)start, (size_t)len,
					  (MT_NS | MT_RO_DATA));
	if (ret != 0) {
		ERROR("map failed for params NS Buffer2 0x%lx 0x%lx\n",
		      (unsigned long)start, (unsigned long)len);
		return ret;
	}

	/* Parameter validation */
	ret = -EINVAL;

	/* Each buffer size must be a whole multiple of its element size. */
	if ((x3 % sizeof(qti_accesscontrol_mem_t)) != 0U ||
	    (x5 % sizeof(uint32_t)) != 0U ||
	    (x7 % sizeof(qti_accesscontrol_perm_t)) != 0U) {
		ERROR("invalid parameter buffer sizes\n");
		goto error;
	}

	/* Copy each list, rejecting counts that exceed the staging arrays. */
	params->mem_cnt = x3 / sizeof(params->mem[0]);
	if (params->mem_cnt > ARRAY_SIZE(params->mem)) {
		ERROR("Param validation failed\n");
		goto error;
	}
	memcpy(params->mem, (void *)(uintptr_t)x2,
	       params->mem_cnt * sizeof(params->mem[0]));

	params->src_cnt = x5 / sizeof(params->src[0]);
	if (params->src_cnt >= ARRAY_SIZE(params->src)) {
		ERROR("Param validation failed\n");
		goto error;
	}
	memcpy(params->src, (void *)(uintptr_t)x4,
	       params->src_cnt * sizeof(params->src[0]));

	params->dst_cnt = x7 / sizeof(params->dst[0]);
	if (params->dst_cnt >= ARRAY_SIZE(params->dst)) {
		ERROR("Param validation failed\n");
		goto error;
	}
	memcpy(params->dst, (void *)(uintptr_t)x6,
	       params->dst_cnt * sizeof(params->dst[0]));

	/* Semantic validation of the now-local copies. */
	rc = qti_mem_assign_validate_param(params->mem, params->mem_cnt,
					   params->src, params->src_cnt,
					   params->dst, params->dst_cnt);
	if (rc != true) {
		ERROR("Param validation failed\n");
	}

	ret = rc == true ? 0 : -EINVAL;
error:
	/* Always remove the temporary NS mapping, even on failure. */
	ret1 = qti_mmap_remove_dynamic_region((uintptr_t)start, (size_t)len);
	if (ret1 != 0) {
		ERROR("unmap failed for params NS Buffer 0x%lx 0x%lx\n",
		      (unsigned long)start, (unsigned long)len);
	}

	return ret ? ret : ret1;
}
320
321
qti_sip_mem_assign(void * handle,uint32_t smc_cc,u_register_t x1,u_register_t x2,u_register_t x3,u_register_t x4)322 static uintptr_t qti_sip_mem_assign(void *handle, uint32_t smc_cc,
323 u_register_t x1,
324 u_register_t x2,
325 u_register_t x3, u_register_t x4)
326 {
327 struct qti_mmap_params params = { 0 };
328 int ret = QTI_SIP_NOT_SUPPORTED;
329 u_register_t x5, x6, x7;
330
331 if (x1 != QTI_SIP_SVC_MEM_ASSIGN_PARAM_ID) {
332 ERROR("invalid mem_assign param id\n");
333 goto out;
334 }
335
336 x5 = read_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X5);
337 if (x5 == 0x0) {
338 ERROR("no mem_assign mapping info\n");
339 goto out;
340 }
341
342 if (smc_cc == SMC_32) {
343 x5 = (uint32_t)x5;
344 }
345
346 ret = get_indirect_args(smc_cc, &x5, &x6, &x7);
347 if (ret != 0)
348 goto out;
349
350 ret = get_mem_params(¶ms, x2, x3, x4, x5, x6, x7);
351 if (ret != 0)
352 goto out;
353
354 ret = qti_accesscontrol_mem_assign(params.mem, params.mem_cnt,
355 params.src, params.src_cnt,
356 params.dst, params.dst_cnt);
357 out:
358 SMC_RET2(handle, ret == 0 ? QTI_SIP_SUCCESS : QTI_SIP_INVALID_PARAM,
359 ret);
360 }
361
362 /*
363 * This function handles QTI specific syscalls. Currently only SiP calls are present.
364 * Both FAST & YIELD type call land here.
365 */
qti_sip_handler(uint32_t smc_fid,u_register_t x1,u_register_t x2,u_register_t x3,u_register_t x4,void * cookie,void * handle,u_register_t flags)366 static uintptr_t qti_sip_handler(uint32_t smc_fid,
367 u_register_t x1,
368 u_register_t x2,
369 u_register_t x3,
370 u_register_t x4,
371 void *cookie, void *handle, u_register_t flags)
372 {
373 uint32_t l_smc_fid = smc_fid & FUNCID_OEN_NUM_MASK;
374
375 if (GET_SMC_CC(smc_fid) == SMC_32) {
376 x1 = (uint32_t) x1;
377 x2 = (uint32_t) x2;
378 x3 = (uint32_t) x3;
379 x4 = (uint32_t) x4;
380 }
381
382 switch (l_smc_fid) {
383 case QTI_SIP_SVC_CALL_COUNT_ID:
384 {
385 SMC_RET1(handle, QTI_SIP_SVC_CALL_COUNT);
386 break;
387 }
388 case QTI_SIP_SVC_UID_ID:
389 {
390 /* Return UID to the caller */
391 SMC_UUID_RET(handle, qti_sip_svc_uid);
392 break;
393 }
394 case QTI_SIP_SVC_VERSION_ID:
395 {
396 /* Return the version of current implementation */
397 SMC_RET2(handle, QTI_SIP_SVC_VERSION_MAJOR,
398 QTI_SIP_SVC_VERSION_MINOR);
399 break;
400 }
401 case QTI_SIP_SVC_AVAILABLE_ID:
402 {
403 if (x1 != 1) {
404 SMC_RET1(handle, QTI_SIP_INVALID_PARAM);
405 }
406 if (qti_check_syscall_availability(x2) == true) {
407 SMC_RET2(handle, QTI_SIP_SUCCESS, 1);
408 } else {
409 SMC_RET2(handle, QTI_SIP_SUCCESS, 0);
410 }
411 break;
412 }
413 case QTI_SIP_SVC_SECURE_IO_READ_ID:
414 {
415 if ((x1 == QTI_SIP_SVC_SECURE_IO_READ_PARAM_ID) &&
416 qti_is_secure_io_access_allowed(x2)) {
417 SMC_RET2(handle, QTI_SIP_SUCCESS,
418 *((volatile uint32_t *)x2));
419 }
420 SMC_RET1(handle, QTI_SIP_INVALID_PARAM);
421 break;
422 }
423 case QTI_SIP_SVC_SECURE_IO_WRITE_ID:
424 {
425 if ((x1 == QTI_SIP_SVC_SECURE_IO_WRITE_PARAM_ID) &&
426 qti_is_secure_io_access_allowed(x2)) {
427 *((volatile uint32_t *)x2) = x3;
428 SMC_RET1(handle, QTI_SIP_SUCCESS);
429 }
430 SMC_RET1(handle, QTI_SIP_INVALID_PARAM);
431 break;
432 }
433 case QTI_SIP_SVC_MEM_ASSIGN_ID:
434 {
435 return qti_sip_mem_assign(handle, GET_SMC_CC(smc_fid),
436 x1, x2, x3, x4);
437 break;
438 }
439 default:
440 {
441 SMC_RET1(handle, QTI_SIP_NOT_SUPPORTED);
442 }
443 }
444 return (uintptr_t) handle;
445 }
446
/* Define a runtime service descriptor for both fast & yield SiP calls;
 * both route into the single qti_sip_handler above.
 */
DECLARE_RT_SVC(qti_sip_fast_svc, OEN_SIP_START,
	       OEN_SIP_END, SMC_TYPE_FAST, NULL, qti_sip_handler);

DECLARE_RT_SVC(qti_sip_yield_svc, OEN_SIP_START,
	       OEN_SIP_END, SMC_TYPE_YIELD, NULL, qti_sip_handler);
453