// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <platform_config.h>

#include <arm.h>
#include <assert.h>
#include <config.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/interrupt.h>
#include <kernel/linker.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/user_access.h>
#include <kernel/user_mode_ctx_struct.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <smccc.h>
#include <sm/sm.h>
#include <trace.h>
#include <util.h>

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
static vaddr_t thread_user_kcode_va __nex_bss;
long thread_user_kcode_offset __nex_bss;
static size_t thread_user_kcode_size __nex_bss;
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
long thread_user_kdata_sp_offset __nex_bss;
static uint8_t thread_user_kdata_page[
	ROUNDUP(sizeof(struct thread_core_local) * CFG_TEE_CORE_NB_CORE,
		SMALL_PAGE_SIZE)]
	__aligned(SMALL_PAGE_SIZE)
#ifndef CFG_NS_VIRTUALIZATION
	__section(".nozi.kdata_page");
#else
	__section(".nex_nozi.kdata_page");
#endif
#endif

#ifdef ARM32
uint32_t __nostackcheck thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);

	barrier();
	write_cpsr(cpsr);
	barrier();
}
#endif /*ARM32*/

#ifdef ARM64
uint32_t __nostackcheck thread_get_exceptions(void)
{
	uint32_t daif = read_daif();

	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	uint32_t daif = read_daif();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);

	barrier();
	write_daif(daif);
	barrier();
}
#endif /*ARM64*/

uint32_t __nostackcheck thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void __nostackcheck thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}
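
/*
 * Illustrative usage of the two masking helpers above (a documentation
 * sketch only, not called by anything): callers save the exception state
 * returned by thread_mask_exceptions() and restore it when the critical
 * section is done, for example:
 *
 *	uint32_t excp = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *
 *	... work that must not be preempted by foreign interrupts ...
 *
 *	thread_unmask_exceptions(excp);
 */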

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();

	thr->vfp_state.ns_saved = false;
	vfp_lazy_save_state_init(&thr->vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);

	if (tuv && tuv->lazy_saved && !tuv->saved) {
		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
		tuv->saved = true;
	}

	vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
	thr->vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}

#ifdef ARM32
static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	thread->regs.pc = (uint32_t)pc;

	/*
	 * Stdcalls start in SVC mode with foreign interrupts masked,
	 * asynchronous aborts masked and native interrupts unmasked.
	 */
	thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
	thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_A |
			     (THREAD_EXCP_FOREIGN_INTR << ARM32_CPSR_F_SHIFT);
	/* Enable thumb mode if it's a thumb instruction */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in r0-r7 when the thread is started.
	 */
	thread->regs.r0 = a0;
	thread->regs.r1 = a1;
	thread->regs.r2 = a2;
	thread->regs.r3 = a3;
	thread->regs.r4 = a4;
	thread->regs.r5 = a5;
	thread->regs.r6 = a6;
	thread->regs.r7 = a7;
}
#endif /*ARM32*/

#ifdef ARM64
static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	thread->regs.pc = (uint64_t)pc;

	/*
	 * Stdcalls start in EL1 with foreign interrupts masked,
	 * asynchronous aborts masked and native interrupts unmasked.
	 */
	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
				    THREAD_EXCP_FOREIGN_INTR | DAIFBIT_ABT);
	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in x0-x7 when the thread is started.
	 */
	thread->regs.x[0] = a0;
	thread->regs.x[1] = a1;
	thread->regs.x[2] = a2;
	thread->regs.x[3] = a3;
	thread->regs.x[4] = a4;
	thread->regs.x[5] = a5;
	thread->regs.x[6] = a6;
	thread->regs.x[7] = a7;

	/* Set up frame pointer as per the AArch64 AAPCS */
	thread->regs.x[29] = 0;
}
#endif /*ARM64*/

static void __thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2,
				   uint32_t a3, uint32_t a4, uint32_t a5,
				   uint32_t a6, uint32_t a7,
				   void *pc, uint32_t flags)
{
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;
	size_t n = 0;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	threads[n].flags = flags;
	init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc);
#ifdef CFG_CORE_PAUTH
	/*
	 * Copy the APIA key into the registers to be restored with
	 * thread_resume().
	 */
	threads[n].regs.apiakey_hi = threads[n].keys.apia_hi;
	threads[n].regs.apiakey_lo = threads[n].keys.apia_lo;
#endif

	thread_lazy_save_ns_vfp();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
			  uint32_t a4, uint32_t a5)
{
	__thread_alloc_and_run(a0, a1, a2, a3, a4, a5, 0, 0,
			       thread_std_smc_entry, 0);
}

#ifdef CFG_SECURE_PARTITION
void thread_sp_alloc_and_run(struct thread_smc_args *args __maybe_unused)
{
	__thread_alloc_and_run(args->a0, args->a1, args->a2, args->a3, args->a4,
			       args->a5, args->a6, args->a7,
			       spmc_sp_thread_entry, THREAD_FLAGS_FFA_ONLY);
}
#endif

#ifdef ARM32
static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * r0-r3 when thread is resumed.
	 */
	regs->r0 = a0;
	regs->r1 = a1;
	regs->r2 = a2;
	regs->r3 = a3;
}
#endif /*ARM32*/

#ifdef ARM64
static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * x0-x3 when thread is resumed.
	 */
	regs->x[0] = a0;
	regs->x[1] = a1;
	regs->x[2] = a2;
	regs->x[3] = a3;
}
#endif /*ARM64*/

#ifdef ARM32
static bool is_from_user(uint32_t cpsr)
{
	return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif

#ifdef ARM64
static bool is_from_user(uint32_t cpsr)
{
	if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif

#ifdef CFG_SYSCALL_FTRACE
static void __noprof ftrace_suspend(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = true;
}

static void __noprof ftrace_resume(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = false;
}
#else
static void __noprof ftrace_suspend(void)
{
}

static void __noprof ftrace_resume(void)
{
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->cpsr);
}

void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
			    uint32_t a2, uint32_t a3)
{
	size_t n = thread_id;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
		threads[n].state = THREAD_STATE_ACTIVE;
		found_thread = true;
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	if (threads[n].have_user_map) {
		core_mmu_set_user_map(&threads[n].user_map);
		if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_resume();
	}

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	/*
	 * A return from an RPC that requested service of a foreign
	 * interrupt must not pick up parameters from the non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();

	if (threads[n].have_user_map)
		ftrace_resume();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

#ifdef ARM64
static uint64_t spsr_from_pstate(void)
{
	uint64_t spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, 0);

	spsr |= read_daif();
	if (IS_ENABLED(CFG_PAN) && feat_pan_implemented() && read_pan())
		spsr |= SPSR_64_PAN;

	return spsr;
}

void __thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS])
{
	thread_rpc_spsr(rv, spsr_from_pstate());
}

vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);
	return threads[ct].kern_sp;
}
#endif /*ARM64*/

#ifdef ARM32
bool __noprof thread_is_in_normal_mode(void)
{
	return (read_cpsr() & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_SVC;
}
#endif

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	thread_lazy_restore_ns_vfp();
	tee_pager_release_phys(
		(void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
		STACK_THREAD_SIZE);

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();
	thread_unlock_global();
}

#ifdef CFG_WITH_PAGER
static void release_unused_kernel_stack(struct thread_ctx *thr,
					uint32_t cpsr __maybe_unused)
{
#ifdef ARM64
	/*
	 * If we're from user mode then thr->regs.sp is the saved user
	 * stack pointer and thr->kern_sp holds the last kernel stack
	 * pointer. But if we're from kernel mode then thr->kern_sp isn't
	 * up to date so we need to read from thr->regs.sp instead.
	 */
	vaddr_t sp = is_from_user(cpsr) ? thr->kern_sp : thr->regs.sp;
#else
	vaddr_t sp = thr->regs.svc_sp;
#endif
	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
	size_t len = sp - base;

	tee_pager_release_phys((void *)base, len);
}
#else
static void release_unused_kernel_stack(struct thread_ctx *thr __unused,
					uint32_t cpsr __unused)
{
}
#endif

int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	if (core_mmu_user_mapping_is_active())
		ftrace_suspend();

	thread_check_canaries();

	release_unused_kernel_stack(threads + ct, cpsr);

	if (is_from_user(cpsr)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_suspend();
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
		struct ts_session *ts_sess =
			TAILQ_FIRST(&threads[ct].tsd.sess_stack);

		spmc_sp_set_to_preempted(ts_sess);
	}

	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

	thread_unlock_global();

	return ct;
}

static void __maybe_unused
set_core_local_kcode_offset(struct thread_core_local *cls, long offset)
{
	size_t n = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		cls[n].kcode_offset = offset;
}

static void init_user_kcode(void)
{
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	vaddr_t v = (vaddr_t)thread_excp_vect;
	vaddr_t ve = (vaddr_t)thread_excp_vect_end;

	thread_user_kcode_va = ROUNDDOWN(v, CORE_MMU_USER_CODE_SIZE);
	ve = ROUNDUP(ve, CORE_MMU_USER_CODE_SIZE);
	thread_user_kcode_size = ve - thread_user_kcode_va;

	core_mmu_get_user_va_range(&v, NULL);
	thread_user_kcode_offset = thread_user_kcode_va - v;

	set_core_local_kcode_offset(thread_core_local,
				    thread_user_kcode_offset);
#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
	set_core_local_kcode_offset((void *)thread_user_kdata_page,
				    thread_user_kcode_offset);
	/*
	 * When transitioning to EL0, subtract this much from SP to make it
	 * point to this special kdata page instead. SP is restored by
	 * adding this much back when transitioning back to EL1.
	 */
	v += thread_user_kcode_size;
	thread_user_kdata_sp_offset = (vaddr_t)thread_core_local - v;
#endif
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
}

void thread_init_primary(void)
{
	init_user_kcode();
}

static uint32_t __maybe_unused get_midr_implementer(uint32_t midr)
{
	return (midr >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK;
}

static uint32_t __maybe_unused get_midr_primary_part(uint32_t midr)
{
	return (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) &
	       MIDR_PRIMARY_PART_NUM_MASK;
}

static uint32_t __maybe_unused get_midr_variant(uint32_t midr)
{
	return (midr >> MIDR_VARIANT_SHIFT) & MIDR_VARIANT_MASK;
}

static uint32_t __maybe_unused get_midr_revision(uint32_t midr)
{
	return (midr >> MIDR_REVISION_SHIFT) & MIDR_REVISION_MASK;
}
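
/*
 * Example (illustrative only): a Cortex-A72 r1p0 reports
 * get_midr_primary_part(midr) == CORTEX_A72_PART_NUM,
 * get_midr_variant(midr) == 1 and get_midr_revision(midr) == 0, which
 * get_excp_vect() below combines into vers == 0x10.
 */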

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
#ifdef ARM64
static bool probe_workaround_available(uint32_t wa_id)
{
	int32_t r;

	r = thread_smc(SMCCC_VERSION, 0, 0, 0);
	if (r < 0)
		return false;
	if (r < 0x10001)	/* compare with version 1.1 */
		return false;

	/* Version >= 1.1, so SMCCC_ARCH_FEATURES is available */
	r = thread_smc(SMCCC_ARCH_FEATURES, wa_id, 0, 0);
	return r >= 0;
}

static vaddr_t __maybe_unused select_vector_wa_spectre_v2(void)
{
	if (probe_workaround_available(SMCCC_ARCH_WORKAROUND_1)) {
		DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") available",
		     SMCCC_ARCH_WORKAROUND_1);
		DMSG("SMC Workaround for CVE-2017-5715 used");
		return (vaddr_t)thread_excp_vect_wa_spectre_v2;
	}

	DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") unavailable",
	     SMCCC_ARCH_WORKAROUND_1);
	DMSG("SMC Workaround for CVE-2017-5715 not needed (if ARM-TF is up to date)");
	return (vaddr_t)thread_excp_vect;
}
#else
static vaddr_t __maybe_unused select_vector_wa_spectre_v2(void)
{
	return (vaddr_t)thread_excp_vect_wa_spectre_v2;
}
#endif
#endif

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
static vaddr_t select_vector_wa_spectre_bhb(uint8_t loop_count __maybe_unused)
{
	/*
	 * Spectre-BHB has only been analyzed for AArch64 so far. For
	 * AArch32 fall back to the Spectre-V2 workaround which is likely
	 * to work even if perhaps a bit more expensive than a more
	 * optimized workaround.
	 */
#ifdef ARM64
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	struct thread_core_local *cl = (void *)thread_user_kdata_page;

	cl[get_core_pos()].bhb_loop_count = loop_count;
#endif
	thread_get_core_local()->bhb_loop_count = loop_count;

	DMSG("Spectre-BHB CVE-2022-23960 workaround enabled with \"K\" = %u",
	     loop_count);

	return (vaddr_t)thread_excp_vect_wa_spectre_bhb;
#else
	return select_vector_wa_spectre_v2();
#endif
}
#endif

static vaddr_t get_excp_vect(void)
{
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	uint32_t midr = read_midr();
	uint8_t vers = 0;

	if (get_midr_implementer(midr) != MIDR_IMPLEMENTER_ARM)
		return (vaddr_t)thread_excp_vect;
	/*
	 * Variant rx, Revision py, for instance
	 * Variant 2 Revision 0 = r2p0 = 0x20
	 */
	vers = (get_midr_variant(midr) << 4) | get_midr_revision(midr);

	/*
	 * The Spectre-V2 (CVE-2017-5715) software workaround covers what's
	 * needed for Spectre-BHB (CVE-2022-23960) too. The workaround for
	 * Spectre-V2 is more expensive than the one for Spectre-BHB so if
	 * possible select the workaround for Spectre-BHB.
	 */
	switch (get_midr_primary_part(midr)) {
#ifdef ARM32
	/* Spectre-V2 */
	case CORTEX_A8_PART_NUM:
	case CORTEX_A9_PART_NUM:
	case CORTEX_A17_PART_NUM:
#endif
	/* Spectre-V2 */
	case CORTEX_A57_PART_NUM:
	case CORTEX_A73_PART_NUM:
	case CORTEX_A75_PART_NUM:
		return select_vector_wa_spectre_v2();
#ifdef ARM32
	/* Spectre-V2 */
	case CORTEX_A15_PART_NUM:
		return (vaddr_t)thread_excp_vect_wa_a15_spectre_v2;
#endif
	/*
	 * Spectre-V2 for vers < r1p0
	 * Spectre-BHB for vers >= r1p0
	 */
	case CORTEX_A72_PART_NUM:
		if (vers < 0x10)
			return select_vector_wa_spectre_v2();
		return select_vector_wa_spectre_bhb(8);

	/*
	 * Use the safer but more expensive Spectre-V2 workaround for CPUs
	 * where the best mitigation sequence is still being researched.
	 */
	case CORTEX_A65_PART_NUM:
	case CORTEX_A65AE_PART_NUM:
	case NEOVERSE_E1_PART_NUM:
		return select_vector_wa_spectre_v2();

	/* Spectre-BHB */
	case CORTEX_A76_PART_NUM:
	case CORTEX_A76AE_PART_NUM:
	case CORTEX_A77_PART_NUM:
		return select_vector_wa_spectre_bhb(24);
	case CORTEX_A78_PART_NUM:
	case CORTEX_A78AE_PART_NUM:
	case CORTEX_A78C_PART_NUM:
	case CORTEX_A710_PART_NUM:
	case CORTEX_X1_PART_NUM:
	case CORTEX_X2_PART_NUM:
		return select_vector_wa_spectre_bhb(32);
	case NEOVERSE_N1_PART_NUM:
		return select_vector_wa_spectre_bhb(24);
	case NEOVERSE_N2_PART_NUM:
	case NEOVERSE_V1_PART_NUM:
		return select_vector_wa_spectre_bhb(32);

	default:
		return (vaddr_t)thread_excp_vect;
	}
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

	return (vaddr_t)thread_excp_vect;
}

void thread_init_per_cpu(void)
{
	thread_init_vbar(get_excp_vect());

#ifdef CFG_FTRACE_SUPPORT
	/*
	 * Enable accesses to frequency register and physical counter
	 * register in EL0/PL0 required for timestamping during
	 * function tracing.
	 */
	write_cntkctl(read_cntkctl() | CNTKCTL_PL0PCTEN);
#endif
}

#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns,
					  true /*force_save*/);
		thr->vfp_state.ns_saved = true;
	} else if (thr->vfp_state.sec_lazy_saved &&
		   !thr->vfp_state.sec_saved) {
		/*
		 * This happens when we're handling an abort while the
		 * thread was using the VFP state.
		 */
		vfp_lazy_save_state_final(&thr->vfp_state.sec,
					  false /*!force_save*/);
		thr->vfp_state.sec_saved = true;
	} else if (tuv && tuv->lazy_saved && !tuv->saved) {
		/*
		 * This can happen either during syscall or abort
		 * processing (while processing a syscall).
		 */
		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
		tuv->saved = true;
	}

	vfp_enable();
	return exceptions;
}

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_FOREIGN_INTR);
	exceptions &= ~THREAD_EXCP_FOREIGN_INTR;
	exceptions |= state & THREAD_EXCP_FOREIGN_INTR;
	thread_set_exceptions(exceptions);
}

void thread_kernel_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thr->vfp_state.sec);
		thr->vfp_state.sec_lazy_saved = true;
	}
}

void thread_kernel_restore_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());
	if (thr->vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thr->vfp_state.sec,
				       thr->vfp_state.sec_saved);
		thr->vfp_state.sec_saved = false;
		thr->vfp_state.sec_lazy_saved = false;
	}
}
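
/*
 * Illustrative usage of the kernel VFP helpers above (a documentation
 * sketch only, not called by anything): core code that temporarily needs
 * FP/SIMD brackets the usage as
 *
 *	uint32_t excp = thread_kernel_enable_vfp();
 *
 *	... use VFP/SIMD registers ...
 *
 *	thread_kernel_disable_vfp(excp);
 */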

void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns,
					  true /*force_save*/);
		thr->vfp_state.ns_saved = true;
	} else if (tuv && uvfp != tuv) {
		if (tuv->lazy_saved && !tuv->saved) {
			vfp_lazy_save_state_final(&tuv->vfp,
						  false /*!force_save*/);
			tuv->saved = true;
		}
	}

	if (uvfp->lazy_saved)
		vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
	uvfp->lazy_saved = false;
	uvfp->saved = false;

	thr->vfp_state.uvfp = uvfp;
	vfp_enable();
}

void thread_user_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (!vfp_is_enabled())
		return;

	assert(tuv && !tuv->lazy_saved && !tuv->saved);
	vfp_lazy_save_state_init(&tuv->vfp);
	tuv->lazy_saved = true;
}

void thread_user_clear_vfp(struct user_mode_ctx *uctx)
{
	struct thread_user_vfp_state *uvfp = &uctx->vfp;
	struct thread_ctx *thr = threads + thread_get_id();

	if (uvfp == thr->vfp_state.uvfp)
		thr->vfp_state.uvfp = NULL;
	uvfp->lazy_saved = false;
	uvfp->saved = false;
}
#endif /*CFG_WITH_VFP*/

#ifdef ARM32
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (!is_32bit)
		return false;

	s = read_cpsr();
	s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
	s |= CPSR_MODE_USR;
	if (entry_func & 1)
		s |= CPSR_T;
	*spsr = s;
	return true;
}
#endif

#ifdef ARM64
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (is_32bit) {
		s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
		s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
		s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
	} else {
		s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
	}

	*spsr = s;
	return true;
}
#endif
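
/*
 * Note added for clarity on the AArch64 variant above: when is_32bit is
 * false only the DAIF bits are carried over from the current PSTATE; the
 * mode field M[3:0] is left at zero, which selects EL0 with SP_EL0
 * (EL0t) as the mode to enter.
 */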

static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0,
			 unsigned long a1, unsigned long a2, unsigned long a3,
			 unsigned long user_sp, unsigned long entry_func,
			 uint32_t spsr,
			 struct thread_pauth_keys *keys __maybe_unused)
{
	/*
	 * First clear all registers to avoid leaking information from
	 * other TAs or even the Core itself.
	 */
	*regs = (struct thread_ctx_regs){ };
#ifdef ARM32
	regs->r0 = a0;
	regs->r1 = a1;
	regs->r2 = a2;
	regs->r3 = a3;
	regs->usr_sp = user_sp;
	regs->pc = entry_func;
	regs->cpsr = spsr;
#endif
#ifdef ARM64
	regs->x[0] = a0;
	regs->x[1] = a1;
	regs->x[2] = a2;
	regs->x[3] = a3;
	regs->pc = entry_func;
	regs->cpsr = spsr;
	regs->x[13] = user_sp;	/* Used when running TA in AArch32 */
	regs->sp = user_sp;	/* Used when running TA in AArch64 */
#ifdef CFG_TA_PAUTH
	assert(keys);
	regs->apiakey_hi = keys->apia_hi;
	regs->apiakey_lo = keys->apia_lo;
#endif
	/* Set frame pointer (user stack can't be unwound past this point) */
	regs->x[29] = 0;
#endif
}

static struct thread_pauth_keys *thread_get_pauth_keys(void)
{
#if defined(CFG_TA_PAUTH)
	struct ts_session *s = ts_get_current_session();

	if (is_user_ta_ctx(s->ctx)) {
		struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);

		return &utc->uctx.keys;
	} else if (is_sp_ctx(s->ctx)) {
		struct sp_ctx *spc = to_sp_ctx(s->ctx);

		return &spc->uctx.keys;
	}

	panic("[abort] Only user TAs and SPs support PAUTH keys");
#else
	return NULL;
#endif
}

uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1)
{
	uint32_t spsr = 0;
	uint32_t exceptions = 0;
	uint32_t rc = 0;
	struct thread_ctx_regs *regs = NULL;
	struct thread_pauth_keys *keys = NULL;

	tee_ta_update_session_utime_resume();

	keys = thread_get_pauth_keys();

	/* Derive SPSR from current CPSR/PSTATE readout. */
	if (!get_spsr(is_32bit, entry_func, &spsr)) {
		*exit_status0 = 1; /* panic */
		*exit_status1 = 0xbadbadba;
		return 0;
	}

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	/*
	 * We're using the per-thread location of saved context registers
	 * for temporary storage. Now that exceptions are masked they will
	 * not be used for anything else until they are eventually
	 * unmasked when user mode has been entered.
	 */
	regs = thread_get_ctx_regs();
	set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, spsr, keys);
	rc = __thread_enter_user_mode(regs, exit_status0, exit_status1);
	thread_unmask_exceptions(exceptions);
	return rc;
}

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	core_mmu_get_user_va_range(va, NULL);
	*mobj = mobj_tee_ram_rx;
	*sz = thread_user_kcode_size;
	*offset = thread_user_kcode_va - (vaddr_t)mobj_get_va(*mobj, 0, *sz);
}
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	vaddr_t v;

	core_mmu_get_user_va_range(&v, NULL);
	*va = v + thread_user_kcode_size;
	*mobj = mobj_tee_ram_rw;
	*sz = sizeof(thread_user_kdata_page);
	*offset = (vaddr_t)thread_user_kdata_page -
		  (vaddr_t)mobj_get_va(*mobj, 0, *sz);
}
#endif

static void setup_unwind_user_mode(struct thread_scall_regs *regs)
{
#ifdef ARM32
	regs->lr = (uintptr_t)thread_unwind_user_mode;
	regs->spsr = read_cpsr();
#endif
#ifdef ARM64
	regs->elr = (uintptr_t)thread_unwind_user_mode;
	regs->spsr = spsr_from_pstate();
	/*
	 * regs is the value of the stack pointer before calling the SVC
	 * handler. The addition matches the space reserved at the
	 * beginning of el0_sync_svc(). This prepares the stack for
	 * returning to thread_unwind_user_mode instead of doing a normal
	 * exception return.
	 */
	regs->sp_el0 = (uint64_t)(regs + 1);
#endif
}

static void gprof_set_status(struct ts_session *s __maybe_unused,
			     enum ts_gprof_status status __maybe_unused)
{
#ifdef CFG_TA_GPROF_SUPPORT
	if (s->ctx->ops->gprof_set_status)
		s->ctx->ops->gprof_set_status(status);
#endif
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak thread_scall_handler(struct thread_scall_regs *regs)
{
	struct ts_session *sess = NULL;
	uint32_t state = 0;

	/* Enable native interrupts */
	state = thread_get_exceptions();
	thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR);

	thread_user_save_vfp();

	sess = ts_get_current_session();
	/*
	 * User mode service has just entered kernel mode, suspend gprof
	 * collection until we're about to switch back again.
	 */
	gprof_set_status(sess, TS_GPROF_SUSPEND);

	/* Restore foreign interrupts which are disabled on exception entry */
	thread_restore_foreign_intr();

	assert(sess && sess->handle_scall);
	if (sess->handle_scall(regs)) {
		/* We're about to switch back to user mode */
		gprof_set_status(sess, TS_GPROF_RESUME);
	} else {
		/* We're returning from __thread_enter_user_mode() */
		setup_unwind_user_mode(regs);
	}
}

#ifdef CFG_WITH_ARM_TRUSTED_FW
/*
 * These five functions are __weak to allow platforms to override them if
 * needed.
 */
unsigned long __weak thread_cpu_off_handler(unsigned long a0 __unused,
					    unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_cpu_off_handler);

unsigned long __weak thread_cpu_suspend_handler(unsigned long a0 __unused,
						unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_cpu_suspend_handler);

unsigned long __weak thread_cpu_resume_handler(unsigned long a0 __unused,
					       unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_cpu_resume_handler);

unsigned long __weak thread_system_off_handler(unsigned long a0 __unused,
					       unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_system_off_handler);

unsigned long __weak thread_system_reset_handler(unsigned long a0 __unused,
						 unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_system_reset_handler);
#endif /*CFG_WITH_ARM_TRUSTED_FW*/

#ifdef CFG_CORE_WORKAROUND_ARM_NMFI
void __noreturn interrupt_main_handler(void)
{
	/*
	 * Note: this overrides the default implementation of this function
	 * so that if another handler were defined there would be a
	 * duplicate symbol error during linking.
	 */
	panic("Secure interrupt received but it is not supported");
}
#endif