/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2019, Arm Limited. All rights reserved.
 */

#include <asm.S>
#include <arm.h>
#include <arm32_macros.S>
#include <platform_config.h>


/* size_t __get_core_pos(void); */
FUNC __get_core_pos , : , .identity_map
	read_mpidr r0
	b get_core_pos_mpidr
END_FUNC __get_core_pos

/* size_t get_core_pos_mpidr(uint32_t mpidr); */
/* Let platforms override this if needed */
WEAK_FUNC get_core_pos_mpidr , :
	mov	r3, r0

	/*
	 * Shift the MPIDR value if it is not already shifted.
	 * Using a logical shift ensures that AFF0 is filled with zeroes.
	 * This part is necessary even if CFG_CORE_THREAD_SHIFT is 0
	 * because the MT bit can be set on single-threaded systems where
	 * all AFF0 values are zero.
	 */
	tst	r0, #MPIDR_MT_MASK
	lsleq	r3, r0, #MPIDR_AFFINITY_BITS
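	/*
	 * Worked example of the shift above (illustrative values): on a
	 * non-MT CPU with MPIDR = 0x00000103 (AFF1 = 1, AFF0 = 3) the MT
	 * bit is clear, so the value is shifted left by
	 * MPIDR_AFFINITY_BITS (8), giving 0x00010300: AFF2 = 1, AFF1 = 3,
	 * AFF0 = 0, which matches the shifted layout described below.
	 */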

	/*
	 * At this point the MPIDR layout is always shifted so it looks
	 * as follows: AFF2 -> cluster, AFF1 -> core, AFF0 -> thread
	 */
#if CFG_CORE_THREAD_SHIFT == 0
	/* Calculate CorePos = (ClusterId * (cores/cluster)) + CoreId */
	ubfx	r0, r3, #MPIDR_AFF1_SHIFT, #MPIDR_AFFINITY_BITS
	ubfx	r1, r3, #MPIDR_AFF2_SHIFT, #MPIDR_AFFINITY_BITS
	add	r0, r0, r1, LSL #(CFG_CORE_CLUSTER_SHIFT)
#else
	/*
	 * Calculate CorePos =
	 * ((ClusterId * (cores/cluster)) + CoreId) * (threads/core) + ThreadId
	 */
	ubfx	r0, r3, #MPIDR_AFF0_SHIFT, #MPIDR_AFFINITY_BITS
	ubfx	r1, r3, #MPIDR_AFF1_SHIFT, #MPIDR_AFFINITY_BITS
	ubfx	r2, r3, #MPIDR_AFF2_SHIFT, #MPIDR_AFFINITY_BITS
	add	r1, r1, r2, LSL #(CFG_CORE_CLUSTER_SHIFT)
	add	r0, r0, r1, LSL #(CFG_CORE_THREAD_SHIFT)
#endif
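	/*
	 * Illustrative example, assuming a hypothetical platform with
	 * CFG_CORE_CLUSTER_SHIFT = 2 (4 cores per cluster): with
	 * CFG_CORE_THREAD_SHIFT = 0, cluster 1 core 2 gives
	 * CorePos = (1 * 4) + 2 = 6.  With CFG_CORE_THREAD_SHIFT = 1
	 * (2 threads per core), thread 1 of that same core gives
	 * CorePos = ((1 * 4) + 2) * 2 + 1 = 13.
	 */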

	bx	lr
END_FUNC get_core_pos_mpidr

/*
 * uint32_t temp_set_mode(int cpu_mode)
 *   Masks asynchronous aborts, IRQs and FIQs, then returns the CPSR
 *   value to be set in order to switch to cpu_mode with those masks
 *   still applied.  USR is mapped to SYS since SYS shares the USR
 *   banked sp/lr while remaining privileged.
 */
LOCAL_FUNC temp_set_mode , :
	mov	r1, r0
	cmp	r1, #CPSR_MODE_USR	/* update mode: usr -> sys */
	moveq	r1, #CPSR_MODE_SYS
	cpsid	aif			/* mask aborts, IRQs and FIQs */
	mrs	r0, cpsr		/* get cpsr with exceptions masked */
	bic	r0, #CPSR_MODE_MASK	/* clear mode */
	orr	r0, r1			/* set expected mode */
	bx	lr
END_FUNC temp_set_mode
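/*
 * Illustrative example (no such caller exists in this file): passing
 * CPSR_MODE_ABT, assuming that mode constant is provided by arm.h like
 * the CPSR_MODE_* values used above, yields the current CPSR with the
 * A/I/F bits set and the mode field replaced by Abort mode; writing
 * the result back with msr switches to that mode.
 */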

/* uint32_t read_mode_sp(int cpu_mode) */
FUNC read_mode_sp , :
	push	{r4, lr}
UNWIND(	.save	{r4, lr})
	mrs	r4, cpsr		/* save cpsr */
	bl	temp_set_mode
	msr	cpsr, r0		/* set the new mode */
	mov	r0, sp			/* read the banked sp of that mode */
	msr	cpsr, r4		/* back to the old mode */
	pop	{r4, pc}
END_FUNC read_mode_sp

/* uint32_t read_mode_lr(int cpu_mode) */
FUNC read_mode_lr , :
	push	{r4, lr}
UNWIND(	.save	{r4, lr})
	mrs	r4, cpsr		/* save cpsr */
	bl	temp_set_mode
	msr	cpsr, r0		/* set the new mode */
	mov	r0, lr			/* read the banked lr of that mode */
	msr	cpsr, r4		/* back to the old mode */
	pop	{r4, pc}
END_FUNC read_mode_lr
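/*
 * Usage note (illustrative, not taken from a caller in this file):
 * read_mode_sp(CPSR_MODE_SVC) would return the banked sp of Supervisor
 * mode by briefly switching to it with exceptions masked, and
 * read_mode_lr() does the same for the banked lr.
 */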

/* void wait_cycles(unsigned long cycles) */
FUNC wait_cycles , :
	/* Divide by 4 since each loop iteration takes 4 cycles to complete */
	lsrs	r0, r0, #2
	bxeq	lr			/* nothing to do if the count is zero */
loop:
	subs	r0, r0, #1
	nop
	bne	loop

	bx	lr
END_FUNC wait_cycles
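/*
 * Illustrative example: wait_cycles(400) runs the loop roughly 100
 * times for a delay of about 400 CPU cycles, while counts below 4
 * return immediately since the shifted count is zero.  The delay is
 * only an estimate; exact timing depends on the core's pipeline and
 * branch predictor.
 */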