xref: /rk3399_ARM-atf/include/lib/el3_runtime/cpu_data.h (revision a3b16996ecb5d1cedc997204612e5f7c36ccd576)
/*
 * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef CPU_DATA_H
#define CPU_DATA_H

#include <platform_def.h>	/* CACHE_WRITEBACK_GRANULE required */

#include <bl31/ehf.h>

#ifdef __aarch64__

/* Offsets for the cpu_data structure */
#define CPU_DATA_CRASH_BUF_OFFSET	0x18
/* need enough space in crash buffer to save 8 registers */
#define CPU_DATA_CRASH_BUF_SIZE		64
#define CPU_DATA_CPU_OPS_PTR		0x10

#else /* __aarch64__ */

#if CRASH_REPORTING
#error "Crash reporting is not supported in AArch32"
#endif
#define CPU_DATA_CPU_OPS_PTR		0x0
#define CPU_DATA_CRASH_BUF_OFFSET	0x4

#endif /* __aarch64__ */

#if CRASH_REPORTING
#define CPU_DATA_CRASH_BUF_END		(CPU_DATA_CRASH_BUF_OFFSET + \
						CPU_DATA_CRASH_BUF_SIZE)
#else
#define CPU_DATA_CRASH_BUF_END		CPU_DATA_CRASH_BUF_OFFSET
#endif

/* cpu_data size is the data size rounded up to the platform cache line size */
#define CPU_DATA_SIZE			(((CPU_DATA_CRASH_BUF_END + \
					CACHE_WRITEBACK_GRANULE - 1) / \
						CACHE_WRITEBACK_GRANULE) * \
							CACHE_WRITEBACK_GRANULE)
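/*
 * Worked example (illustrative only; the values depend on the platform): with
 * CRASH_REPORTING enabled on AArch64 and CACHE_WRITEBACK_GRANULE = 64,
 * CPU_DATA_CRASH_BUF_END = 0x18 + 64 = 88 (0x58), so
 * CPU_DATA_SIZE = ((88 + 63) / 64) * 64 = 128, i.e. two cache lines.
 */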

#if ENABLE_RUNTIME_INSTRUMENTATION
/* Temporary space to store PMF timestamps from assembly code */
#define CPU_DATA_PMF_TS_COUNT		1
#define CPU_DATA_PMF_TS0_OFFSET		CPU_DATA_CRASH_BUF_END
#define CPU_DATA_PMF_TS0_IDX		0
#endif

#ifndef __ASSEMBLER__

#include <arch_helpers.h>
#include <lib/cassert.h>
#include <lib/psci/psci.h>
#include <platform_def.h>
#include <stdint.h>

/* Offsets for the cpu_data structure */
#define CPU_DATA_PSCI_LOCK_OFFSET	__builtin_offsetof\
		(cpu_data_t, psci_svc_cpu_data.pcpu_bakery_info)

#if PLAT_PCPU_DATA_SIZE
#define CPU_DATA_PLAT_PCPU_OFFSET	__builtin_offsetof\
		(cpu_data_t, platform_cpu_data)
#endif

/*******************************************************************************
 * Function & variable prototypes
 ******************************************************************************/

/*******************************************************************************
 * Cache of frequently used per-cpu data:
 *   Pointers to non-secure and secure security state contexts
 *   Address of the crash stack
 * It is aligned to the cache line boundary to allow efficient concurrent
 * manipulation of these pointers on different cpus
 *
 * TODO: Add other commonly used variables to this (tf_issues#90)
 *
 * The data structure and the _cpu_data accessors should not be used directly
 * by components that have per-cpu members. The member access macros should be
 * used for this.
 ******************************************************************************/
typedef struct cpu_data {
#ifdef __aarch64__
	void *cpu_context[2];
#endif
	uintptr_t cpu_ops_ptr;
#if CRASH_REPORTING
	u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
#endif
#if ENABLE_RUNTIME_INSTRUMENTATION
	uint64_t cpu_data_pmf_ts[CPU_DATA_PMF_TS_COUNT];
#endif
	struct psci_cpu_data psci_svc_cpu_data;
#if PLAT_PCPU_DATA_SIZE
	uint8_t platform_cpu_data[PLAT_PCPU_DATA_SIZE];
#endif
#if defined(IMAGE_BL31) && EL3_EXCEPTION_HANDLING
	pe_exc_data_t ehf_data;
#endif
} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;
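/*
 * Illustrative note (not part of this header): on AArch64 the two cpu_context
 * slots above hold the saved EL3 context pointers for the secure and
 * non-secure worlds. The context management code typically retrieves them
 * roughly as
 *
 *	void *ctx = get_cpu_data(cpu_context[security_state]);
 *
 * with security_state being SECURE or NON_SECURE.
 */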

extern cpu_data_t percpu_data[PLATFORM_CORE_COUNT];

#if CRASH_REPORTING
/* verify assembler offsets match data structures */
CASSERT(CPU_DATA_CRASH_BUF_OFFSET == __builtin_offsetof
	(cpu_data_t, crash_buf),
	assert_cpu_data_crash_stack_offset_mismatch);
#endif

CASSERT(CPU_DATA_SIZE == sizeof(cpu_data_t),
		assert_cpu_data_size_mismatch);

CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof
		(cpu_data_t, cpu_ops_ptr),
		assert_cpu_data_cpu_ops_ptr_offset_mismatch);

#if ENABLE_RUNTIME_INSTRUMENTATION
CASSERT(CPU_DATA_PMF_TS0_OFFSET == __builtin_offsetof
		(cpu_data_t, cpu_data_pmf_ts[0]),
		assert_cpu_data_pmf_ts0_offset_mismatch);
#endif

struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);

#ifdef __aarch64__
/* Return the cpu_data structure for the current CPU. */
static inline struct cpu_data *_cpu_data(void)
{
	return (cpu_data_t *)read_tpidr_el3();
}
#else
struct cpu_data *_cpu_data(void);
#endif
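/*
 * Illustrative sketch (not part of this header): on AArch64 the runtime is
 * expected to program TPIDR_EL3 with the address of the calling core's
 * percpu_data[] entry, so that _cpu_data() above resolves to a single system
 * register read. init_cpu_data_ptr() performs this setup; a rough C
 * equivalent, assuming plat_my_core_pos() returns the calling core's linear
 * index, would be:
 *
 *	write_tpidr_el3((u_register_t)&percpu_data[plat_my_core_pos()]);
 */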

/**************************************************************************
 * APIs for initialising and accessing per-cpu data
 *************************************************************************/

void init_cpu_data_ptr(void);
void init_cpu_ops(void);

#define get_cpu_data(_m)		   _cpu_data()->_m
#define set_cpu_data(_m, _v)		   _cpu_data()->_m = (_v)
#define get_cpu_data_by_index(_ix, _m)	   _cpu_data_by_index(_ix)->_m
#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = (_v)
/* ((cpu_data_t *)0)->_m is a dummy to get the sizeof the struct member _m */
#define flush_cpu_data(_m)	   flush_dcache_range((uintptr_t)	  \
						&(_cpu_data()->_m), \
						sizeof(((cpu_data_t *)0)->_m))
#define inv_cpu_data(_m)	   inv_dcache_range((uintptr_t)	  	  \
						&(_cpu_data()->_m), \
						sizeof(((cpu_data_t *)0)->_m))
#define flush_cpu_data_by_index(_ix, _m)	\
				   flush_dcache_range((uintptr_t)	  \
					 &(_cpu_data_by_index(_ix)->_m),  \
						sizeof(((cpu_data_t *)0)->_m))
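/*
 * Usage sketch (illustrative only): a component reads or writes its per-cpu
 * member through these macros rather than touching cpu_data_t directly, e.g.
 *
 *	uintptr_t ops = get_cpu_data(cpu_ops_ptr);
 *	set_cpu_data_by_index(1U, cpu_ops_ptr, ops);
 *	flush_cpu_data_by_index(1U, cpu_ops_ptr);
 */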


#endif /* __ASSEMBLER__ */
#endif /* CPU_DATA_H */