xref: /rk3399_ARM-atf/include/plat/common/plat_hold_pen.h (revision ecab5d9e3f81b7bf093002b8614359adfc8d880d)
1 /*
2  * Copyright (c) 2026, BayLibre SAS
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #ifndef PLAT_HOLD_PEN_H
8 #define PLAT_HOLD_PEN_H
9 
10 #ifndef __ASSEMBLER__
11 #include <cdefs.h>
12 #include <stddef.h>
13 #include <stdint.h>
14 
15 #include <lib/cassert.h>
16 #endif
17 
18 #include <lib/cpus/cpu_ops.h>
19 #include <lib/utils_def.h>
20 #include <platform_def.h>
21 
22 /*
23  * Generic hold pen for SMP secondary CPU bring-up.
24  *
25  * Platforms call plat_hold_pen_init() once at boot to write
26  * HOLD_STATE_WAIT followed by magic tags into every slot, then flush
27  * to main memory.
28  *
29  * The primary CPU signals a secondary by calling plat_hold_pen_signal(),
30  * which writes the warm boot entrypoint into the target slot,
31  * flushes it, and issues SEV.
32  *
33  * Secondary CPUs may reach their polling loop before plat_hold_pen_init()
34  * has run (they diverge in el3_entrypoint_common before C runtime
35  * init). To guard against branching to stale SRAM content, each
36  * slot carries two magic tags that must both match before the entry
37  * field is considered. Random memory is unlikely to contain both
38  * magic values in sequence. The entry field is written before the
39  * magic tags during init so that a valid entry value is always in
40  * place before the tags make the slot "live".
41  *
42  * Before branching, the secondary writes HOLD_STATE_WAIT back into
43  * its entry field so that a subsequent warm boot re-entering the
44  * polling loop does not see a stale entrypoint.
45  *
46  * Each slot is cache-line aligned so that flushing one core's slot
47  * cannot accidentally evict or corrupt another's.
48  */
49 
/* Magic tags marking a slot as initialized; both must match (see above). */
#define HOLD_MAGIC1		UL(0xCAFECAFE)
#define HOLD_MAGIC2		UL(0xBEEFBEEF)

/*
 * All-ones sentinel: no valid entrypoint can live here.
 * Evaluates to 0xffffffff on AArch32 and 0xffffffffffffffff on AArch64.
 */
#define HOLD_STATE_WAIT		(~UL(0))

/* One cache line per slot, so flushing one core's slot touches no other. */
#define HOLD_SLOT_SIZE		CACHE_WRITEBACK_GRANULE

/*
 * Field sizes and offsets within struct hold_slot, following the
 * pattern established in include/lib/cpus/cpu_ops.h.
 *
 * The offset expressions are fully parenthesized so they expand
 * safely inside larger expressions (e.g. address arithmetic in
 * assembler code that consumes these offsets).
 */
#define HOLD_SLOT_ENTRY_SIZE	CPU_WORD_SIZE
#define HOLD_SLOT_MAGIC1_SIZE	CPU_WORD_SIZE
#define HOLD_SLOT_MAGIC2_SIZE	CPU_WORD_SIZE

#define HOLD_SLOT_ENTRY		0
#define HOLD_SLOT_MAGIC1	(HOLD_SLOT_ENTRY + HOLD_SLOT_ENTRY_SIZE)
#define HOLD_SLOT_MAGIC2	(HOLD_SLOT_MAGIC1 + HOLD_SLOT_MAGIC1_SIZE)
72 
73 #ifndef __ASSEMBLER__
74 
/*
 * Per-core hold pen slot, cache-line aligned.
 * The 'entry' field holds HOLD_STATE_WAIT while the core should keep
 * polling, or the warm boot entrypoint address when it should go.
 * The magic fields hold HOLD_MAGIC1/HOLD_MAGIC2 once the slot has
 * been initialized; a secondary trusts 'entry' only when both match,
 * guarding against stale SRAM content before plat_hold_pen_init().
 */
struct hold_slot {
	uintptr_t entry;
	uintptr_t magic1;
	uintptr_t magic2;
	/* Padded to cache line boundary by __aligned. */
} __aligned(HOLD_SLOT_SIZE);

/*
 * Compile-time checks that the C layout matches the assembler-visible
 * HOLD_SLOT_* offsets above, and that each slot occupies exactly one
 * cache-writeback granule so per-slot flushes cannot collide.
 */
CASSERT(sizeof(struct hold_slot) == HOLD_SLOT_SIZE,
	hold_slot_not_cacheline_sized);
CASSERT(offsetof(struct hold_slot, entry) == HOLD_SLOT_ENTRY,
	hold_slot_entry_off_mismatch);
CASSERT(offsetof(struct hold_slot, magic1) == HOLD_SLOT_MAGIC1,
	hold_slot_magic1_off_mismatch);
CASSERT(offsetof(struct hold_slot, magic2) == HOLD_SLOT_MAGIC2,
	hold_slot_magic2_off_mismatch);
95 
/*
 * Initialize 'core_count' hold pen slots: write HOLD_STATE_WAIT into
 * each entry field, then the magic tags, and flush to main memory.
 * Called once at boot (per the design notes above, the entry field is
 * written before the tags so a slot never goes "live" with a stale
 * entrypoint).
 */
void plat_hold_pen_init(struct hold_slot *hold_pen, unsigned int core_count);

/*
 * Release core 'core_idx': write 'entry_point' (the warm boot
 * entrypoint) into its slot, flush it, and issue SEV to wake the
 * polling secondary.
 */
void plat_hold_pen_signal(struct hold_slot *hold_pen, unsigned int core_idx,
			  uintptr_t entry_point);
99 
100 #endif /* __ASSEMBLER__ */
101 
102 #endif /* PLAT_HOLD_PEN_H */
103