/*
 * (C) Copyright 2010
 * Texas Instruments, <www.ti.com>
 * Aneesh V <aneesh@ti.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#ifndef ARMV7_H
#define ARMV7_H

/* Cortex-A9 revisions */
#define MIDR_CORTEX_A9_R0P1	0x410FC091
#define MIDR_CORTEX_A9_R1P2	0x411FC092
#define MIDR_CORTEX_A9_R1P3	0x411FC093
#define MIDR_CORTEX_A9_R2P10	0x412FC09A

/* Cortex-A15 revisions */
#define MIDR_CORTEX_A15_R0P0	0x410FC0F0
#define MIDR_CORTEX_A15_R2P2	0x412FC0F2

/* Cortex-A7 revisions */
#define MIDR_CORTEX_A7_R0P0	0x410FC070

#define MIDR_PRIMARY_PART_MASK	0xFF0FFFF0
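
#ifndef __ASSEMBLY__
/*
 * Illustrative sketch (helper names are hypothetical, not part of the
 * original interface): identify the running core by reading MIDR
 * (CP15 c0) and comparing primary part fields; MIDR_PRIMARY_PART_MASK
 * clears the variant and revision nibbles so any revision matches.
 */
static inline unsigned int example_read_midr(void)
{
	unsigned int midr;

	asm volatile ("mrc p15, 0, %0, c0, c0, 0" : "=r" (midr));

	return midr;
}

static inline int example_is_cortex_a15(void)
{
	return (example_read_midr() & MIDR_PRIMARY_PART_MASK) ==
	       (MIDR_CORTEX_A15_R0P0 & MIDR_PRIMARY_PART_MASK);
}
#endif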

/* ID_PFR1 feature fields */
#define CPUID_ARM_SEC_SHIFT		4
#define CPUID_ARM_SEC_MASK		(0xF << CPUID_ARM_SEC_SHIFT)
#define CPUID_ARM_VIRT_SHIFT		12
#define CPUID_ARM_VIRT_MASK		(0xF << CPUID_ARM_VIRT_SHIFT)
#define CPUID_ARM_GENTIMER_SHIFT	16
#define CPUID_ARM_GENTIMER_MASK		(0xF << CPUID_ARM_GENTIMER_SHIFT)
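
#ifndef __ASSEMBLY__
/*
 * Illustrative sketch (helper name is hypothetical): ID_PFR1 advertises,
 * four bits per feature, whether the Security Extensions, Virtualization
 * Extensions and Generic Timer are implemented; a zero field means the
 * feature is absent.
 */
static inline int example_has_gentimer(void)
{
	unsigned int pfr1;

	asm volatile ("mrc p15, 0, %0, c0, c1, 1" : "=r" (pfr1));

	return ((pfr1 & CPUID_ARM_GENTIMER_MASK) >>
		CPUID_ARM_GENTIMER_SHIFT) != 0;
}
#endif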

/* valid bits in CBAR register / PERIPHBASE value */
#define CBAR_MASK			0xFFFF8000
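
#ifndef __ASSEMBLY__
/*
 * Illustrative sketch (helper name is hypothetical): on MPCore parts that
 * implement CBAR (e.g. Cortex-A9/A7/A15), the private peripheral base
 * (PERIPHBASE) is the CBAR value with the low, non-address bits masked off.
 */
static inline unsigned int example_get_periphbase(void)
{
	unsigned int cbar;

	asm volatile ("mrc p15, 4, %0, c15, c0, 0" : "=r" (cbar));

	return cbar & CBAR_MASK;
}
#endif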

/* CCSIDR */
#define CCSIDR_LINE_SIZE_OFFSET		0
#define CCSIDR_LINE_SIZE_MASK		0x7
#define CCSIDR_ASSOCIATIVITY_OFFSET	3
#define CCSIDR_ASSOCIATIVITY_MASK	(0x3FF << 3)
#define CCSIDR_NUM_SETS_OFFSET		13
#define CCSIDR_NUM_SETS_MASK		(0x7FFF << 13)
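
#ifndef __ASSEMBLY__
/*
 * Illustrative sketch (helper name is hypothetical): decode the geometry
 * of the cache currently selected in CSSELR. The fields are encoded as
 * "value minus one" (for the line size, log2(words per line) minus two),
 * so the raw fields must be adjusted as below.
 */
static inline void example_decode_ccsidr(unsigned int *line_bytes,
					 unsigned int *ways,
					 unsigned int *sets)
{
	unsigned int ccsidr;

	asm volatile ("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));

	/* line length = 2^(LineSize + 2) words = 2^(LineSize + 4) bytes */
	*line_bytes = 1 << (((ccsidr & CCSIDR_LINE_SIZE_MASK) >>
			     CCSIDR_LINE_SIZE_OFFSET) + 4);
	/* Associativity and NumSets are stored minus one */
	*ways = ((ccsidr & CCSIDR_ASSOCIATIVITY_MASK) >>
		 CCSIDR_ASSOCIATIVITY_OFFSET) + 1;
	*sets = ((ccsidr & CCSIDR_NUM_SETS_MASK) >>
		 CCSIDR_NUM_SETS_OFFSET) + 1;
}
#endif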

/*
 * Values for InD field in CSSELR
 * Selects the type of cache
 */
#define ARMV7_CSSELR_IND_DATA_UNIFIED		0
#define ARMV7_CSSELR_IND_INSTRUCTION		1

/* Values for Ctype fields in CLIDR */
#define ARMV7_CLIDR_CTYPE_NO_CACHE		0
#define ARMV7_CLIDR_CTYPE_INSTRUCTION_ONLY	1
#define ARMV7_CLIDR_CTYPE_DATA_ONLY		2
#define ARMV7_CLIDR_CTYPE_INSTRUCTION_DATA	3
#define ARMV7_CLIDR_CTYPE_UNIFIED		4
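
#ifndef __ASSEMBLY__
/*
 * Illustrative sketch (helper names are hypothetical): CLIDR carries one
 * 3-bit Ctype field per cache level, and a level must be selected in
 * CSSELR before its CCSIDR can be read. The ISB ensures the CSSELR write
 * is visible to the subsequent CCSIDR read, e.g.
 * example_select_cache(0, ARMV7_CSSELR_IND_DATA_UNIFIED) followed by
 * example_decode_ccsidr() above.
 */
static inline unsigned int example_clidr_ctype(unsigned int level)
{
	unsigned int clidr;

	asm volatile ("mrc p15, 1, %0, c0, c0, 1" : "=r" (clidr));

	return (clidr >> (level * 3)) & 0x7;
}

static inline void example_select_cache(unsigned int level, unsigned int ind)
{
	unsigned int csselr = (level << 1) | ind;

	asm volatile ("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
	asm volatile ("isb" : : : "memory");
}
#endif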

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <asm/io.h>
#include <asm/barriers.h>

/* read L2 control register (L2CTLR) */
static inline uint32_t read_l2ctlr(void)
{
	uint32_t val = 0;

	asm volatile ("mrc p15, 1, %0, c9, c0, 2" : "=r" (val));

	return val;
}

/* write L2 control register (L2CTLR) */
static inline void write_l2ctlr(uint32_t val)
{
	/*
	 * Note: L2CTLR can only be written when the L2 memory system
	 * is idle, i.e. before the MMU is enabled.
	 */
	asm volatile("mcr p15, 1, %0, c9, c0, 2" : : "r" (val) : "memory");
	isb();
}

/*
 * Workaround for ARM erratum 798870:
 * Set L2ACTLR[7] to reissue any memory transaction in the L2 that has been
 * stalled for 1024 cycles, to verify that its hazard condition still exists.
 */
static inline void v7_enable_l2_hazard_detect(void)
{
	uint32_t val;

	/* L2ACTLR[7]: Enable hazard detect timeout */
	asm volatile ("mrc p15, 1, %0, c15, c0, 0\n\t" : "=r"(val));
	val |= (1 << 7);
	asm volatile ("mcr p15, 1, %0, c15, c0, 0\n\t" : : "r"(val));
}

/*
 * Workaround for ARM erratum 799270:
 * Ensure that the L2 logic has been used within the previous 256 cycles
 * before modifying the ACTLR.SMP bit. This is required during boot before
 * the MMU has been enabled, or during a specified reset or power down
 * sequence.
 */
static inline void v7_enable_smp(uint32_t address)
{
	uint32_t temp, val;

	/* Read auxiliary control register */
	asm volatile ("mrc p15, 0, %0, c1, c0, 1\n\t" : "=r"(val));

	/* Enable SMP */
	val |= (1 << 6);

	/*
	 * Dummy read to assure L2 access; temp is zeroed and OR'ed into
	 * val only to create a data dependency so the read cannot be
	 * optimised away.
	 */
	temp = readl(address);
	temp &= 0;
	val |= temp;

	/* Write auxiliary control register */
	asm volatile ("mcr p15, 0, %0, c1, c0, 1\n\t" : : "r"(val));

	CP15DSB;
	CP15ISB;
}

void v7_en_l2_hazard_detect(void);
void v7_outer_cache_enable(void);
void v7_outer_cache_disable(void);
void v7_outer_cache_flush_all(void);
void v7_outer_cache_inval_all(void);
void v7_outer_cache_flush_range(u32 start, u32 end);
void v7_outer_cache_inval_range(u32 start, u32 end);
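
/*
 * Illustrative sketch (hypothetical helper and buffer): before handing
 * memory to a DMA master, clean it through the outer (e.g. PL310) cache
 * so the device observes the CPU's writes. This assumes the inner caches
 * have already been cleaned and that buf/len are cache-line aligned.
 */
static inline void example_outer_flush_buffer(u32 buf, u32 len)
{
	v7_outer_cache_flush_range(buf, buf + len);
}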
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun #ifdef CONFIG_ARMV7_NONSEC
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun int armv7_init_nonsec(void);
139*4882a593Smuzhiyun int armv7_apply_memory_carveout(u64 *start, u64 *size);
140*4882a593Smuzhiyun bool armv7_boot_nonsec(void);
141*4882a593Smuzhiyun
142*4882a593Smuzhiyun /* defined in assembly file */
143*4882a593Smuzhiyun unsigned int _nonsec_init(void);
144*4882a593Smuzhiyun void _do_nonsec_entry(void *target_pc, unsigned long r0,
145*4882a593Smuzhiyun unsigned long r1, unsigned long r2);
146*4882a593Smuzhiyun void _smp_pen(void);
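
/*
 * Illustrative sketch (hypothetical helper and arguments): the usual
 * boot-time sequence is to install the secure monitor with
 * armv7_init_nonsec() (assumed here to return 0 on success) and then
 * drop to the non-secure world while jumping to the OS entry point
 * via _do_nonsec_entry().
 */
static inline void example_jump_nonsec(void *kernel_entry,
				       unsigned long machid,
				       unsigned long dt_addr)
{
	if (armv7_init_nonsec() == 0)
		_do_nonsec_entry(kernel_entry, 0, machid, dt_addr);
}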

extern char __secure_start[];
extern char __secure_end[];
extern char __secure_stack_start[];
extern char __secure_stack_end[];

#endif /* CONFIG_ARMV7_NONSEC */

void v7_arch_cp15_set_l2aux_ctrl(u32 l2auxctrl, u32 cpu_midr,
				 u32 cpu_rev_comb, u32 cpu_variant,
				 u32 cpu_rev);
void v7_arch_cp15_set_acr(u32 acr, u32 cpu_midr, u32 cpu_rev_comb,
			  u32 cpu_variant, u32 cpu_rev);
#endif /* ! __ASSEMBLY__ */

#endif /* ARMV7_H */