/*
 * Copyright 2018-2020 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <assert.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/mmio.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <mmu_def.h>
#include <plat/common/platform.h>

#include "plat_common.h"
#include "platform_def.h"

const mmap_region_t *plat_ls_get_mmap(void);

/*
 * Table of memory regions for the various BL stages to map using the MMU.
 * This doesn't include Trusted SRAM, as ls_setup_page_tables() already
 * takes care of mapping it.
 *
 * The flash needs to be mapped as writable in order to erase the FIP's Table of
 * Contents in case of unrecoverable error (see plat_error_handler()).
 */
#ifdef IMAGE_BL2
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
	{0}
};
#endif

#ifdef IMAGE_BL31
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
#ifdef NXP_DCSR_ADDR
	LS_MAP_DCSR,
#endif
	LS_MAP_OCRAM,
	{0}
};
#endif
#ifdef IMAGE_BL32
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
	LS_MAP_BL32_SEC_MEM,
	{0}
};
#endif
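
/*
 * Note (illustrative, not taken from this file): each LS_MAP_* entry above is
 * an mmap_region_t initialiser supplied by mmu_def.h / platform_def.h. As a
 * rough sketch of what such an entry typically looks like, assuming the usual
 * NXP_CCSR_ADDR / NXP_CCSR_SIZE platform symbols (the authoritative definition
 * lives in the platform headers):
 *
 *	#define LS_MAP_CCSR	MAP_REGION_FLAT(NXP_CCSR_ADDR, \
 *					NXP_CCSR_SIZE, \
 *					MT_DEVICE | MT_RW | MT_SECURE)
 */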

/* Weak definitions; they may be overridden by a specific NXP SoC. */
#pragma weak plat_get_ns_image_entrypoint
#pragma weak plat_ls_get_mmap
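
/*
 * A minimal sketch (hypothetical, not part of this file) of how an SoC port
 * would override one of the weak symbols above: providing a strong
 * plat_ls_get_mmap() in the SoC-specific setup code replaces the common
 * definition at the bottom of this file.
 *
 *	static const mmap_region_t soc_mmap[] = {
 *		LS_MAP_CCSR,
 *		{0}
 *	};
 *
 *	const mmap_region_t *plat_ls_get_mmap(void)
 *	{
 *		return soc_mmap;
 *	}
 */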

#if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
static void mmap_add_ddr_regions_statically(void)
{
	int i = 0;
	dram_regions_info_t *info_dram_regions = get_dram_regions_info();
	/* MMU map for Non-Secure DRAM Regions */
	VERBOSE("DRAM Region %d: %p - %p\n", i,
			(void *) info_dram_regions->region[i].addr,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				- 1));
	mmap_add_region(info_dram_regions->region[i].addr,
			info_dram_regions->region[i].addr,
			info_dram_regions->region[i].size,
			MT_MEMORY | MT_RW | MT_NS);

	/* MMU map for Secure DDR Region on DRAM-0 */
	if (info_dram_regions->region[i].size >
		(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
		VERBOSE("Secure DRAM Region %d: %p - %p\n", i,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				+ NXP_SECURE_DRAM_SIZE
				+ NXP_SP_SHRD_DRAM_SIZE
				- 1));
		mmap_add_region((info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE),
				MT_MEMORY | MT_RW | MT_SECURE);
	}

#ifdef IMAGE_BL31
	for (i = 1; i < info_dram_regions->num_dram_regions; i++) {
		if (info_dram_regions->region[i].size == 0) {
			break;
		}
		VERBOSE("DRAM Region %d: %p - %p\n", i,
			(void *) info_dram_regions->region[i].addr,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				- 1));
		mmap_add_region(info_dram_regions->region[i].addr,
				info_dram_regions->region[i].addr,
				info_dram_regions->region[i].size,
				MT_MEMORY | MT_RW | MT_NS);
	}
#endif
}
#endif
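
/*
 * For reference, a minimal sketch of the DRAM description consumed above.
 * The authoritative definition lives in plat_common.h; the field types and
 * NUM_DRAM_REGIONS bound are assumptions here, only the members actually
 * dereferenced in this file (num_dram_regions, region[].addr, region[].size)
 * are certain:
 *
 *	typedef struct {
 *		uint64_t num_dram_regions;
 *		struct {
 *			uint64_t addr;
 *			uint64_t size;
 *		} region[NUM_DRAM_REGIONS];
 *	} dram_regions_info_t;
 */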

#if defined(PLAT_XLAT_TABLES_DYNAMIC)
void mmap_add_ddr_region_dynamically(void)
{
	int i = 0;
	dram_regions_info_t *info_dram_regions = get_dram_regions_info();
	/* MMU map for Non-Secure DRAM Regions */
	VERBOSE("DRAM Region %d: %p - %p\n", i,
			(void *) info_dram_regions->region[i].addr,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				- 1));
	mmap_add_dynamic_region(info_dram_regions->region[i].addr,
			info_dram_regions->region[i].addr,
			info_dram_regions->region[i].size,
			MT_MEMORY | MT_RW | MT_NS);

	/* MMU map for Secure DDR Region on DRAM-0 */
	if (info_dram_regions->region[i].size >
		(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
		VERBOSE("Secure DRAM Region %d: %p - %p\n", i,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				+ NXP_SECURE_DRAM_SIZE
				+ NXP_SP_SHRD_DRAM_SIZE
				- 1));
		mmap_add_dynamic_region((info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE),
				MT_MEMORY | MT_RW | MT_SECURE);
	}

#ifdef IMAGE_BL31
	for (i = 1; i < info_dram_regions->num_dram_regions; i++) {
		if (info_dram_regions->region[i].size == 0) {
			break;
		}
		VERBOSE("DRAM Region %d: %p - %p\n", i,
			(void *) info_dram_regions->region[i].addr,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				- 1));
		mmap_add_dynamic_region(info_dram_regions->region[i].addr,
				info_dram_regions->region[i].addr,
				info_dram_regions->region[i].size,
				MT_MEMORY | MT_RW | MT_NS);
	}
#endif
}
#endif
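
/*
 * Usage note (an inference from the build guards above, not stated in this
 * file): mmap_add_ddr_regions_statically() is registered before
 * init_xlat_tables() runs, whereas mmap_add_ddr_region_dynamically() relies
 * on PLAT_XLAT_TABLES_DYNAMIC so that the same DDR windows can be added after
 * the tables exist, e.g. once the DDR configuration has been loaded from a
 * separate FIP image (CONFIG_DDR_FIP_IMAGE) and the DRAM layout is known.
 */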

/*
 * Set up the page tables for the generic and platform-specific memory regions.
 * The extents of the generic memory regions are specified by the function
 * arguments and consist of:
 * - Trusted SRAM seen by the BL image;
 * - Code section;
 * - Read-only data section;
 * - Coherent memory region, if applicable.
 */
void ls_setup_page_tables(uintptr_t total_base,
			   size_t total_size,
			   uintptr_t code_start,
			   uintptr_t code_limit,
			   uintptr_t rodata_start,
			   uintptr_t rodata_limit
#if USE_COHERENT_MEM
			   ,
			   uintptr_t coh_start,
			   uintptr_t coh_limit
#endif
			   )
{
	/*
	 * Map the Trusted SRAM with appropriate memory attributes.
	 * Subsequent mappings will adjust the attributes for specific regions.
	 */
	VERBOSE("Memory seen by this BL image: %p - %p\n",
		(void *) total_base, (void *) (total_base + total_size));
	mmap_add_region(total_base, total_base,
			total_size,
			MT_MEMORY | MT_RW | MT_SECURE);

	/* Re-map the code section */
	VERBOSE("Code region: %p - %p\n",
		(void *) code_start, (void *) code_limit);
	mmap_add_region(code_start, code_start,
			code_limit - code_start,
			MT_CODE | MT_SECURE);

	/* Re-map the read-only data section */
	VERBOSE("Read-only data region: %p - %p\n",
		(void *) rodata_start, (void *) rodata_limit);
	mmap_add_region(rodata_start, rodata_start,
			rodata_limit - rodata_start,
			MT_RO_DATA | MT_SECURE);

#if USE_COHERENT_MEM
	/* Re-map the coherent memory region */
	VERBOSE("Coherent region: %p - %p\n",
		(void *) coh_start, (void *) coh_limit);
	mmap_add_region(coh_start, coh_start,
			coh_limit - coh_start,
			MT_DEVICE | MT_RW | MT_SECURE);
#endif

	/* Now (re-)map the platform-specific memory regions */
	mmap_add(plat_ls_get_mmap());

#if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
	mmap_add_ddr_regions_statically();
#endif

	/* Create the page tables to reflect the above mappings */
	init_xlat_tables();
}
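
/*
 * Illustrative call site (a sketch only; BL31_BASE, BL31_END, BL_CODE_BASE/END,
 * BL_RO_DATA_BASE/END and BL_COHERENT_RAM_BASE/END are the usual TF-A
 * linker-provided extents and are assumptions here, not taken from this file).
 * A BL31 setup routine would typically invoke:
 *
 *	ls_setup_page_tables(BL31_BASE, BL31_END - BL31_BASE,
 *			     BL_CODE_BASE, BL_CODE_END,
 *			     BL_RO_DATA_BASE, BL_RO_DATA_END
 *	#if USE_COHERENT_MEM
 *			     , BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_END
 *	#endif
 *			     );
 *
 * followed by enable_mmu_el3(0) once init_xlat_tables() has populated the
 * translation tables.
 */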

/*******************************************************************************
 * Returns the NXP platform-specific memory map regions.
 ******************************************************************************/
const mmap_region_t *plat_ls_get_mmap(void)
{
	return plat_ls_mmap;
}