/* lib/xlat_tables/aarch64/xlat_tables.c */
/*
 * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
#include <platform_def.h>
#include <utils.h>
#include <xlat_tables.h>
#include "../xlat_tables_private.h"

/*
 * The virtual address space size must be a power of two (as set in TCR.T0SZ).
 * As we start the initial lookup at level 1, it must also be between 2 GB and
 * 512 GB (with the virtual address size therefore 31 to 39 bits). See section
 * D4.2.5 in the ARMv8-A Architecture Reference Manual (DDI 0487A.i) for more
 * information.
 */
CASSERT(ADDR_SPACE_SIZE >= (1ull << 31) && ADDR_SPACE_SIZE <= (1ull << 39) &&
	IS_POWER_OF_TWO(ADDR_SPACE_SIZE), assert_valid_addr_space_size);
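
/*
 * Worked example (illustrative only): with ADDR_SPACE_SIZE == 1ull << 32
 * (a 4 GB virtual address space) and the usual 4 KB granule, where each
 * level 1 entry covers 1 GB, TCR.T0SZ is programmed to 64 - 32 = 32 and
 * NUM_L1_ENTRIES below evaluates to 4.
 */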

#define UNSET_DESC	~0ul
#define NUM_L1_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)

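/*
 * The level 1 table is aligned on its own size: the architecture requires
 * the translation table base address programmed into TTBR0 to be aligned
 * to the size of the initial lookup level table.
 */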
static uint64_t l1_xlation_table[NUM_L1_ENTRIES]
		__aligned(NUM_L1_ENTRIES * sizeof(uint64_t));

static unsigned long long tcr_ps_bits;

static unsigned long long calc_physical_addr_size_bits(
					unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0);

	/* 48 bits address */
	if (max_addr & ADDR_MASK_44_TO_47)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if (max_addr & ADDR_MASK_42_TO_43)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if (max_addr & ADDR_MASK_40_TO_41)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if (max_addr & ADDR_MASK_36_TO_39)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if (max_addr & ADDR_MASK_32_TO_35)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}
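
/*
 * For example, a platform whose highest mapped physical address is
 * 0xFFFFFFFFF (a 36-bit address) has bits [35:32] set, so the function
 * above returns TCR_PS_BITS_64GB.
 */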

void init_xlat_tables(void)
{
	unsigned long long max_pa;
	uintptr_t max_va;

	print_mmap();
	init_xlation_table(0, l1_xlation_table, 1, &max_va, &max_pa);
	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
	assert(max_va < ADDR_SPACE_SIZE);
}
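
/*
 * Illustrative usage sketch (not part of this file): a platform port
 * would typically describe its memory map with mmap_add_region() before
 * calling init_xlat_tables(). The region bases and sizes below are
 * hypothetical placeholders.
 *
 *	mmap_add_region(DEVICE_BASE, DEVICE_BASE, DEVICE_SIZE,
 *			MT_DEVICE | MT_RW | MT_SECURE);
 *	mmap_add_region(DRAM_BASE, DRAM_BASE, DRAM_SIZE,
 *			MT_MEMORY | MT_RW | MT_NS);
 *	init_xlat_tables();
 */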

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU in the given
 * exception level, assuming that the translation tables have already been
 * created.
 *
 *   _el:		Exception level at which the function will run
 *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
 *			be OR'ed with the default TCR value.
 *   _tlbi_fct:		Function to invalidate the TLBs at the current
 *			exception level
 ******************************************************************************/
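/*
 * For example, the instantiation DEFINE_ENABLE_MMU_EL(3, ...) at the
 * bottom of this file expands to a function named enable_mmu_el3() that
 * programs MAIR_EL3, TCR_EL3, TTBR0_EL3 and SCTLR_EL3.
 */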
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_el##_el(unsigned int flags)			\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
				ATTR_NON_CACHEABLE_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set TCR bits as well. */				\
		/* Inner & outer WBWA & shareable. */			\
		/* Set T0SZ to (64 - width of virtual address space) */	\
		tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |	\
			TCR_RGN_INNER_WBA |				\
			(64 - __builtin_ctzl(ADDR_SPACE_SIZE));		\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) l1_xlation_table;			\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsb();							\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
									\
		if (flags & DISABLE_DCACHE)				\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}

/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1,
		(tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3,
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)
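
/*
 * Illustrative sketch, assuming an image that runs at EL3 (e.g. BL31):
 * once init_xlat_tables() has been called, the MMU is switched on with
 * the function generated above.
 *
 *	enable_mmu_el3(0);		  keeps the data cache enabled
 *	enable_mmu_el3(DISABLE_DCACHE);	  runs with the data cache off
 */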