/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
#include <platform_def.h>
#include <utils.h>
#include <xlat_tables.h>
#include "../xlat_tables_private.h"

/*
 * Each platform can define the size of the virtual address space, which is
 * defined in ADDR_SPACE_SIZE. TTBCR.TxSZ is calculated as 32 minus the width
 * of said address space. The value of TTBCR.TxSZ must be in the range 0 to
 * 7 [1], which means that the virtual address space width must be in the
 * range 32 to 25 bits.
 *
 * Here we calculate the initial lookup level from the value of
 * ADDR_SPACE_SIZE. For a 4 KB page size, level 1 supports virtual address
 * spaces of widths 32 to 31 bits, and level 2 from 30 to 25. Wider or
 * narrower address spaces are not supported. As a result, level 3 cannot be
 * used as initial lookup level with 4 KB granularity [1].
 *
 * For example, for a 31-bit address space (i.e. ADDR_SPACE_SIZE == 1 << 31),
 * TTBCR.TxSZ will be programmed to (32 - 31) = 1. According to Table G4-5 in
 * the ARM ARM, the initial lookup level for such an address space is 1.
 *
 * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
 * information:
 * [1] Section G4.6.5
 */
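/*
 * A second worked example, for the level 2 case, following the same rules as
 * above: for a 26-bit address space (i.e. ADDR_SPACE_SIZE == 1 << 26),
 * TTBCR.TxSZ would be programmed to (32 - 26) = 6, which is still within the
 * valid 0 to 7 range. A 26-bit width falls in the 30 to 25 bit range served
 * by level 2, so the initial lookup level for such an address space would
 * be 2.
 */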
#if ADDR_SPACE_SIZE > (1ULL << (32 - TTBCR_TxSZ_MIN))

# error "ADDR_SPACE_SIZE is too big."

#elif ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)

# define XLAT_TABLE_LEVEL_BASE	1
# define NUM_BASE_LEVEL_ENTRIES	(ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)

#elif ADDR_SPACE_SIZE >= (1 << (32 - TTBCR_TxSZ_MAX))

# define XLAT_TABLE_LEVEL_BASE	2
# define NUM_BASE_LEVEL_ENTRIES	(ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)

#else

# error "ADDR_SPACE_SIZE is too small."

#endif

static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
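/*
 * Illustrative numbers and a minimal sanity check, assuming the usual 4 KB
 * granule definitions (L1_XLAT_ADDRESS_SHIFT == 30) and the CASSERT macro
 * provided by <cassert.h>: with the 31-bit example above, the preprocessor
 * block selects XLAT_TABLE_LEVEL_BASE == 1 and NUM_BASE_LEVEL_ENTRIES ==
 * (1 << 31) >> 30 == 2, so base_xlation_table is a 16-byte table aligned on
 * a 16-byte boundary, which is why the declaration above aligns the table to
 * its own size. The level and entry-count calculations also rely on
 * ADDR_SPACE_SIZE being a power of two; one way to make that assumption
 * explicit is a build-time check such as the following (the assertion name
 * is arbitrary):
 */
CASSERT((ADDR_SPACE_SIZE & (ADDR_SPACE_SIZE - 1ULL)) == 0,
	assert_addr_space_size_is_power_of_2);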
void init_xlat_tables(void)
{
	unsigned long long max_pa;
	uintptr_t max_va;
	print_mmap();
	init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
			   &max_va, &max_pa);
	assert(max_va < ADDR_SPACE_SIZE);
}

/*******************************************************************************
 * Function for enabling the MMU in Secure PL1, assuming that the
 * page-tables have already been created.
 ******************************************************************************/
void enable_mmu_secure(unsigned int flags)
{
	unsigned int mair0, ttbcr, sctlr;
	uint64_t ttbr0;

	assert(IS_IN_SECURE());
	assert((read_sctlr() & SCTLR_M_BIT) == 0);

	/* Set attributes in the right indices of the MAIR */
	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
			ATTR_NON_CACHEABLE_INDEX);
	write_mair0(mair0);

	/* Invalidate TLBs at the current exception level */
	tlbiall();

	/*
	 * Set TTBCR bits as well. Set TTBR0 table properties as Inner
	 * & outer WBWA & shareable. Disable TTBR1.
	 */
	ttbcr = TTBCR_EAE_BIT |
		TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
		TTBCR_RGN0_INNER_WBA |
		(32 - __builtin_ctzl((uintptr_t)ADDR_SPACE_SIZE));
	ttbcr |= TTBCR_EPD1_BIT;
	write_ttbcr(ttbcr);

	/* Set TTBR0 bits as well */
	ttbr0 = (uintptr_t) base_xlation_table;
	write64_ttbr0(ttbr0);
	write64_ttbr1(0);

	/*
	 * Ensure all translation table writes have drained
	 * into memory, the TLB invalidation is complete,
	 * and translation register writes are committed
	 * before enabling the MMU
	 */
	dsb();
	isb();

	sctlr = read_sctlr();
	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;

	if (flags & DISABLE_DCACHE)
		sctlr &= ~SCTLR_C_BIT;
	else
		sctlr |= SCTLR_C_BIT;

	write_sctlr(sctlr);

	/* Ensure the MMU enable takes effect immediately */
	isb();
}
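/*
 * Typical usage sketch: a BL image first describes its memory regions, then
 * builds the tables and turns on the MMU. The PLAT_*_BASE/SIZE names below
 * are hypothetical platform macros used only for illustration; the calls
 * themselves are the public xlat_tables API (see xlat_tables.h).
 *
 *	mmap_add_region(PLAT_SRAM_BASE, PLAT_SRAM_BASE, PLAT_SRAM_SIZE,
 *			MT_MEMORY | MT_RW | MT_SECURE);
 *	mmap_add_region(PLAT_DEVICE_BASE, PLAT_DEVICE_BASE, PLAT_DEVICE_SIZE,
 *			MT_DEVICE | MT_RW | MT_SECURE);
 *	init_xlat_tables();
 *	enable_mmu_secure(0);
 */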