/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
#include <platform_def.h>
#include <utils.h>
#include <xlat_tables.h>
#include "../xlat_tables_private.h"

/*
 * The virtual address space size must be a power of two. Since the initial
 * lookup starts at level 1, it must also be between 2 GB and 4 GB. See
 * section G4.6.5 in the ARMv8-A Architecture Reference Manual (DDI 0487A.j)
 * for more information.
 */
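/*
 * Worked example (illustrative figures, assuming ADDR_SPACE_SIZE is
 * 1ULL << 32, i.e. 4 GB, and the usual 1 GB level 1 granule, so
 * L1_XLAT_ADDRESS_SHIFT == 30): the assertion below accepts the size,
 * NUM_L1_ENTRIES evaluates to 4, and the level 1 table occupies
 * 4 * 8 = 32 bytes. The table is aligned to its own size because the
 * long-descriptor format requires the translation table base programmed
 * into TTBR0 to be aligned to the size of the table.
 */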
CASSERT(ADDR_SPACE_SIZE >= (1ull << 31) && ADDR_SPACE_SIZE <= (1ull << 32) &&
	IS_POWER_OF_TWO(ADDR_SPACE_SIZE), assert_valid_addr_space_size);

#define NUM_L1_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)

static uint64_t l1_xlation_table[NUM_L1_ENTRIES]
			__aligned(NUM_L1_ENTRIES * sizeof(uint64_t));

void init_xlat_tables(void)
{
	unsigned long long max_pa;
	uintptr_t max_va;

	print_mmap();
	init_xlation_table(0, l1_xlation_table, 1, &max_va, &max_pa);
	assert(max_va < ADDR_SPACE_SIZE);
}

/*******************************************************************************
 * Function for enabling the MMU in Secure PL1, assuming that the page tables
 * have already been created.
 ******************************************************************************/
void enable_mmu_secure(unsigned int flags)
{
	unsigned int mair0, ttbcr, sctlr;
	uint64_t ttbr0;

	assert(IS_IN_SECURE());
	assert((read_sctlr() & SCTLR_M_BIT) == 0);

	/* Set attributes in the right indices of the MAIR */
	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
			ATTR_NON_CACHEABLE_INDEX);
	write_mair0(mair0);

	/* Invalidate TLBs at the current exception level */
	tlbiall();

	/*
	 * Configure TTBCR: enable the long-descriptor translation table
	 * format (EAE), set the TTBR0 table walk attributes to Inner and
	 * Outer WBWA and Inner Shareable, and disable walks through TTBR1.
	 */
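	/*
	 * TTBCR.T0SZ is 32 minus the log2 of the address space size; since
	 * ADDR_SPACE_SIZE is asserted to be a power of two, the 64-bit
	 * count-trailing-zeros builtin computes that log2 directly (for
	 * example, T0SZ is 0 for a 4 GB space and 1 for a 2 GB space).
	 */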
	ttbcr = TTBCR_EAE_BIT |
		TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
		TTBCR_RGN0_INNER_WBA |
		(32 - __builtin_ctzll(ADDR_SPACE_SIZE));
	ttbcr |= TTBCR_EPD1_BIT;
	write_ttbcr(ttbcr);

	/* Point TTBR0 at the level 1 translation table; TTBR1 is unused */
	ttbr0 = (uintptr_t) l1_xlation_table;
	write64_ttbr0(ttbr0);
	write64_ttbr1(0);

	/*
	 * Ensure all translation table writes have drained into memory, the
	 * TLB invalidation is complete, and translation register writes are
	 * committed before enabling the MMU.
	 */
	dsb();
	isb();

	sctlr = read_sctlr();
	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;

	if (flags & DISABLE_DCACHE)
		sctlr &= ~SCTLR_C_BIT;
	else
		sctlr |= SCTLR_C_BIT;

	write_sctlr(sctlr);

	/* Ensure the MMU enable takes effect immediately */
	isb();
}
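/*
 * Typical usage from a platform port (a sketch; DEVICE_BASE and DEVICE_SIZE
 * are hypothetical platform constants, not part of this library): describe
 * the memory map, build the tables, then enable the MMU:
 *
 *	mmap_add_region(DEVICE_BASE, DEVICE_BASE, DEVICE_SIZE,
 *			MT_DEVICE | MT_RW | MT_SECURE);
 *	init_xlat_tables();
 *	enable_mmu_secure(0);
 */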