10ae76531SDavid Feng /* 20ae76531SDavid Feng * (C) Copyright 2013 30ae76531SDavid Feng * David Feng <fenghua@phytium.com.cn> 40ae76531SDavid Feng * 55e2ec773SAlexander Graf * (C) Copyright 2016 65e2ec773SAlexander Graf * Alexander Graf <agraf@suse.de> 75e2ec773SAlexander Graf * 80ae76531SDavid Feng * SPDX-License-Identifier: GPL-2.0+ 90ae76531SDavid Feng */ 100ae76531SDavid Feng 110ae76531SDavid Feng #include <common.h> 120ae76531SDavid Feng #include <asm/system.h> 130ae76531SDavid Feng #include <asm/armv8/mmu.h> 140ae76531SDavid Feng 150ae76531SDavid Feng DECLARE_GLOBAL_DATA_PTR; 160ae76531SDavid Feng 170ae76531SDavid Feng #ifndef CONFIG_SYS_DCACHE_OFF 1894f7ff36SSergey Temerkhanov 195e2ec773SAlexander Graf /* 205e2ec773SAlexander Graf * With 4k page granule, a virtual address is split into 4 lookup parts 215e2ec773SAlexander Graf * spanning 9 bits each: 225e2ec773SAlexander Graf * 235e2ec773SAlexander Graf * _______________________________________________ 245e2ec773SAlexander Graf * | | | | | | | 255e2ec773SAlexander Graf * | 0 | Lv0 | Lv1 | Lv2 | Lv3 | off | 265e2ec773SAlexander Graf * |_______|_______|_______|_______|_______|_______| 275e2ec773SAlexander Graf * 63-48 47-39 38-30 29-21 20-12 11-00 285e2ec773SAlexander Graf * 295e2ec773SAlexander Graf * mask page size 305e2ec773SAlexander Graf * 315e2ec773SAlexander Graf * Lv0: FF8000000000 -- 325e2ec773SAlexander Graf * Lv1: 7FC0000000 1G 335e2ec773SAlexander Graf * Lv2: 3FE00000 2M 345e2ec773SAlexander Graf * Lv3: 1FF000 4K 355e2ec773SAlexander Graf * off: FFF 365e2ec773SAlexander Graf */ 375e2ec773SAlexander Graf 38252cdb46SYork Sun u64 get_tcr(int el, u64 *pips, u64 *pva_bits) 390691484aSAlexander Graf { 400691484aSAlexander Graf u64 max_addr = 0; 410691484aSAlexander Graf u64 ips, va_bits; 420691484aSAlexander Graf u64 tcr; 430691484aSAlexander Graf int i; 440691484aSAlexander Graf 450691484aSAlexander Graf /* Find the largest address we need to support */ 46d473f0c6SAlexander Graf for (i = 0; 
mem_map[i].size || mem_map[i].attrs; i++) 47cd4b0c5fSYork Sun max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size); 480691484aSAlexander Graf 490691484aSAlexander Graf /* Calculate the maximum physical (and thus virtual) address */ 500691484aSAlexander Graf if (max_addr > (1ULL << 44)) { 510691484aSAlexander Graf ips = 5; 520691484aSAlexander Graf va_bits = 48; 530691484aSAlexander Graf } else if (max_addr > (1ULL << 42)) { 540691484aSAlexander Graf ips = 4; 550691484aSAlexander Graf va_bits = 44; 560691484aSAlexander Graf } else if (max_addr > (1ULL << 40)) { 570691484aSAlexander Graf ips = 3; 580691484aSAlexander Graf va_bits = 42; 590691484aSAlexander Graf } else if (max_addr > (1ULL << 36)) { 600691484aSAlexander Graf ips = 2; 610691484aSAlexander Graf va_bits = 40; 620691484aSAlexander Graf } else if (max_addr > (1ULL << 32)) { 630691484aSAlexander Graf ips = 1; 640691484aSAlexander Graf va_bits = 36; 650691484aSAlexander Graf } else { 660691484aSAlexander Graf ips = 0; 670691484aSAlexander Graf va_bits = 32; 680691484aSAlexander Graf } 690691484aSAlexander Graf 700691484aSAlexander Graf if (el == 1) { 719bb367a5SAlexander Graf tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE; 720691484aSAlexander Graf } else if (el == 2) { 730691484aSAlexander Graf tcr = TCR_EL2_RSVD | (ips << 16); 740691484aSAlexander Graf } else { 750691484aSAlexander Graf tcr = TCR_EL3_RSVD | (ips << 16); 760691484aSAlexander Graf } 770691484aSAlexander Graf 780691484aSAlexander Graf /* PTWs cacheable, inner/outer WBWA and inner shareable */ 795e2ec773SAlexander Graf tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA; 805e2ec773SAlexander Graf tcr |= TCR_T0SZ(va_bits); 810691484aSAlexander Graf 820691484aSAlexander Graf if (pips) 830691484aSAlexander Graf *pips = ips; 840691484aSAlexander Graf if (pva_bits) 850691484aSAlexander Graf *pva_bits = va_bits; 860691484aSAlexander Graf 870691484aSAlexander Graf return tcr; 880691484aSAlexander Graf } 

#define MAX_PTE_ENTRIES 512

/* Return the descriptor type bits (PTE_TYPE_*) of the given PTE */
static int pte_type(u64 *pte)
{
	return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
	/* Page is 12 bits wide, every level translates 9 bits */
	return (12 + 9 * (3 - level));
}

/*
 * Walk the live page tables (rooted at gd->arch.tlb_addr) and return a
 * pointer to the PTE that describes <addr> at table level <level>, or
 * NULL if the walk cannot reach that level (entry invalid, already a
 * block, or <level> is above the configured starting level).
 */
static u64 *find_pte(u64 addr, int level)
{
	int start_level = 0;
	u64 *pte;
	u64 idx;
	u64 va_bits;
	int i;

	debug("addr=%llx level=%d\n", addr, level);

	/* With fewer than 39 VA bits the walk starts at Lv1, not Lv0 */
	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	if (level < start_level)
		return NULL;

	/* Walk through all page table levels to find our PTE */
	pte = (u64*)gd->arch.tlb_addr;
	for (i = start_level; i < 4; i++) {
		/* Each level indexes 9 bits of the VA (512 entries) */
		idx = (addr >> level2shift(i)) & 0x1FF;
		pte += idx;
		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

		/* Found it */
		if (i == level)
			return pte;
		/* PTE is no table (either invalid or block), can't traverse */
		if (pte_type(pte) != PTE_TYPE_TABLE)
			return NULL;
		/* Off to the next level: follow the table's output address */
		pte = (u64*)(*pte & 0x0000fffffffff000ULL);
	}

	/* Should never reach here */
	return NULL;
}

/*
 * Returns and creates a new full table (512 entries), carved out of the
 * bump allocator at gd->arch.tlb_fillptr. Panics if the reserved page
 * table area is exhausted. All entries start out invalid (zero).
 */
static u64 *create_table(void)
{
	u64 *new_table = (u64*)gd->arch.tlb_fillptr;
	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

	/* Allocate MAX_PTE_ENTRIES pte entries */
	gd->arch.tlb_fillptr += pt_len;

	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
		      "Please increase the size in get_page_table_size()",
			gd->arch.tlb_fillptr - gd->arch.tlb_addr,
			gd->arch.tlb_size);

	/* Mark all entries as invalid */
	memset(new_table, 0, pt_len);

	return new_table;
}

/* Install <table> as the target of <pte>, making it a table descriptor */
static void set_pte_table(u64 *pte, u64 *table)
{
	/* Point *pte to the new table */
	debug("Setting %p to addr=%p\n", pte, table);
	*pte = PTE_TYPE_TABLE | (ulong)table;
}

/* Splits a block PTE into table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
	u64 old_pte = *pte;
	u64 *new_table;
	u64 i = 0;
	/* level describes the parent level, we need the child ones */
	int levelshift = level2shift(level + 1);

	if (pte_type(pte) != PTE_TYPE_BLOCK)
		panic("PTE %p (%llx) is not a block. Some driver code wants to "
		      "modify dcache settings for an range not covered in "
		      "mem_map.", pte, old_pte);

	new_table = create_table();
	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

	/*
	 * Each child entry inherits the attributes/output address of the
	 * old block and only differs in its sub-block offset.
	 */
	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		new_table[i] = old_pte | (i << levelshift);

		/* Level 3 block PTEs have the table type */
		if ((level + 1) == 3)
			new_table[i] |= PTE_TYPE_TABLE;

		/* NOTE(review): %lld with a u64 argument — %llu/%llx would
		 * match the type exactly; harmless for debug output. */
		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
	}

	/* Set the new table into effect */
	set_pte_table(pte, new_table);
}

/*
 * Add one mm_region map entry to the page tables.
 *
 * Walks the region from map->virt, placing the largest block PTE that
 * fits the remaining size and alignment at each step, and creating (or
 * splitting into) sub-tables where a block does not fit.
 */
static void add_map(struct mm_region *map)
{
	u64 *pte;
	u64 virt = map->virt;
	u64 phys = map->phys;
	u64 size = map->size;
	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	u64 blocksize;
	int level;
	u64 *new_table;

	while (size) {
		/* Ensure the top-level entry for this VA exists */
		pte = find_pte(virt, 0);
		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
			debug("Creating table for virt 0x%llx\n", virt);
			new_table = create_table();
			set_pte_table(pte, new_table);
		}

		for (level = 1; level < 4; level++) {
			pte = find_pte(virt, level);
			if (!pte)
				panic("pte not found\n");

			blocksize = 1ULL << level2shift(level);
			debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
			      virt, size, blocksize);
			if (size >= blocksize && !(virt & (blocksize - 1))) {
				/* Page fits, create block PTE */
				debug("Setting PTE %p to block virt=%llx\n",
				      pte, virt);
				*pte = phys | attrs;
				virt += blocksize;
				phys += blocksize;
				size -= blocksize;
				break;
			} else if (pte_type(pte) == PTE_TYPE_FAULT) {
				/* Page doesn't fit, create subpages */
				debug("Creating subtable for virt 0x%llx blksize=%llx\n",
				      virt, blocksize);
				new_table = create_table();
				set_pte_table(pte, new_table);
			} else if (pte_type(pte) == PTE_TYPE_BLOCK) {
				/* Existing block covers more than we map */
				debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n",
				      virt, blocksize);
				split_block(pte, level);
			}
		}
	}
}

/* Classification of a PTE range used by count_required_pts() */
enum pte_type {
	PTE_INVAL,	/* no mem_map region touches this range */
	PTE_BLOCK,	/* range fits a single block PTE */
	PTE_LEVEL,	/* range needs a sub page table */
};

/*
 * This is a recursively called function to count the number of
 * page tables we need to cover a particular PTE range. If you
 * call this with level = -1 you basically get the full 48 bit
 * coverage.
 */
static int count_required_pts(u64 addr, int level, u64 maxaddr)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 levelmask = levelsize - 1;
	u64 levelend = addr + levelsize;
	int r = 0;
	int i;
	enum pte_type pte_type = PTE_INVAL;

	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
		struct mm_region *map = &mem_map[i];
		u64 start = map->virt;
		u64 end = start + map->size;

		/* Check if the PTE would overlap with the map */
		if (max(addr, start) <= min(levelend, end)) {
			/* Clamp to the intersection of PTE range and map */
			start = max(addr, start);
			end = min(levelend, end);

			/* We need a sub-pt for this level */
			if ((start & levelmask) || (end & levelmask)) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* Lv0 can not do block PTEs, so do levels here too */
			if (level <= 0) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* PTE is active, but fits into a block */
			pte_type = PTE_BLOCK;
		}
	}

	/*
	 * Block PTEs at this level are already covered by the parent page
	 * table, so we only need to count sub page tables.
	 */
	if (pte_type == PTE_LEVEL) {
		int sublevel = level + 1;
		u64 sublevelsize = 1ULL << level2shift(sublevel);

		/* Account for the new sub page table ... */
		r = 1;

		/* ... and for all child page tables that one might have */
		for (i = 0; i < MAX_PTE_ENTRIES; i++) {
			r += count_required_pts(addr, sublevel, maxaddr);
			addr += sublevelsize;

			if (addr >= maxaddr) {
				/*
				 * We reached the end of address space, no need
				 * to look any further.
				 */
				break;
			}
		}
	}

	return r;
}

/*
 * Returns the estimated required size of all page tables.
 * Weak so boards with a known layout can override the estimate.
 */
__weak u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;
	u64 va_bits;
	int start_level = 0;

	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	/* Account for all page tables we would need to cover our memory map */
	size = one_pt * count_required_pts(0, start_level - 1, 1ULL << va_bits);

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}

/*
 * Build one complete set of page tables for all mem_map[] entries,
 * starting at the current gd->arch.tlb_fillptr bump pointer.
 */
void setup_pgtables(void)
{
	int i;

	if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
		panic("Page table pointer not setup.");

	/*
	 * Allocate the first level we're on with invalidate entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
	 * Lv0 page table, otherwise it's the entry Lv1 page table.
	 */
	create_table();

	/* Now add all MMU table entries one after another to the table */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		add_map(&mem_map[i]);
}

/*
 * Create both the primary page tables and a second "emergency" copy
 * (recorded in gd->arch.tlb_emerg) that we can switch to while the
 * primary tables are being modified.
 */
static void setup_all_pgtables(void)
{
	u64 tlb_addr = gd->arch.tlb_addr;
	u64 tlb_size = gd->arch.tlb_size;

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables in the space left over */
	gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
			     (uintptr_t)gd->arch.tlb_addr;
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	setup_pgtables();
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	/* Restore the primary table address/size for normal operation */
	gd->arch.tlb_addr = tlb_addr;
	gd->arch.tlb_size = tlb_size;
}

/* to activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
	int el;

	/* Set up page tables only once */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	/* enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs a invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
	__asm_invalidate_l3_dcache();
}

/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using stack.
 * __asm_flush_l3_dcache return status of timeout
 */
inline void flush_dcache_all(void)
{
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_dcache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("flushing dcache successfully.\n");
}

/*
 * Invalidates range in all levels of D-cache/unified cache.
 *
 * NOTE(review): this calls __asm_flush_dcache_range(), which (per
 * flush_dcache_range below) performs a clean & invalidate, not a pure
 * invalidate — dirty lines in the range are written back first.
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

/*
 * Flush range(clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

/* Turn on the data cache, setting up the MMU first if needed */
void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	set_sctlr(get_sctlr() | CR_C);
}

/* Turn off the data cache and the MMU, flushing dirty lines out first */
void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* if cache isn't enabled no need to disable */
	if (!(sctlr & CR_C))
		return;

	/* Disable cache and MMU before flushing so no new lines allocate */
	set_sctlr(sctlr & ~(CR_C|CR_M));

	flush_dcache_all();
	__asm_invalidate_tlb_all();
}

/* Return non-zero if the data cache is currently enabled */
int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

/* Weak default: boards that expose a fixed page table override this */
u64 *__weak arch_get_page_table(void) {
	puts("No page table offset defined\n");

	return NULL;
}

/* True if both addr and size are multiples of align (align power of 2) */
static bool is_aligned(u64 addr, u64 size, u64 align)
{
	return !(addr & (align - 1)) && !(size & (align - 1));
}

/*
 * Try to update the attributes of the region [start, start+size) at the
 * given table level. Use flag to indicate if attrs has more than d-cache
 * attributes (flag=true rewrites the full PMD attribute mask).
 *
 * Returns the number of bytes covered (the level's block size) when the
 * region was aligned enough to modify a PTE at this level, or 0 when the
 * caller must descend one level (splitting a block PTE if necessary).
 */
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 *pte = find_pte(start, level);

	/* Can we just modify the current level block PTE? */
	if (is_aligned(start, size, levelsize)) {
		if (flag) {
			*pte &= ~PMD_ATTRMASK;
			*pte |= attrs & PMD_ATTRMASK;
		} else {
			*pte &= ~PMD_ATTRINDX_MASK;
			*pte |= attrs & PMD_ATTRINDX_MASK;
		}
		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

		return levelsize;
	}

	/* Unaligned or doesn't fit, maybe split block into table */
	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

	/* Maybe we need to split the block into a table */
	if (pte_type(pte) == PTE_TYPE_BLOCK)
		split_block(pte, level);

	/* And then double-check it became a table or already is one */
	if (pte_type(pte) != PTE_TYPE_TABLE)
		panic("PTE %p (%llx) for addr=%llx should be a table",
		      pte, *pte, start);

	/* Roll on to the next page table level */
	return 0;
}

/*
 * Change only the d-cache attributes (MAIR index) of an address range in
 * the live page tables. The update is performed while running on the
 * emergency page tables, since the primary ones cannot be modified in use.
 */
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 attrs = PMD_ATTRINDX(option);
	u64 real_start = start;
	u64 real_size = size;

	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

	if (!gd->arch.tlb_emerg)
		panic("Emergency page table not setup.");

	/*
	 * We can not modify page tables that we're currently running on,
	 * so we first need to switch to the "emergency" page tables where
	 * we can safely modify our primary page tables and then switch back
	 */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	while (size > 0) {
		int level;
		u64 r;

		for (level = 1; level < 4; level++) {
			/* Set d-cache attributes only */
			r = set_one_region(start, size, attrs, false, level);
			if (r) {
				/* PTE successfully replaced */
				size -= r;
				start += r;
				break;
			}
		}

	}

	/* We're done modifying page tables, switch back to our primary ones */
	__asm_switch_ttbr(gd->arch.tlb_addr);

	/*
	 * Make sure there's nothing stale in dcache for a region that might
	 * have caches off now
	 */
	flush_dcache_range(real_start, real_start + real_size);
}

/*
 * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
 * The process is break-before-make: the target region is first marked
 * invalid (and the TLB flushed) before the new attributes are written,
 * as required by the ARMv8 architecture when changing live mappings.
 */
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
{
	int level;
	u64 r, size, start;

	start = addr;
	size = siz;
	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to "invalid".
	 */
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to fault */
			r = set_one_region(start, size, PTE_TYPE_FAULT, true,
					   level);
			if (r) {
				/* PTE successfully invalidated */
				size -= r;
				start += r;
				break;
			}
		}
	}

	/* Make the invalidation visible to the table walker, then drop TLBs */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	start = addr;
	size = siz;
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to new attributes */
			r = set_one_region(start, size, attrs, true, level);
			if (r) {
				/* PTE successfully updated */
				size -= r;
				start += r;
				break;
			}
		}
	}
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();
}

#else	/* CONFIG_SYS_DCACHE_OFF */

/*
 * For SPL builds, we may want to not have dcache enabled. Any real U-Boot
 * running however really wants to have dcache and the MMU active. Check that
 * everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_SPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

/* No-op stubs: dcache support compiled out */
void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif	/* CONFIG_SYS_DCACHE_OFF */

#ifndef CONFIG_SYS_ICACHE_OFF

/* Invalidate, then turn on the instruction cache */
void icache_enable(void)
{
	invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

/* Turn off the instruction cache */
void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

/* Return non-zero if the instruction cache is currently enabled */
int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

/* Invalidate the instruction cache at all levels, including L3 */
void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
	__asm_invalidate_l3_icache();
}

#else	/* CONFIG_SYS_ICACHE_OFF */

/* No-op stubs: icache support compiled out */
void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif	/* CONFIG_SYS_ICACHE_OFF */

/*
 * Enable dCache & iCache, whether cache is actually enabled
 * depend on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}