10ae76531SDavid Feng /* 20ae76531SDavid Feng * (C) Copyright 2013 30ae76531SDavid Feng * David Feng <fenghua@phytium.com.cn> 40ae76531SDavid Feng * 55e2ec773SAlexander Graf * (C) Copyright 2016 65e2ec773SAlexander Graf * Alexander Graf <agraf@suse.de> 75e2ec773SAlexander Graf * 80ae76531SDavid Feng * SPDX-License-Identifier: GPL-2.0+ 90ae76531SDavid Feng */ 100ae76531SDavid Feng 110ae76531SDavid Feng #include <common.h> 120ae76531SDavid Feng #include <asm/system.h> 130ae76531SDavid Feng #include <asm/armv8/mmu.h> 140ae76531SDavid Feng 150ae76531SDavid Feng DECLARE_GLOBAL_DATA_PTR; 160ae76531SDavid Feng 170ae76531SDavid Feng #ifndef CONFIG_SYS_DCACHE_OFF 1894f7ff36SSergey Temerkhanov 195e2ec773SAlexander Graf /* 205e2ec773SAlexander Graf * With 4k page granule, a virtual address is split into 4 lookup parts 215e2ec773SAlexander Graf * spanning 9 bits each: 225e2ec773SAlexander Graf * 235e2ec773SAlexander Graf * _______________________________________________ 245e2ec773SAlexander Graf * | | | | | | | 255e2ec773SAlexander Graf * | 0 | Lv0 | Lv1 | Lv2 | Lv3 | off | 265e2ec773SAlexander Graf * |_______|_______|_______|_______|_______|_______| 275e2ec773SAlexander Graf * 63-48 47-39 38-30 29-21 20-12 11-00 285e2ec773SAlexander Graf * 295e2ec773SAlexander Graf * mask page size 305e2ec773SAlexander Graf * 315e2ec773SAlexander Graf * Lv0: FF8000000000 -- 325e2ec773SAlexander Graf * Lv1: 7FC0000000 1G 335e2ec773SAlexander Graf * Lv2: 3FE00000 2M 345e2ec773SAlexander Graf * Lv3: 1FF000 4K 355e2ec773SAlexander Graf * off: FFF 365e2ec773SAlexander Graf */ 375e2ec773SAlexander Graf 38252cdb46SYork Sun u64 get_tcr(int el, u64 *pips, u64 *pva_bits) 390691484aSAlexander Graf { 400691484aSAlexander Graf u64 max_addr = 0; 410691484aSAlexander Graf u64 ips, va_bits; 420691484aSAlexander Graf u64 tcr; 430691484aSAlexander Graf int i; 440691484aSAlexander Graf 450691484aSAlexander Graf /* Find the largest address we need to support */ 46d473f0c6SAlexander Graf for (i = 0; 
mem_map[i].size || mem_map[i].attrs; i++) 47cd4b0c5fSYork Sun max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size); 480691484aSAlexander Graf 490691484aSAlexander Graf /* Calculate the maximum physical (and thus virtual) address */ 500691484aSAlexander Graf if (max_addr > (1ULL << 44)) { 510691484aSAlexander Graf ips = 5; 520691484aSAlexander Graf va_bits = 48; 530691484aSAlexander Graf } else if (max_addr > (1ULL << 42)) { 540691484aSAlexander Graf ips = 4; 550691484aSAlexander Graf va_bits = 44; 560691484aSAlexander Graf } else if (max_addr > (1ULL << 40)) { 570691484aSAlexander Graf ips = 3; 580691484aSAlexander Graf va_bits = 42; 590691484aSAlexander Graf } else if (max_addr > (1ULL << 36)) { 600691484aSAlexander Graf ips = 2; 610691484aSAlexander Graf va_bits = 40; 620691484aSAlexander Graf } else if (max_addr > (1ULL << 32)) { 630691484aSAlexander Graf ips = 1; 640691484aSAlexander Graf va_bits = 36; 650691484aSAlexander Graf } else { 660691484aSAlexander Graf ips = 0; 670691484aSAlexander Graf va_bits = 32; 680691484aSAlexander Graf } 690691484aSAlexander Graf 700691484aSAlexander Graf if (el == 1) { 719bb367a5SAlexander Graf tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE; 720691484aSAlexander Graf } else if (el == 2) { 730691484aSAlexander Graf tcr = TCR_EL2_RSVD | (ips << 16); 740691484aSAlexander Graf } else { 750691484aSAlexander Graf tcr = TCR_EL3_RSVD | (ips << 16); 760691484aSAlexander Graf } 770691484aSAlexander Graf 780691484aSAlexander Graf /* PTWs cacheable, inner/outer WBWA and inner shareable */ 795e2ec773SAlexander Graf tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA; 805e2ec773SAlexander Graf tcr |= TCR_T0SZ(va_bits); 810691484aSAlexander Graf 820691484aSAlexander Graf if (pips) 830691484aSAlexander Graf *pips = ips; 840691484aSAlexander Graf if (pva_bits) 850691484aSAlexander Graf *pva_bits = va_bits; 860691484aSAlexander Graf 870691484aSAlexander Graf return tcr; 880691484aSAlexander Graf } 

#define MAX_PTE_ENTRIES 512

/* Returns the descriptor type bits (fault/block/table) of *pte */
static int pte_type(u64 *pte)
{
	return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
	/* Page is 12 bits wide, every level translates 9 bits */
	return (12 + 9 * (3 - level));
}

/*
 * Walk the page tables rooted at gd->arch.tlb_addr and return a pointer to
 * the PTE covering <addr> at exactly <level>.  Returns NULL when <level> is
 * above the starting lookup level, or when the walk hits a non-table entry
 * before reaching <level>.
 */
static u64 *find_pte(u64 addr, int level)
{
	int start_level = 0;
	u64 *pte;
	u64 idx;
	u64 va_bits;
	int i;

	debug("addr=%llx level=%d\n", addr, level);

	/* With fewer than 39 VA bits the walk starts at Lv1, not Lv0 */
	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	if (level < start_level)
		return NULL;

	/* Walk through all page table levels to find our PTE */
	pte = (u64*)gd->arch.tlb_addr;
	for (i = start_level; i < 4; i++) {
		/* Each level indexes 9 bits of the VA (512 entries) */
		idx = (addr >> level2shift(i)) & 0x1FF;
		pte += idx;
		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

		/* Found it */
		if (i == level)
			return pte;
		/* PTE is no table (either invalid or block), can't traverse */
		if (pte_type(pte) != PTE_TYPE_TABLE)
			return NULL;
		/* Off to the next level: table address is in bits 47:12 */
		pte = (u64*)(*pte & 0x0000fffffffff000ULL);
	}

	/* Should never reach here */
	return NULL;
}

/*
 * Returns and creates a new full table (512 entries).
 * Tables are carved out of the region [tlb_addr, tlb_addr + tlb_size) with
 * a simple bump allocator (tlb_fillptr); there is no way to free one.
 */
static u64 *create_table(void)
{
	u64 *new_table = (u64*)gd->arch.tlb_fillptr;
	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

	/* Allocate MAX_PTE_ENTRIES pte entries */
	gd->arch.tlb_fillptr += pt_len;

	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
		      "Please increase the size in get_page_table_size()",
			gd->arch.tlb_fillptr - gd->arch.tlb_addr,
			gd->arch.tlb_size);

	/* Mark all entries as invalid */
	memset(new_table, 0, pt_len);

	return new_table;
}

/* Install <table> as the next-level table behind the descriptor *pte */
static void set_pte_table(u64 *pte, u64 *table)
{
	/* Point *pte to the new table */
	debug("Setting %p to addr=%p\n", pte, table);
	*pte = PTE_TYPE_TABLE | (ulong)table;
}

/* Splits a block PTE into table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
	u64 old_pte = *pte;
	u64 *new_table;
	u64 i = 0;
	/* level describes the parent level, we need the child ones */
	int levelshift = level2shift(level + 1);

	if (pte_type(pte) != PTE_TYPE_BLOCK)
		panic("PTE %p (%llx) is not a block. Some driver code wants to "
		      "modify dcache settings for an range not covered in "
		      "mem_map.", pte, old_pte);

	new_table = create_table();
	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		/* Child inherits the parent's attributes, at a smaller span */
		new_table[i] = old_pte | (i << levelshift);

		/* Level 3 block PTEs have the table type */
		if ((level + 1) == 3)
			new_table[i] |= PTE_TYPE_TABLE;

		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
	}

	/* Set the new table into effect */
	set_pte_table(pte, new_table);
}

/* Add one mm_region map entry to the page tables */
static void add_map(struct mm_region *map)
{
	u64 *pte;
	u64 virt = map->virt;
	u64 phys = map->phys;
	u64 size = map->size;
	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	u64 blocksize;
	int level;
	u64 *new_table;

	/* Map the region with the largest block size that alignment allows */
	while (size) {
		/* Make sure the top-level table entry exists */
		pte = find_pte(virt, 0);
		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
			debug("Creating table for virt 0x%llx\n", virt);
			new_table = create_table();
			set_pte_table(pte, new_table);
		}

		for (level = 1; level < 4; level++) {
			pte = find_pte(virt, level);
			if (!pte)
				panic("pte not found\n");

			blocksize = 1ULL << level2shift(level);
			debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
			      virt, size, blocksize);
			if (size >= blocksize && !(virt & (blocksize - 1))) {
				/* Page fits, create block PTE */
				debug("Setting PTE %p to block virt=%llx\n",
				      pte, virt);
				/* Level 3 uses page descriptors, not blocks */
				if (level == 3)
					*pte = phys | attrs | PTE_TYPE_PAGE;
				else
					*pte = phys | attrs;
				virt += blocksize;
				phys += blocksize;
				size -= blocksize;
				break;
			} else if (pte_type(pte) == PTE_TYPE_FAULT) {
				/* Page doesn't fit, create subpages */
				debug("Creating subtable for virt 0x%llx blksize=%llx\n",
				      virt, blocksize);
				new_table = create_table();
				set_pte_table(pte, new_table);
			} else if (pte_type(pte) == PTE_TYPE_BLOCK) {
				debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n",
				      virt, blocksize);
				split_block(pte, level);
			}
		}
	}
}

/* Classification of a PTE span, used by count_required_pts() below */
enum pte_type {
	PTE_INVAL,
	PTE_BLOCK,
	PTE_LEVEL,
};
/*
 * This is a recursively called function to count the number of
 * page tables we need to cover a particular PTE range. If you
 * call this with level = -1 you basically get the full 48 bit
 * coverage.
 */
static int count_required_pts(u64 addr, int level, u64 maxaddr)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 levelmask = levelsize - 1;
	u64 levelend = addr + levelsize;
	int r = 0;
	int i;
	enum pte_type pte_type = PTE_INVAL;

	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
		struct mm_region *map = &mem_map[i];
		u64 start = map->virt;
		u64 end = start + map->size;

		/* Check if the PTE would overlap with the map */
		if (max(addr, start) <= min(levelend, end)) {
			start = max(addr, start);
			end = min(levelend, end);

			/* We need a sub-pt for this level */
			if ((start & levelmask) || (end & levelmask)) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* Lv0 can not do block PTEs, so do levels here too */
			if (level <= 0) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* PTE is active, but fits into a block */
			pte_type = PTE_BLOCK;
		}
	}

	/*
	 * Block PTEs at this level are already covered by the parent page
	 * table, so we only need to count sub page tables.
	 */
	if (pte_type == PTE_LEVEL) {
		int sublevel = level + 1;
		u64 sublevelsize = 1ULL << level2shift(sublevel);

		/* Account for the new sub page table ... */
		r = 1;

		/* ... and for all child page tables that one might have */
		for (i = 0; i < MAX_PTE_ENTRIES; i++) {
			r += count_required_pts(addr, sublevel, maxaddr);
			addr += sublevelsize;

			if (addr >= maxaddr) {
				/*
				 * We reached the end of address space, no need
				 * to look any further.
				 */
				break;
			}
		}
	}

	return r;
}

/*
 * Returns the estimated required size of all page tables, in bytes.
 * Weak so that boards with special requirements can override it.
 */
__weak u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;
	u64 va_bits;
	int start_level = 0;

	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	/* Account for all page tables we would need to cover our memory map */
	size = one_pt * count_required_pts(0, start_level - 1, 1ULL << va_bits);

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}

/* Build one complete set of page tables from mem_map at tlb_fillptr */
void setup_pgtables(void)
{
	int i;

	if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
		panic("Page table pointer not setup.");

	/*
	 * Allocate the first level we're on with invalidate entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
	 * Lv0 page table, otherwise it's the entry Lv1 page table.
	 */
	create_table();

	/* Now add all MMU table entries one after another to the table */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		add_map(&mem_map[i]);
}

/*
 * Build both the primary page tables and a second "emergency" copy that
 * mmu_set_region_dcache_behaviour() switches to while it rewrites the
 * primary ones.  Restores tlb_addr/tlb_size to the primary set on return.
 */
static void setup_all_pgtables(void)
{
	u64 tlb_addr = gd->arch.tlb_addr;
	u64 tlb_size = gd->arch.tlb_size;

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
			     (uintptr_t)gd->arch.tlb_addr;
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	setup_pgtables();
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	gd->arch.tlb_addr = tlb_addr;
	gd->arch.tlb_size = tlb_size;
}

/* to activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
	int el;

	/* Set up page tables only once */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	/* Program translation base, control and memory attributes */
	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	/* enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs a invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
	__asm_invalidate_l3_dcache();
}
/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using stack.
 * __asm_flush_l3_dcache return status of timeout
 */
inline void flush_dcache_all(void)
{
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_dcache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("flushing dcache successfully.\n");
}

/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_invalidate_dcache_range(start, stop);
}

/*
 * Flush range(clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

/* Turn on the data cache, bringing up the MMU first if necessary */
void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	set_sctlr(get_sctlr() | CR_C);
}

/* Turn off data cache and MMU, flushing dirty lines back to memory */
void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* if cache isn't enabled no need to disable */
	if (!(sctlr & CR_C))
		return;

	/* Disable first, then flush, so no new dirty lines appear */
	set_sctlr(sctlr & ~(CR_C|CR_M));

	flush_dcache_all();
	__asm_invalidate_tlb_all();
}

/* Returns non-zero when the data cache is enabled */
int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

/* Weak fallback for platforms that don't provide a page table location */
u64 *__weak arch_get_page_table(void) {
	puts("No page table offset defined\n");

	return NULL;
}

/* True when both addr and size are multiples of align (align power of 2) */
static bool is_aligned(u64 addr, u64 size, u64 align)
{
	return !(addr & (align - 1)) && !(size & (align - 1));
}

/*
 * Apply <attrs> to the PTE covering <start> at <level> if the region is
 * aligned to that level's block size; otherwise split the block so the
 * caller can retry one level down.  Returns the number of bytes covered,
 * or 0 when the caller must descend a level.
 *
 * Use flag to indicate if attrs has more than d-cache attributes
 */
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 *pte = find_pte(start, level);

	/* Can we can just modify the current level block PTE? */
	if (is_aligned(start, size, levelsize)) {
		if (flag) {
			/* Replace all upper attributes (type/XN/etc.) too */
			*pte &= ~PMD_ATTRMASK;
			*pte |= attrs & PMD_ATTRMASK;
		} else {
			/* Only touch the memory attribute index field */
			*pte &= ~PMD_ATTRINDX_MASK;
			*pte |= attrs & PMD_ATTRINDX_MASK;
		}
		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

		return levelsize;
	}

	/* Unaligned or doesn't fit, maybe split block into table */
	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

	/* Maybe we need to split the block into a table */
	if (pte_type(pte) == PTE_TYPE_BLOCK)
		split_block(pte, level);

	/* And then double-check it became a table or already is one */
	if (pte_type(pte) != PTE_TYPE_TABLE)
		panic("PTE %p (%llx) for addr=%llx should be a table",
		      pte, *pte, start);

	/* Roll on to the next page table level */
	return 0;
}

/*
 * Change the d-cache attributes of an address range in the live page
 * tables.  Runs on the emergency tables while modifying the primary ones,
 * since the tables currently in use must not be rewritten in place.
 */
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 attrs = PMD_ATTRINDX(option);
	u64 real_start = start;
	u64 real_size = size;

	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

	if (!gd->arch.tlb_emerg)
		panic("Emergency page table not setup.");

	/*
	 * We can not modify page tables that we're currently running on,
	 * so we first need to switch to the "emergency" page tables where
	 * we can safely modify our primary page tables and then switch back
	 */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	while (size > 0) {
		int level;
		u64 r;

		for (level = 1; level < 4; level++) {
			/* Set d-cache attributes only */
			r = set_one_region(start, size, attrs, false, level);
			if (r) {
				/* PTE successfully replaced */
				size -= r;
				start += r;
				break;
			}
		}

	}

	/* We're done modifying page tables, switch back to our primary ones */
	__asm_switch_ttbr(gd->arch.tlb_addr);

	/*
	 * Make sure there's nothing stale in dcache for a region that might
	 * have caches off now
	 */
	flush_dcache_range(real_start, real_start + real_size);
}

/*
 * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
 * The process is break-before-make. The target region will be marked as
 * invalid during the process of changing.
 */
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
{
	int level;
	u64 r, size, start;

	start = addr;
	size = siz;
	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to "invalid".
	 */
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to fault */
			r = set_one_region(start, size, PTE_TYPE_FAULT, true,
					   level);
			if (r) {
				/* PTE successfully invalidated */
				size -= r;
				start += r;
				break;
			}
		}
	}

	/* Make the invalidation visible before installing new attributes */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	start = addr;
	size = siz;
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to new attributes */
			r = set_one_region(start, size, attrs, true, level);
			if (r) {
				/* PTE successfully updated */
				size -= r;
				start += r;
				break;
			}
		}
	}
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();
}

#else	/* CONFIG_SYS_DCACHE_OFF */

/*
 * For SPL builds, we may want to not have dcache enabled. Any real U-Boot
 * running however really wants to have dcache and the MMU active. Check that
 * everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_SPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

/* No-op stubs: with the d-cache compiled out there is nothing to maintain */

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif	/* CONFIG_SYS_DCACHE_OFF */

#ifndef CONFIG_SYS_ICACHE_OFF

/* Invalidate the instruction cache, then turn it on */
void icache_enable(void)
{
	invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

/* Returns non-zero when the instruction cache is enabled */
int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
	__asm_invalidate_l3_icache();
}

#else	/* CONFIG_SYS_ICACHE_OFF */

/* No-op stubs: i-cache support compiled out */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif	/* CONFIG_SYS_ICACHE_OFF */

/*
 * Enable dCache & iCache, whether cache is actually enabled
 * depend on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}