// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2024, Linaro Limited
 */

#include <assert.h>
#include <kernel/boot.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <stdalign.h>
#include <string.h>
#include <util.h>

/*
 * struct boot_mem_reloc - Pointers relocated in memory during boot
 * @ptrs: Array of pointers to relocate
 * @count: Number of cells used in @ptrs
 * @next: Next relocation array when @ptrs is fully used
 */
struct boot_mem_reloc {
	void **ptrs[64];
	size_t count;
	struct boot_mem_reloc *next;
};

/*
 * struct boot_mem_padding - Unused memory between allocations
 * @start: Start of padding
 * @len: Length of padding
 * @next: Next padding
 */
struct boot_mem_padding {
	vaddr_t start;
	size_t len;
	struct boot_mem_padding *next;
};

/*
 * struct boot_mem_desc - Stack like boot memory allocation pool
 * @orig_mem_start: Boot memory stack base address
 * @orig_mem_end: Boot memory stack end address
 * @mem_start: Boot memory free space start address
 * @mem_end: Boot memory free space end address
 * @reloc: Boot memory pointers requiring relocation
 * @padding: Linked list of unused memory between allocated blocks
 */
struct boot_mem_desc {
	vaddr_t orig_mem_start;
	vaddr_t orig_mem_end;
	vaddr_t mem_start;
	vaddr_t mem_end;
	struct boot_mem_reloc *reloc;
	struct boot_mem_padding *padding;
};

static struct boot_mem_desc *boot_mem_desc;

static void *mem_alloc_tmp(struct boot_mem_desc *desc, size_t len, size_t align)
{
	vaddr_t va = 0;

	assert(desc && desc->mem_start && desc->mem_end);
	assert(IS_POWER_OF_TWO(align) && !(len % align));
	if (SUB_OVERFLOW(desc->mem_end, len, &va))
		panic();
	va = ROUNDDOWN2(va, align);
	if (va < desc->mem_start)
		panic();
	desc->mem_end = va;
	return (void *)va;
}
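
/*
 * mem_alloc_tmp() above carves temporary allocations downwards from
 * @mem_end, while mem_alloc() below carves permanent allocations upwards
 * from @mem_start. Temporary allocations are released again by
 * boot_mem_release_tmp_alloc() once boot has finished, permanent
 * allocations are kept.
 */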

static void add_padding(struct boot_mem_desc *desc, vaddr_t va)
{
	struct boot_mem_padding *pad = NULL;
	vaddr_t rounded = ROUNDUP(desc->mem_start, alignof(*pad));

	if (rounded < va && va - rounded > sizeof(*pad)) {
		pad = (struct boot_mem_padding *)rounded;
		pad->start = desc->mem_start;
		pad->len = va - desc->mem_start;
		DMSG("%#"PRIxVA" len %#zx", pad->start, pad->len);
		pad->next = desc->padding;
		desc->padding = pad;
	}
}

static void *mem_alloc(struct boot_mem_desc *desc, size_t len, size_t align)
{
	vaddr_t va = 0;
	vaddr_t ve = 0;

	runtime_assert(!IS_ENABLED(CFG_WITH_PAGER));
	assert(desc && desc->mem_start && desc->mem_end);
	assert(IS_POWER_OF_TWO(align) && !(len % align));
	va = ROUNDUP2(desc->mem_start, align);
	if (ADD_OVERFLOW(va, len, &ve))
		panic();
	if (ve > desc->mem_end)
		panic();
	add_padding(desc, va);
	desc->mem_start = ve;
	return (void *)va;
}

void boot_mem_init(vaddr_t start, vaddr_t end, vaddr_t orig_end)
{
	struct boot_mem_desc desc = {
		.orig_mem_start = start,
		.orig_mem_end = orig_end,
		.mem_start = start,
		.mem_end = end,
	};

	boot_mem_desc = mem_alloc_tmp(&desc, sizeof(desc), alignof(desc));
	*boot_mem_desc = desc;
	boot_mem_desc->reloc = mem_alloc_tmp(boot_mem_desc,
					     sizeof(*boot_mem_desc->reloc),
					     alignof(*boot_mem_desc->reloc));
	memset(boot_mem_desc->reloc, 0, sizeof(*boot_mem_desc->reloc));
}

void boot_mem_add_reloc(void *ptr)
{
	struct boot_mem_reloc *reloc = NULL;

	assert(boot_mem_desc && boot_mem_desc->reloc);
	reloc = boot_mem_desc->reloc;

	/* If the reloc struct is full, allocate a new one and link it first */
	if (reloc->count == ARRAY_SIZE(reloc->ptrs)) {
		reloc = boot_mem_alloc_tmp(sizeof(*reloc), alignof(*reloc));
		reloc->next = boot_mem_desc->reloc;
		boot_mem_desc->reloc = reloc;
	}

	reloc->ptrs[reloc->count] = ptr;
	reloc->count++;
}
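
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * a boot-time object reached through a pointer cell that itself lives in
 * memory moved during relocation is typically allocated and registered as
 *
 *	static struct foo *foo_ptr;	// hypothetical consumer
 *
 *	foo_ptr = boot_mem_alloc(sizeof(*foo_ptr), alignof(*foo_ptr));
 *	boot_mem_add_reloc(&foo_ptr);
 *
 * boot_mem_relocate(offs) then adjusts both the address of the registered
 * cell (&foo_ptr) and the pointer value stored in it by @offs.
 */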

static void *add_offs(void *p, size_t offs)
{
	assert(p);
	return (uint8_t *)p + offs;
}

static void *add_offs_or_null(void *p, size_t offs)
{
	if (!p)
		return NULL;
	return (uint8_t *)p + offs;
}

void boot_mem_relocate(size_t offs)
{
	struct boot_mem_reloc *reloc = NULL;
	struct boot_mem_padding *pad = NULL;
	size_t n = 0;

	boot_mem_desc = add_offs(boot_mem_desc, offs);

	boot_mem_desc->orig_mem_start += offs;
	boot_mem_desc->orig_mem_end += offs;
	boot_mem_desc->mem_start += offs;
	boot_mem_desc->mem_end += offs;
	boot_mem_desc->reloc = add_offs(boot_mem_desc->reloc, offs);

	for (reloc = boot_mem_desc->reloc;; reloc = reloc->next) {
		for (n = 0; n < reloc->count; n++) {
			reloc->ptrs[n] = add_offs(reloc->ptrs[n], offs);
			*reloc->ptrs[n] = add_offs_or_null(*reloc->ptrs[n],
							   offs);
		}
		if (!reloc->next)
			break;
		reloc->next = add_offs(reloc->next, offs);
	}

	if (boot_mem_desc->padding) {
		boot_mem_desc->padding = add_offs(boot_mem_desc->padding, offs);
		pad = boot_mem_desc->padding;
		while (true) {
			pad->start += offs;
			if (!pad->next)
				break;
			pad->next = add_offs(pad->next, offs);
			pad = pad->next;
		}
	}
}

void *boot_mem_alloc(size_t len, size_t align)
{
	return mem_alloc(boot_mem_desc, len, align);
}

void *boot_mem_alloc_tmp(size_t len, size_t align)
{
	return mem_alloc_tmp(boot_mem_desc, len, align);
}

/*
 * Calls the supplied @func() for each padding and removes the paddings
 * where @func() returns true.
 */
void boot_mem_foreach_padding(bool (*func)(vaddr_t va, size_t len, void *ptr),
			      void *ptr)
{
	struct boot_mem_padding **prev = NULL;
	struct boot_mem_padding *next = NULL;
	struct boot_mem_padding *pad = NULL;

	assert(boot_mem_desc);

	prev = &boot_mem_desc->padding;
	for (pad = boot_mem_desc->padding; pad; pad = next) {
		vaddr_t start = pad->start;
		size_t len = pad->len;

		next = pad->next;
		if (func(start, len, ptr)) {
			DMSG("consumed %p %#"PRIxVA" len %#zx",
			     pad, start, len);
			*prev = next;
		} else {
			DMSG("keeping %p %#"PRIxVA" len %#zx",
			     pad, start, len);
			prev = &pad->next;
		}
	}
}
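
/*
 * Illustrative sketch (hypothetical callback, not part of this file):
 * a consumer that wants to reclaim the paddings into its own pool could
 * use something like
 *
 *	static bool add_padding_to_pool(vaddr_t va, size_t len, void *ptr)
 *	{
 *		struct my_pool *pool = ptr;	// hypothetical pool type
 *
 *		return my_pool_add(pool, va, len); // true: padding consumed
 *	}
 *
 *	boot_mem_foreach_padding(add_padding_to_pool, &pool);
 *
 * Paddings for which the callback returns true are unlinked from the
 * list, the others are kept for a later pass.
 */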

vaddr_t boot_mem_release_unused(void)
{
	tee_mm_entry_t *mm = NULL;
	paddr_t pa = 0;
	vaddr_t va = 0;
	size_t n = 0;
	vaddr_t tmp_va = 0;
	paddr_t tmp_pa = 0;
	size_t tmp_n = 0;

	assert(boot_mem_desc);

	n = boot_mem_desc->mem_start - boot_mem_desc->orig_mem_start;
	DMSG("Allocated %zu bytes at va %#"PRIxVA" pa %#"PRIxPA,
	     n, boot_mem_desc->orig_mem_start,
	     vaddr_to_phys(boot_mem_desc->orig_mem_start));

	DMSG("Tempalloc %zu bytes at va %#"PRIxVA,
	     (size_t)(boot_mem_desc->orig_mem_end - boot_mem_desc->mem_end),
	     boot_mem_desc->mem_end);

	if (IS_ENABLED(CFG_WITH_PAGER))
		goto out;

	pa = vaddr_to_phys(ROUNDUP(boot_mem_desc->orig_mem_start,
				   SMALL_PAGE_SIZE));
	mm = nex_phys_mem_mm_find(pa);
	if (!mm)
		panic();

	va = ROUNDUP(boot_mem_desc->mem_start, SMALL_PAGE_SIZE);

	tmp_va = ROUNDDOWN(boot_mem_desc->mem_end, SMALL_PAGE_SIZE);
	tmp_n = boot_mem_desc->orig_mem_end - tmp_va;
	tmp_pa = vaddr_to_phys(tmp_va);

	pa = tee_mm_get_smem(mm);
	n = vaddr_to_phys(boot_mem_desc->mem_start) - pa;
	tee_mm_free(mm);
	DMSG("Carving out %#"PRIxPA"..%#"PRIxPA, pa, pa + n - 1);
	mm = nex_phys_mem_alloc2(pa, n);
	if (!mm)
		panic();
	mm = nex_phys_mem_alloc2(tmp_pa, tmp_n);
	if (!mm)
		panic();

	n = tmp_va - boot_mem_desc->mem_start;
	DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va);

	/* Unmap the now unused pages */
	core_mmu_unmap_pages(va, n / SMALL_PAGE_SIZE);

out:
	/* Stop further allocations. */
	boot_mem_desc->mem_start = boot_mem_desc->mem_end;
	return va;
}

void boot_mem_release_tmp_alloc(void)
{
	tee_mm_entry_t *mm = NULL;
	vaddr_t va = 0;
	paddr_t pa = 0;
	size_t n = 0;

	assert(boot_mem_desc &&
	       boot_mem_desc->mem_start == boot_mem_desc->mem_end);

	if (IS_ENABLED(CFG_WITH_PAGER)) {
		n = boot_mem_desc->orig_mem_end - boot_mem_desc->mem_end;
		va = boot_mem_desc->mem_end;
		boot_mem_desc = NULL;
		DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va);
		return;
	}

	va = ROUNDDOWN(boot_mem_desc->mem_end, SMALL_PAGE_SIZE);
	pa = vaddr_to_phys(va);

	mm = nex_phys_mem_mm_find(pa);
	if (!mm)
		panic();
	assert(pa == tee_mm_get_smem(mm));
	n = tee_mm_get_bytes(mm);

	/* Boot memory allocation is now done */
	boot_mem_desc = NULL;

	DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va);

	/* Unmap the now unused pages */
	core_mmu_unmap_pages(va, n / SMALL_PAGE_SIZE);
}
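
/*
 * Illustrative sketch of the expected call order during boot (assumed from
 * the functions above; the actual call sites live in the boot code, not in
 * this file):
 *
 *	boot_mem_init(start, end, orig_end);
 *	... boot_mem_alloc()/boot_mem_alloc_tmp()/boot_mem_add_reloc() ...
 *	boot_mem_relocate(offs);	// only if the pool is moved by offs
 *	boot_mem_release_unused();	// permanent allocations are kept
 *	boot_mem_release_tmp_alloc();	// temporary allocations are unmapped
 */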