// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019, Linaro Limited
 */

#include <crypto/crypto.h>
#include <crypto/internal_aes-gcm.h>
#include <kernel/generic_boot.h>
#include <kernel/panic.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/tee_mm.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

#ifdef CFG_WITH_PAGER

#define RWP_AE_KEY_BITS		256

struct rwp_aes_gcm_iv {
	uint32_t iv[3];
};

#define RWP_AES_GCM_TAG_LEN	16

struct rwp_state {
	uint64_t iv;
	uint8_t tag[RWP_AES_GCM_TAG_LEN];
};

struct fobj_rwp {
	uint8_t *store;
	struct rwp_state *state;
	struct fobj fobj;
};

static const struct fobj_ops ops_rw_paged;

static struct internal_aes_gcm_key rwp_ae_key;

void fobj_generate_authenc_key(void)
{
	uint8_t key[RWP_AE_KEY_BITS / 8] = { 0 };

	if (crypto_rng_read(key, sizeof(key)) != TEE_SUCCESS)
		panic("failed to generate random");
	if (internal_aes_gcm_expand_enc_key(key, sizeof(key), &rwp_ae_key))
		panic("failed to expand key");
}

static void fobj_init(struct fobj *fobj, const struct fobj_ops *ops,
		      unsigned int num_pages)
{
	fobj->ops = ops;
	fobj->num_pages = num_pages;
	refcount_set(&fobj->refc, 1);
	TAILQ_INIT(&fobj->areas);
}

static void fobj_uninit(struct fobj *fobj)
{
	assert(!refcount_val(&fobj->refc));
	assert(TAILQ_EMPTY(&fobj->areas));
	tee_pager_invalidate_fobj(fobj);
}

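/*
 * Allocates a read/write paged fobj: @num_pages pages of backing store in
 * TA RAM where each paged-out page is kept AES-GCM encrypted, with the
 * authentication tag and IV counter kept in the rwp_state array.
 *
 * Minimal usage sketch (fobj_put() is assumed here to be the refcount
 * release helper declared in <mm/fobj.h>):
 *
 *	struct fobj *f = fobj_rw_paged_alloc(4);
 *
 *	if (!f)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	... hand the fobj over to the pager ...
 *	fobj_put(f);
 */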
struct fobj *fobj_rw_paged_alloc(unsigned int num_pages)
{
	tee_mm_entry_t *mm = NULL;
	struct fobj_rwp *rwp = NULL;
	size_t size = 0;

	assert(num_pages);

	rwp = calloc(1, sizeof(*rwp));
	if (!rwp)
		return NULL;

	rwp->state = calloc(num_pages, sizeof(*rwp->state));
	if (!rwp->state)
		goto err;

	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
		goto err;
	mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
	if (!mm)
		goto err;
	rwp->store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
	assert(rwp->store); /* to assist debugging if it would ever happen */
	if (!rwp->store)
		goto err;

	fobj_init(&rwp->fobj, &ops_rw_paged, num_pages);

	return &rwp->fobj;

err:
	tee_mm_free(mm);
	free(rwp->state);
	free(rwp);

	return NULL;
}

static struct fobj_rwp *to_rwp(struct fobj *fobj)
{
	assert(fobj->ops == &ops_rw_paged);

	return container_of(fobj, struct fobj_rwp, fobj);
}

static void rwp_free(struct fobj *fobj)
{
	struct fobj_rwp *rwp = to_rwp(fobj);

	fobj_uninit(fobj);
	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rwp->store)));
	free(rwp->state);
	free(rwp);
}

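/*
 * Each page is protected individually with AES-GCM using the rwp_ae_key
 * generated by fobj_generate_authenc_key(). The per-page rwp_state holds
 * the authentication tag and a 64-bit counter; the 96-bit GCM IV is built
 * from the virtual address of the state entry combined with that counter.
 * A counter value of zero means the page has never been saved, so
 * load_page() supplies a zeroed page instead of decrypting.
 */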
static TEE_Result rwp_load_page(struct fobj *fobj, unsigned int page_idx,
				void *va)
{
	struct fobj_rwp *rwp = to_rwp(fobj);
	struct rwp_state *state = rwp->state + page_idx;
	uint8_t *src = rwp->store + page_idx * SMALL_PAGE_SIZE;
	struct rwp_aes_gcm_iv iv = {
		.iv = { (vaddr_t)state, state->iv >> 32, state->iv }
	};

	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	if (!state->iv) {
		/*
		 * IV still zero, which means that this is a previously
		 * unused page.
		 */
		memset(va, 0, SMALL_PAGE_SIZE);
		return TEE_SUCCESS;
	}

	return internal_aes_gcm_dec(&rwp_ae_key, &iv, sizeof(iv),
				    NULL, 0, src, SMALL_PAGE_SIZE, va,
				    state->tag, sizeof(state->tag));
}
KEEP_PAGER(rwp_load_page);

static TEE_Result rwp_save_page(struct fobj *fobj, unsigned int page_idx,
				const void *va)
{
	struct fobj_rwp *rwp = to_rwp(fobj);
	struct rwp_state *state = rwp->state + page_idx;
	size_t tag_len = sizeof(state->tag);
	uint8_t *dst = rwp->store + page_idx * SMALL_PAGE_SIZE;
	struct rwp_aes_gcm_iv iv;

	memset(&iv, 0, sizeof(iv));

	if (!refcount_val(&fobj->refc)) {
		/*
		 * This fobj is being torn down, it just hasn't had the
		 * time to call tee_pager_invalidate_fobj() yet.
		 */
		assert(TAILQ_EMPTY(&fobj->areas));
		return TEE_SUCCESS;
	}

	assert(page_idx < fobj->num_pages);
	assert(state->iv + 1 > state->iv);

	state->iv++;
	/*
	 * The IV is constructed as recommended in section "8.2.1
	 * Deterministic Construction" of "Recommendation for Block Cipher
	 * Modes of Operation: Galois/Counter Mode (GCM) and GMAC",
	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
	 */

	iv.iv[0] = (vaddr_t)state;
	iv.iv[1] = state->iv >> 32;
	iv.iv[2] = state->iv;

	return internal_aes_gcm_enc(&rwp_ae_key, &iv, sizeof(iv),
				    NULL, 0, va, SMALL_PAGE_SIZE, dst,
				    state->tag, &tag_len);
}
KEEP_PAGER(rwp_save_page);

static const struct fobj_ops ops_rw_paged __rodata_unpaged = {
	.free = rwp_free,
	.load_page = rwp_load_page,
	.save_page = rwp_save_page,
};

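/*
 * A read-only paged fobj uses a caller supplied backing store and only
 * protects integrity: load_page() copies the page from the store and
 * verifies it against a per-page SHA-256 hash, while save_page() always
 * fails since the content is never expected to change.
 */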
struct fobj_rop {
	uint8_t *hashes;
	uint8_t *store;
	struct fobj fobj;
};

static const struct fobj_ops ops_ro_paged;

static void rop_init(struct fobj_rop *rop, const struct fobj_ops *ops,
		     unsigned int num_pages, void *hashes, void *store)
{
	rop->hashes = hashes;
	rop->store = store;
	fobj_init(&rop->fobj, ops, num_pages);
}

struct fobj *fobj_ro_paged_alloc(unsigned int num_pages, void *hashes,
				 void *store)
{
	struct fobj_rop *rop = NULL;

	assert(num_pages && hashes && store);

	rop = calloc(1, sizeof(*rop));
	if (!rop)
		return NULL;

	rop_init(rop, &ops_ro_paged, num_pages, hashes, store);

	return &rop->fobj;
}

static struct fobj_rop *to_rop(struct fobj *fobj)
{
	assert(fobj->ops == &ops_ro_paged);

	return container_of(fobj, struct fobj_rop, fobj);
}

static void rop_uninit(struct fobj_rop *rop)
{
	fobj_uninit(&rop->fobj);
	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rop->store)));
	free(rop->hashes);
}

static void rop_free(struct fobj *fobj)
{
	struct fobj_rop *rop = to_rop(fobj);

	rop_uninit(rop);
	free(rop);
}

static TEE_Result rop_load_page_helper(struct fobj_rop *rop,
				       unsigned int page_idx, void *va)
{
	const uint8_t *hash = rop->hashes + page_idx * TEE_SHA256_HASH_SIZE;
	const uint8_t *src = rop->store + page_idx * SMALL_PAGE_SIZE;

	assert(refcount_val(&rop->fobj.refc));
	assert(page_idx < rop->fobj.num_pages);
	memcpy(va, src, SMALL_PAGE_SIZE);

	return hash_sha256_check(hash, va, SMALL_PAGE_SIZE);
}

static TEE_Result rop_load_page(struct fobj *fobj, unsigned int page_idx,
				void *va)
{
	return rop_load_page_helper(to_rop(fobj), page_idx, va);
}
KEEP_PAGER(rop_load_page);

static TEE_Result rop_save_page(struct fobj *fobj __unused,
				unsigned int page_idx __unused,
				const void *va __unused)
{
	return TEE_ERROR_GENERIC;
}
KEEP_PAGER(rop_save_page);

static const struct fobj_ops ops_ro_paged __rodata_unpaged = {
	.free = rop_free,
	.load_page = rop_load_page,
	.save_page = rop_save_page,
};

#ifdef CFG_CORE_ASLR
/*
 * When using relocated pages the relocation information must be applied
 * before the pages can be used. With read-only paging the content is only
 * integrity protected, so relocations cannot be applied to pages in the
 * less secure "store" or the load address selected by ASLR could be given
 * away. This means that each time a page has been loaded and verified it
 * has to have its relocation information applied before it can be used.
 *
 * Only relative relocations are supported; this allows a rather compact
 * representation of the needed relocation information in this struct.
 * r_offset is replaced with the offset into the page that needs to be
 * updated. This number can never be larger than SMALL_PAGE_SIZE so a
 * uint16_t can be used to represent it.
 *
 * All relocations are converted and stored in @relocs. @page_reloc_idx is
 * an array of length @rop.fobj.num_pages with an entry for each page. If
 * @page_reloc_idx[page_idx] isn't UINT16_MAX it's an index into @relocs.
 */
struct fobj_ro_reloc_paged {
	uint16_t *page_reloc_idx;
	uint16_t *relocs;
	unsigned int num_relocs;
	struct fobj_rop rop;
};

static const struct fobj_ops ops_ro_reloc_paged;

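/*
 * Illustrative example (made up numbers): assume two pages with relative
 * relocations at offsets 0x10 and 0x30 into the first page and one at
 * offset 0x20 into the second page. init_rels() below then fills in:
 *
 *	relocs[]         = { 0x10, 0x30, 0x20 }
 *	page_reloc_idx[] = { 0, 2 }
 *
 * so the relocations belonging to page n start at
 * relocs[page_reloc_idx[n]] and end just before the index recorded for
 * the next page that has relocations (or at num_relocs for the last
 * one), which is how rrp_load_page() walks them.
 */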
static unsigned int get_num_rels(unsigned int num_pages,
				 unsigned int reloc_offs,
				 const uint32_t *reloc, unsigned int num_relocs)
{
	const unsigned int align_mask __maybe_unused = sizeof(long) - 1;
	unsigned int nrels = 0;
	unsigned int n = 0;
	vaddr_t offs = 0;

	/*
	 * Count the number of relocations which are needed for these
	 * pages. Also check that the data is well formed: only expected
	 * relocations, sorted in order of the address they apply to.
	 */
	for (; n < num_relocs; n++) {
		assert(ALIGNMENT_IS_OK(reloc[n], unsigned long));
		assert(offs < reloc[n]); /* check that it's sorted */
		offs = reloc[n];
		if (offs >= reloc_offs &&
		    offs <= reloc_offs + num_pages * SMALL_PAGE_SIZE)
			nrels++;
	}

	return nrels;
}

static void init_rels(struct fobj_ro_reloc_paged *rrp, unsigned int reloc_offs,
		      const uint32_t *reloc, unsigned int num_relocs)
{
	unsigned int npg = rrp->rop.fobj.num_pages;
	unsigned int pg_idx = 0;
	unsigned int reln = 0;
	unsigned int n = 0;
	uint32_t r = 0;

	for (n = 0; n < npg; n++)
		rrp->page_reloc_idx[n] = UINT16_MAX;

	for (n = 0; n < num_relocs; n++) {
		if (reloc[n] < reloc_offs)
			continue;

		/* r is the offset from the beginning of this fobj */
		r = reloc[n] - reloc_offs;

		pg_idx = r / SMALL_PAGE_SIZE;
		if (pg_idx >= npg)
			break;

		if (rrp->page_reloc_idx[pg_idx] == UINT16_MAX)
			rrp->page_reloc_idx[pg_idx] = reln;
		rrp->relocs[reln] = r - pg_idx * SMALL_PAGE_SIZE;
		reln++;
	}

	assert(reln == rrp->num_relocs);
}

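/*
 * Allocates a read-only paged fobj with relocation information for the
 * supplied range. page_reloc_idx[] and relocs[] are carved out of the
 * same allocation as the struct itself. When no relocation applies to
 * the range a plain read-only paged fobj is returned instead.
 */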
struct fobj *fobj_ro_reloc_paged_alloc(unsigned int num_pages, void *hashes,
				       unsigned int reloc_offs,
				       const void *reloc,
				       unsigned int reloc_len, void *store)
{
	struct fobj_ro_reloc_paged *rrp = NULL;
	const unsigned int num_relocs = reloc_len / sizeof(uint32_t);
	unsigned int nrels = 0;

	assert(ALIGNMENT_IS_OK(reloc, uint32_t));
	assert(ALIGNMENT_IS_OK(reloc_len, uint32_t));
	assert(num_pages && hashes && store);
	if (!reloc_len) {
		assert(!reloc);
		return fobj_ro_paged_alloc(num_pages, hashes, store);
	}
	assert(reloc);

	nrels = get_num_rels(num_pages, reloc_offs, reloc, num_relocs);
	if (!nrels)
		return fobj_ro_paged_alloc(num_pages, hashes, store);

	rrp = calloc(1, sizeof(*rrp) + num_pages * sizeof(uint16_t) +
			nrels * sizeof(uint16_t));
	if (!rrp)
		return NULL;
	rop_init(&rrp->rop, &ops_ro_reloc_paged, num_pages, hashes, store);
	rrp->page_reloc_idx = (uint16_t *)(rrp + 1);
	rrp->relocs = rrp->page_reloc_idx + num_pages;
	rrp->num_relocs = nrels;
	init_rels(rrp, reloc_offs, reloc, num_relocs);

	return &rrp->rop.fobj;
}

static struct fobj_ro_reloc_paged *to_rrp(struct fobj *fobj)
{
	assert(fobj->ops == &ops_ro_reloc_paged);

	return container_of(fobj, struct fobj_ro_reloc_paged, rop.fobj);
}

static void rrp_free(struct fobj *fobj)
{
	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);

	rop_uninit(&rrp->rop);
	free(rrp);
}

static TEE_Result rrp_load_page(struct fobj *fobj, unsigned int page_idx,
				void *va)
{
	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
	unsigned int end_rel = rrp->num_relocs;
	TEE_Result res = TEE_SUCCESS;
	unsigned long *where = NULL;
	unsigned int n = 0;

	res = rop_load_page_helper(&rrp->rop, page_idx, va);
	if (res)
		return res;

	/* Find the reloc index of the next page to tell when we're done */
	for (n = page_idx + 1; n < fobj->num_pages; n++) {
		if (rrp->page_reloc_idx[n] != UINT16_MAX) {
			end_rel = rrp->page_reloc_idx[n];
			break;
		}
	}

	/*
	 * For a page without relocations page_reloc_idx[page_idx] is
	 * UINT16_MAX, so the loop below doesn't execute.
	 */
	for (n = rrp->page_reloc_idx[page_idx]; n < end_rel; n++) {
		where = (void *)((vaddr_t)va + rrp->relocs[n]);
		*where += boot_mmu_config.load_offset;
	}

	return TEE_SUCCESS;
}
KEEP_PAGER(rrp_load_page);

static const struct fobj_ops ops_ro_reloc_paged __rodata_unpaged = {
	.free = rrp_free,
	.load_page = rrp_load_page,
	.save_page = rop_save_page, /* Direct reuse */
};
#endif /*CFG_CORE_ASLR*/

static const struct fobj_ops ops_locked_paged;

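/*
 * A locked paged fobj has no backing store at all: its pages are assumed
 * to stay resident (locked) once paged in, so load_page() only supplies a
 * zeroed page and save_page() is never expected to be called.
 */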
struct fobj *fobj_locked_paged_alloc(unsigned int num_pages)
{
	struct fobj *f = NULL;

	assert(num_pages);

	f = calloc(1, sizeof(*f));
	if (!f)
		return NULL;

	fobj_init(f, &ops_locked_paged, num_pages);

	return f;
}

static void lop_free(struct fobj *fobj)
{
	assert(fobj->ops == &ops_locked_paged);
	fobj_uninit(fobj);
	free(fobj);
}

static TEE_Result lop_load_page(struct fobj *fobj __maybe_unused,
				unsigned int page_idx __maybe_unused,
				void *va)
{
	assert(fobj->ops == &ops_locked_paged);
	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	memset(va, 0, SMALL_PAGE_SIZE);

	return TEE_SUCCESS;
}
KEEP_PAGER(lop_load_page);

static TEE_Result lop_save_page(struct fobj *fobj __unused,
				unsigned int page_idx __unused,
				const void *va __unused)
{
	return TEE_ERROR_GENERIC;
}
KEEP_PAGER(lop_save_page);

static const struct fobj_ops ops_locked_paged __rodata_unpaged = {
	.free = lop_free,
	.load_page = lop_load_page,
	.save_page = lop_save_page,
};
#endif /*CFG_WITH_PAGER*/

#ifndef CFG_PAGED_USER_TA

struct fobj_sec_mem {
	tee_mm_entry_t *mm;
	struct fobj fobj;
};

static struct fobj_ops ops_sec_mem;

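/*
 * When user TAs aren't paged (CFG_PAGED_USER_TA=n) a fobj is instead
 * backed by physically contiguous secure memory from tee_mm_sec_ddr.
 * Such a fobj is never paged in or out, it only reports the physical
 * address of each page via get_pa().
 */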
struct fobj *fobj_sec_mem_alloc(unsigned int num_pages)
{
	struct fobj_sec_mem *f = calloc(1, sizeof(*f));
	size_t size = 0;
	void *va = NULL;

	if (!f)
		return NULL;

	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
		goto err;

	f->mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
	if (!f->mm)
		goto err;

	va = phys_to_virt(tee_mm_get_smem(f->mm), MEM_AREA_TA_RAM);
	if (!va)
		goto err;

	memset(va, 0, size);
	f->fobj.ops = &ops_sec_mem;
	f->fobj.num_pages = num_pages;
	refcount_set(&f->fobj.refc, 1);

	return &f->fobj;
err:
	tee_mm_free(f->mm);
	free(f);

	return NULL;
}

static struct fobj_sec_mem *to_sec_mem(struct fobj *fobj)
{
	assert(fobj->ops == &ops_sec_mem);

	return container_of(fobj, struct fobj_sec_mem, fobj);
}

static void sec_mem_free(struct fobj *fobj)
{
	struct fobj_sec_mem *f = to_sec_mem(fobj);

	assert(!refcount_val(&fobj->refc));
	tee_mm_free(f->mm);
	free(f);
}

static paddr_t sec_mem_get_pa(struct fobj *fobj, unsigned int page_idx)
{
	struct fobj_sec_mem *f = to_sec_mem(fobj);

	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	return tee_mm_get_smem(f->mm) + page_idx * SMALL_PAGE_SIZE;
}

static struct fobj_ops ops_sec_mem __rodata_unpaged = {
	.free = sec_mem_free,
	.get_pa = sec_mem_get_pa,
};

#endif /*CFG_PAGED_USER_TA*/