1ee546289SJens Wiklander // SPDX-License-Identifier: BSD-2-Clause 2ee546289SJens Wiklander /* 3*aad1cf6bSJens Wiklander * Copyright (c) 2019-2021, Linaro Limited 4ee546289SJens Wiklander */ 5ee546289SJens Wiklander 6ee546289SJens Wiklander #include <crypto/crypto.h> 7ee546289SJens Wiklander #include <crypto/internal_aes-gcm.h> 8cfde90a6SJens Wiklander #include <initcall.h> 965401337SJens Wiklander #include <kernel/boot.h> 10ee546289SJens Wiklander #include <kernel/panic.h> 11ee546289SJens Wiklander #include <mm/core_memprot.h> 12ee546289SJens Wiklander #include <mm/core_mmu.h> 13ee546289SJens Wiklander #include <mm/fobj.h> 14ee546289SJens Wiklander #include <mm/tee_mm.h> 15ee546289SJens Wiklander #include <stdlib.h> 16ee546289SJens Wiklander #include <string.h> 17ee546289SJens Wiklander #include <tee_api_types.h> 18ee546289SJens Wiklander #include <types_ext.h> 19ee546289SJens Wiklander #include <util.h> 20ee546289SJens Wiklander 21ee546289SJens Wiklander #ifdef CFG_WITH_PAGER 22ee546289SJens Wiklander 23ee546289SJens Wiklander #define RWP_AE_KEY_BITS 256 24ee546289SJens Wiklander 25ee546289SJens Wiklander struct rwp_aes_gcm_iv { 26ee546289SJens Wiklander uint32_t iv[3]; 27ee546289SJens Wiklander }; 28ee546289SJens Wiklander 29ee546289SJens Wiklander #define RWP_AES_GCM_TAG_LEN 16 30ee546289SJens Wiklander 31ee546289SJens Wiklander struct rwp_state { 32ee546289SJens Wiklander uint64_t iv; 33ee546289SJens Wiklander uint8_t tag[RWP_AES_GCM_TAG_LEN]; 34ee546289SJens Wiklander }; 35ee546289SJens Wiklander 36*aad1cf6bSJens Wiklander /* 37*aad1cf6bSJens Wiklander * Note that this struct is padded to a size which is a power of 2, this 38*aad1cf6bSJens Wiklander * guarantees that this state will not span two pages. This avoids a corner 39*aad1cf6bSJens Wiklander * case in the pager when making the state available. 
40*aad1cf6bSJens Wiklander */ 41*aad1cf6bSJens Wiklander struct rwp_state_padded { 42*aad1cf6bSJens Wiklander struct rwp_state state; 43*aad1cf6bSJens Wiklander uint64_t pad; 44*aad1cf6bSJens Wiklander }; 45*aad1cf6bSJens Wiklander 46*aad1cf6bSJens Wiklander struct fobj_rwp_unpaged_iv { 47ee546289SJens Wiklander uint8_t *store; 48ee546289SJens Wiklander struct rwp_state *state; 49ee546289SJens Wiklander struct fobj fobj; 50ee546289SJens Wiklander }; 51ee546289SJens Wiklander 52*aad1cf6bSJens Wiklander struct fobj_rwp_paged_iv { 53*aad1cf6bSJens Wiklander size_t idx; 54*aad1cf6bSJens Wiklander struct fobj fobj; 55*aad1cf6bSJens Wiklander }; 56*aad1cf6bSJens Wiklander 57*aad1cf6bSJens Wiklander static const struct fobj_ops ops_rwp_paged_iv; 58*aad1cf6bSJens Wiklander static const struct fobj_ops ops_rwp_unpaged_iv; 59ee546289SJens Wiklander 60ee546289SJens Wiklander static struct internal_aes_gcm_key rwp_ae_key; 61ee546289SJens Wiklander 62*aad1cf6bSJens Wiklander static struct rwp_state_padded *rwp_state_base; 63*aad1cf6bSJens Wiklander static uint8_t *rwp_store_base; 64ee546289SJens Wiklander 65ee546289SJens Wiklander static void fobj_init(struct fobj *fobj, const struct fobj_ops *ops, 66ee546289SJens Wiklander unsigned int num_pages) 67ee546289SJens Wiklander { 68ee546289SJens Wiklander fobj->ops = ops; 69ee546289SJens Wiklander fobj->num_pages = num_pages; 70ee546289SJens Wiklander refcount_set(&fobj->refc, 1); 71b83c0d5fSJens Wiklander TAILQ_INIT(&fobj->areas); 72ee546289SJens Wiklander } 73ee546289SJens Wiklander 74ee546289SJens Wiklander static void fobj_uninit(struct fobj *fobj) 75ee546289SJens Wiklander { 76ee546289SJens Wiklander assert(!refcount_val(&fobj->refc)); 77b83c0d5fSJens Wiklander assert(TAILQ_EMPTY(&fobj->areas)); 78b83c0d5fSJens Wiklander tee_pager_invalidate_fobj(fobj); 79ee546289SJens Wiklander } 80ee546289SJens Wiklander 81*aad1cf6bSJens Wiklander static TEE_Result rwp_load_page(void *va, struct rwp_state *state, 82*aad1cf6bSJens Wiklander 
const uint8_t *src) 83ee546289SJens Wiklander { 84ee546289SJens Wiklander struct rwp_aes_gcm_iv iv = { 85ee546289SJens Wiklander .iv = { (vaddr_t)state, state->iv >> 32, state->iv } 86ee546289SJens Wiklander }; 87ee546289SJens Wiklander 88ee546289SJens Wiklander if (!state->iv) { 89ee546289SJens Wiklander /* 90*aad1cf6bSJens Wiklander * IV still zero which means that this is previously unused 91ee546289SJens Wiklander * page. 92ee546289SJens Wiklander */ 93ee546289SJens Wiklander memset(va, 0, SMALL_PAGE_SIZE); 94ee546289SJens Wiklander return TEE_SUCCESS; 95ee546289SJens Wiklander } 96ee546289SJens Wiklander 97ee546289SJens Wiklander return internal_aes_gcm_dec(&rwp_ae_key, &iv, sizeof(iv), 98ee546289SJens Wiklander NULL, 0, src, SMALL_PAGE_SIZE, va, 99ee546289SJens Wiklander state->tag, sizeof(state->tag)); 100ee546289SJens Wiklander } 101ee546289SJens Wiklander 102*aad1cf6bSJens Wiklander static TEE_Result rwp_save_page(const void *va, struct rwp_state *state, 103*aad1cf6bSJens Wiklander uint8_t *dst) 104ee546289SJens Wiklander { 105ee546289SJens Wiklander size_t tag_len = sizeof(state->tag); 106*aad1cf6bSJens Wiklander struct rwp_aes_gcm_iv iv = { }; 107ee546289SJens Wiklander 108*aad1cf6bSJens Wiklander assert(state->iv + 1 > state->iv); 109*aad1cf6bSJens Wiklander 110*aad1cf6bSJens Wiklander state->iv++; 111*aad1cf6bSJens Wiklander 112*aad1cf6bSJens Wiklander /* 113*aad1cf6bSJens Wiklander * IV is constructed as recommended in section "8.2.1 Deterministic 114*aad1cf6bSJens Wiklander * Construction" of "Recommendation for Block Cipher Modes of 115*aad1cf6bSJens Wiklander * Operation: Galois/Counter Mode (GCM) and GMAC", 116*aad1cf6bSJens Wiklander * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf 117*aad1cf6bSJens Wiklander */ 118*aad1cf6bSJens Wiklander iv.iv[0] = (vaddr_t)state; 119*aad1cf6bSJens Wiklander iv.iv[1] = state->iv >> 32; 120*aad1cf6bSJens Wiklander iv.iv[2] = state->iv; 121*aad1cf6bSJens Wiklander 122*aad1cf6bSJens Wiklander 
return internal_aes_gcm_enc(&rwp_ae_key, &iv, sizeof(iv), 123*aad1cf6bSJens Wiklander NULL, 0, va, SMALL_PAGE_SIZE, dst, 124*aad1cf6bSJens Wiklander state->tag, &tag_len); 125*aad1cf6bSJens Wiklander } 126*aad1cf6bSJens Wiklander 127*aad1cf6bSJens Wiklander static struct rwp_state_padded *idx_to_state_padded(size_t idx) 128*aad1cf6bSJens Wiklander { 129*aad1cf6bSJens Wiklander assert(rwp_state_base); 130*aad1cf6bSJens Wiklander return rwp_state_base + idx; 131*aad1cf6bSJens Wiklander } 132*aad1cf6bSJens Wiklander 133*aad1cf6bSJens Wiklander static uint8_t *idx_to_store(size_t idx) 134*aad1cf6bSJens Wiklander { 135*aad1cf6bSJens Wiklander assert(rwp_store_base); 136*aad1cf6bSJens Wiklander return rwp_store_base + idx * SMALL_PAGE_SIZE; 137*aad1cf6bSJens Wiklander } 138*aad1cf6bSJens Wiklander 139*aad1cf6bSJens Wiklander struct fobj *fobj_rw_paged_alloc(unsigned int num_pages) 140*aad1cf6bSJens Wiklander { 141*aad1cf6bSJens Wiklander struct fobj_rwp_paged_iv *rwp = NULL; 142*aad1cf6bSJens Wiklander tee_mm_entry_t *mm = NULL; 143*aad1cf6bSJens Wiklander size_t size = 0; 144*aad1cf6bSJens Wiklander 145*aad1cf6bSJens Wiklander COMPILE_TIME_ASSERT(IS_POWER_OF_TWO(sizeof(struct rwp_state_padded))); 146*aad1cf6bSJens Wiklander assert(num_pages); 147*aad1cf6bSJens Wiklander 148*aad1cf6bSJens Wiklander rwp = calloc(1, sizeof(*rwp)); 149*aad1cf6bSJens Wiklander if (!rwp) 150*aad1cf6bSJens Wiklander return NULL; 151*aad1cf6bSJens Wiklander 152*aad1cf6bSJens Wiklander if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size)) 153*aad1cf6bSJens Wiklander goto err; 154*aad1cf6bSJens Wiklander mm = tee_mm_alloc(&tee_mm_sec_ddr, size); 155*aad1cf6bSJens Wiklander if (!mm) 156*aad1cf6bSJens Wiklander goto err; 157*aad1cf6bSJens Wiklander rwp->idx = (tee_mm_get_smem(mm) - tee_mm_sec_ddr.lo) / SMALL_PAGE_SIZE; 158*aad1cf6bSJens Wiklander 159*aad1cf6bSJens Wiklander memset(idx_to_state_padded(rwp->idx), 0, 160*aad1cf6bSJens Wiklander num_pages * sizeof(struct rwp_state_padded)); 
161*aad1cf6bSJens Wiklander 162*aad1cf6bSJens Wiklander fobj_init(&rwp->fobj, &ops_rwp_paged_iv, num_pages); 163*aad1cf6bSJens Wiklander 164*aad1cf6bSJens Wiklander return &rwp->fobj; 165*aad1cf6bSJens Wiklander err: 166*aad1cf6bSJens Wiklander tee_mm_free(mm); 167*aad1cf6bSJens Wiklander free(rwp); 168*aad1cf6bSJens Wiklander 169*aad1cf6bSJens Wiklander return NULL; 170*aad1cf6bSJens Wiklander } 171*aad1cf6bSJens Wiklander 172*aad1cf6bSJens Wiklander static struct fobj_rwp_paged_iv *to_rwp_paged_iv(struct fobj *fobj) 173*aad1cf6bSJens Wiklander { 174*aad1cf6bSJens Wiklander assert(fobj->ops == &ops_rwp_paged_iv); 175*aad1cf6bSJens Wiklander 176*aad1cf6bSJens Wiklander return container_of(fobj, struct fobj_rwp_paged_iv, fobj); 177*aad1cf6bSJens Wiklander } 178*aad1cf6bSJens Wiklander 179*aad1cf6bSJens Wiklander static TEE_Result rwp_paged_iv_load_page(struct fobj *fobj, 180*aad1cf6bSJens Wiklander unsigned int page_idx, void *va) 181*aad1cf6bSJens Wiklander { 182*aad1cf6bSJens Wiklander struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj); 183*aad1cf6bSJens Wiklander uint8_t *src = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE; 184*aad1cf6bSJens Wiklander struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx); 185*aad1cf6bSJens Wiklander 186*aad1cf6bSJens Wiklander assert(refcount_val(&fobj->refc)); 187*aad1cf6bSJens Wiklander assert(page_idx < fobj->num_pages); 188*aad1cf6bSJens Wiklander 189*aad1cf6bSJens Wiklander return rwp_load_page(va, &st->state, src); 190*aad1cf6bSJens Wiklander } 191*aad1cf6bSJens Wiklander DECLARE_KEEP_PAGER(rwp_paged_iv_load_page); 192*aad1cf6bSJens Wiklander 193*aad1cf6bSJens Wiklander static TEE_Result rwp_paged_iv_save_page(struct fobj *fobj, 194*aad1cf6bSJens Wiklander unsigned int page_idx, const void *va) 195*aad1cf6bSJens Wiklander { 196*aad1cf6bSJens Wiklander struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj); 197*aad1cf6bSJens Wiklander uint8_t *dst = idx_to_store(rwp->idx) + page_idx * 
SMALL_PAGE_SIZE; 198*aad1cf6bSJens Wiklander struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx); 199*aad1cf6bSJens Wiklander 200*aad1cf6bSJens Wiklander assert(page_idx < fobj->num_pages); 201b83c0d5fSJens Wiklander 202b83c0d5fSJens Wiklander if (!refcount_val(&fobj->refc)) { 203b83c0d5fSJens Wiklander /* 204b83c0d5fSJens Wiklander * This fobj is being teared down, it just hasn't had the time 205b83c0d5fSJens Wiklander * to call tee_pager_invalidate_fobj() yet. 206b83c0d5fSJens Wiklander */ 207b83c0d5fSJens Wiklander assert(TAILQ_EMPTY(&fobj->areas)); 208b83c0d5fSJens Wiklander return TEE_SUCCESS; 209b83c0d5fSJens Wiklander } 210b83c0d5fSJens Wiklander 211*aad1cf6bSJens Wiklander return rwp_save_page(va, &st->state, dst); 212*aad1cf6bSJens Wiklander } 213*aad1cf6bSJens Wiklander DECLARE_KEEP_PAGER(rwp_paged_iv_save_page); 214ee546289SJens Wiklander 215*aad1cf6bSJens Wiklander static void rwp_paged_iv_free(struct fobj *fobj) 216*aad1cf6bSJens Wiklander { 217*aad1cf6bSJens Wiklander struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj); 218*aad1cf6bSJens Wiklander paddr_t pa = rwp->idx * SMALL_PAGE_SIZE + tee_mm_sec_ddr.lo; 219*aad1cf6bSJens Wiklander tee_mm_entry_t *mm = tee_mm_find(&tee_mm_sec_ddr, pa); 220*aad1cf6bSJens Wiklander 221*aad1cf6bSJens Wiklander assert(mm); 222*aad1cf6bSJens Wiklander 223*aad1cf6bSJens Wiklander fobj_uninit(fobj); 224*aad1cf6bSJens Wiklander tee_mm_free(mm); 225*aad1cf6bSJens Wiklander free(rwp); 226*aad1cf6bSJens Wiklander } 227*aad1cf6bSJens Wiklander 228*aad1cf6bSJens Wiklander static vaddr_t rwp_paged_iv_get_iv_vaddr(struct fobj *fobj, 229*aad1cf6bSJens Wiklander unsigned int page_idx) 230*aad1cf6bSJens Wiklander { 231*aad1cf6bSJens Wiklander struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj); 232*aad1cf6bSJens Wiklander struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx); 233*aad1cf6bSJens Wiklander 234*aad1cf6bSJens Wiklander assert(page_idx < fobj->num_pages); 235*aad1cf6bSJens Wiklander 
return (vaddr_t)&st->state & ~SMALL_PAGE_MASK; 236*aad1cf6bSJens Wiklander } 237*aad1cf6bSJens Wiklander DECLARE_KEEP_PAGER(rwp_paged_iv_get_iv_vaddr); 238*aad1cf6bSJens Wiklander 239*aad1cf6bSJens Wiklander static const struct fobj_ops ops_rwp_paged_iv __rodata_unpaged = { 240*aad1cf6bSJens Wiklander .free = rwp_paged_iv_free, 241*aad1cf6bSJens Wiklander .load_page = rwp_paged_iv_load_page, 242*aad1cf6bSJens Wiklander .save_page = rwp_paged_iv_save_page, 243*aad1cf6bSJens Wiklander .get_iv_vaddr = rwp_paged_iv_get_iv_vaddr, 244*aad1cf6bSJens Wiklander }; 245*aad1cf6bSJens Wiklander 246*aad1cf6bSJens Wiklander static struct fobj_rwp_unpaged_iv *to_rwp_unpaged_iv(struct fobj *fobj) 247*aad1cf6bSJens Wiklander { 248*aad1cf6bSJens Wiklander assert(fobj->ops == &ops_rwp_unpaged_iv); 249*aad1cf6bSJens Wiklander 250*aad1cf6bSJens Wiklander return container_of(fobj, struct fobj_rwp_unpaged_iv, fobj); 251*aad1cf6bSJens Wiklander } 252*aad1cf6bSJens Wiklander 253*aad1cf6bSJens Wiklander static TEE_Result rwp_unpaged_iv_load_page(struct fobj *fobj, 254*aad1cf6bSJens Wiklander unsigned int page_idx, void *va) 255*aad1cf6bSJens Wiklander { 256*aad1cf6bSJens Wiklander struct fobj_rwp_unpaged_iv *rwp = to_rwp_unpaged_iv(fobj); 257*aad1cf6bSJens Wiklander uint8_t *src = rwp->store + page_idx * SMALL_PAGE_SIZE; 258*aad1cf6bSJens Wiklander 259*aad1cf6bSJens Wiklander assert(refcount_val(&fobj->refc)); 260*aad1cf6bSJens Wiklander assert(page_idx < fobj->num_pages); 261*aad1cf6bSJens Wiklander 262*aad1cf6bSJens Wiklander return rwp_load_page(va, rwp->state + page_idx, src); 263*aad1cf6bSJens Wiklander } 264*aad1cf6bSJens Wiklander DECLARE_KEEP_PAGER(rwp_unpaged_iv_load_page); 265*aad1cf6bSJens Wiklander 266*aad1cf6bSJens Wiklander static TEE_Result rwp_unpaged_iv_save_page(struct fobj *fobj, 267*aad1cf6bSJens Wiklander unsigned int page_idx, 268*aad1cf6bSJens Wiklander const void *va) 269*aad1cf6bSJens Wiklander { 270*aad1cf6bSJens Wiklander struct fobj_rwp_unpaged_iv *rwp = 
to_rwp_unpaged_iv(fobj); 271*aad1cf6bSJens Wiklander uint8_t *dst = rwp->store + page_idx * SMALL_PAGE_SIZE; 272*aad1cf6bSJens Wiklander 273*aad1cf6bSJens Wiklander assert(page_idx < fobj->num_pages); 274*aad1cf6bSJens Wiklander 275*aad1cf6bSJens Wiklander if (!refcount_val(&fobj->refc)) { 276ee546289SJens Wiklander /* 277*aad1cf6bSJens Wiklander * This fobj is being teared down, it just hasn't had the time 278*aad1cf6bSJens Wiklander * to call tee_pager_invalidate_fobj() yet. 279*aad1cf6bSJens Wiklander */ 280*aad1cf6bSJens Wiklander assert(TAILQ_EMPTY(&fobj->areas)); 281*aad1cf6bSJens Wiklander return TEE_SUCCESS; 282*aad1cf6bSJens Wiklander } 283*aad1cf6bSJens Wiklander 284*aad1cf6bSJens Wiklander return rwp_save_page(va, rwp->state + page_idx, dst); 285*aad1cf6bSJens Wiklander } 286*aad1cf6bSJens Wiklander DECLARE_KEEP_PAGER(rwp_unpaged_iv_save_page); 287*aad1cf6bSJens Wiklander 288*aad1cf6bSJens Wiklander static void rwp_unpaged_iv_free(struct fobj *fobj __unused) 289*aad1cf6bSJens Wiklander { 290*aad1cf6bSJens Wiklander panic(); 291*aad1cf6bSJens Wiklander } 292*aad1cf6bSJens Wiklander 293*aad1cf6bSJens Wiklander static const struct fobj_ops ops_rwp_unpaged_iv __rodata_unpaged = { 294*aad1cf6bSJens Wiklander .free = rwp_unpaged_iv_free, 295*aad1cf6bSJens Wiklander .load_page = rwp_unpaged_iv_load_page, 296*aad1cf6bSJens Wiklander .save_page = rwp_unpaged_iv_save_page, 297*aad1cf6bSJens Wiklander }; 298*aad1cf6bSJens Wiklander 299*aad1cf6bSJens Wiklander static TEE_Result rwp_init(void) 300*aad1cf6bSJens Wiklander { 301*aad1cf6bSJens Wiklander uint8_t key[RWP_AE_KEY_BITS / 8] = { 0 }; 302*aad1cf6bSJens Wiklander struct fobj_rwp_unpaged_iv *rwp = NULL; 303*aad1cf6bSJens Wiklander tee_mm_entry_t *mm = NULL; 304*aad1cf6bSJens Wiklander size_t num_pool_pages = 0; 305*aad1cf6bSJens Wiklander size_t num_fobj_pages = 0; 306*aad1cf6bSJens Wiklander size_t sz = 0; 307*aad1cf6bSJens Wiklander 308*aad1cf6bSJens Wiklander if (crypto_rng_read(key, sizeof(key)) != 
TEE_SUCCESS) 309*aad1cf6bSJens Wiklander panic("failed to generate random"); 310*aad1cf6bSJens Wiklander if (crypto_aes_expand_enc_key(key, sizeof(key), rwp_ae_key.data, 311*aad1cf6bSJens Wiklander sizeof(rwp_ae_key.data), 312*aad1cf6bSJens Wiklander &rwp_ae_key.rounds)) 313*aad1cf6bSJens Wiklander panic("failed to expand key"); 314*aad1cf6bSJens Wiklander 315*aad1cf6bSJens Wiklander assert(tee_mm_sec_ddr.hi > tee_mm_sec_ddr.lo); 316*aad1cf6bSJens Wiklander sz = tee_mm_sec_ddr.hi - tee_mm_sec_ddr.lo; 317*aad1cf6bSJens Wiklander assert(!(sz & SMALL_PAGE_SIZE)); 318*aad1cf6bSJens Wiklander 319*aad1cf6bSJens Wiklander num_pool_pages = sz / SMALL_PAGE_SIZE; 320*aad1cf6bSJens Wiklander num_fobj_pages = ROUNDUP(num_pool_pages * sizeof(*rwp_state_base), 321*aad1cf6bSJens Wiklander SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE; 322*aad1cf6bSJens Wiklander 323*aad1cf6bSJens Wiklander /* 324*aad1cf6bSJens Wiklander * Each page in the pool needs a struct rwp_state. 325*aad1cf6bSJens Wiklander * 326*aad1cf6bSJens Wiklander * This isn't entirely true, the pages not used by 327*aad1cf6bSJens Wiklander * fobj_rw_paged_alloc() don't need any. A future optimization 328*aad1cf6bSJens Wiklander * may try to avoid allocating for such pages. 
329ee546289SJens Wiklander */ 330ee546289SJens Wiklander 331*aad1cf6bSJens Wiklander rwp = calloc(1, sizeof(*rwp)); 332*aad1cf6bSJens Wiklander if (!rwp) 333*aad1cf6bSJens Wiklander panic(); 334ee546289SJens Wiklander 335*aad1cf6bSJens Wiklander rwp->state = calloc(num_fobj_pages, sizeof(*rwp->state)); 336*aad1cf6bSJens Wiklander if (!rwp->state) 337*aad1cf6bSJens Wiklander panic(); 338*aad1cf6bSJens Wiklander mm = tee_mm_alloc(&tee_mm_sec_ddr, num_fobj_pages * SMALL_PAGE_SIZE); 339*aad1cf6bSJens Wiklander if (!mm) 340*aad1cf6bSJens Wiklander panic(); 341*aad1cf6bSJens Wiklander rwp->store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM); 342*aad1cf6bSJens Wiklander assert(rwp->store); 343*aad1cf6bSJens Wiklander 344*aad1cf6bSJens Wiklander fobj_init(&rwp->fobj, &ops_rwp_unpaged_iv, num_fobj_pages); 345*aad1cf6bSJens Wiklander 346*aad1cf6bSJens Wiklander rwp_state_base = (void *)tee_pager_init_iv_area(&rwp->fobj); 347*aad1cf6bSJens Wiklander assert(rwp_state_base); 348*aad1cf6bSJens Wiklander 349*aad1cf6bSJens Wiklander rwp_store_base = phys_to_virt(tee_mm_sec_ddr.lo, MEM_AREA_TA_RAM); 350*aad1cf6bSJens Wiklander assert(rwp_store_base); 351*aad1cf6bSJens Wiklander 352*aad1cf6bSJens Wiklander return TEE_SUCCESS; 353ee546289SJens Wiklander } 354*aad1cf6bSJens Wiklander driver_init_late(rwp_init); 355ee546289SJens Wiklander 356ee546289SJens Wiklander struct fobj_rop { 357ee546289SJens Wiklander uint8_t *hashes; 358ee546289SJens Wiklander uint8_t *store; 359ee546289SJens Wiklander struct fobj fobj; 360ee546289SJens Wiklander }; 361ee546289SJens Wiklander 362c6744caaSJens Wiklander static const struct fobj_ops ops_ro_paged; 363c6744caaSJens Wiklander 364c6744caaSJens Wiklander static void rop_init(struct fobj_rop *rop, const struct fobj_ops *ops, 365c6744caaSJens Wiklander unsigned int num_pages, void *hashes, void *store) 366c6744caaSJens Wiklander { 367c6744caaSJens Wiklander rop->hashes = hashes; 368c6744caaSJens Wiklander rop->store = store; 369c6744caaSJens 
Wiklander fobj_init(&rop->fobj, ops, num_pages); 370c6744caaSJens Wiklander } 371ee546289SJens Wiklander 372ee546289SJens Wiklander struct fobj *fobj_ro_paged_alloc(unsigned int num_pages, void *hashes, 373ee546289SJens Wiklander void *store) 374ee546289SJens Wiklander { 375ee546289SJens Wiklander struct fobj_rop *rop = NULL; 376ee546289SJens Wiklander 377ee546289SJens Wiklander assert(num_pages && hashes && store); 378ee546289SJens Wiklander 379ee546289SJens Wiklander rop = calloc(1, sizeof(*rop)); 380ee546289SJens Wiklander if (!rop) 381ee546289SJens Wiklander return NULL; 382ee546289SJens Wiklander 383c6744caaSJens Wiklander rop_init(rop, &ops_ro_paged, num_pages, hashes, store); 384ee546289SJens Wiklander 385ee546289SJens Wiklander return &rop->fobj; 386ee546289SJens Wiklander } 387ee546289SJens Wiklander 388ee546289SJens Wiklander static struct fobj_rop *to_rop(struct fobj *fobj) 389ee546289SJens Wiklander { 390ee546289SJens Wiklander assert(fobj->ops == &ops_ro_paged); 391ee546289SJens Wiklander 392ee546289SJens Wiklander return container_of(fobj, struct fobj_rop, fobj); 393ee546289SJens Wiklander } 394ee546289SJens Wiklander 395c6744caaSJens Wiklander static void rop_uninit(struct fobj_rop *rop) 396c6744caaSJens Wiklander { 397c6744caaSJens Wiklander fobj_uninit(&rop->fobj); 398c6744caaSJens Wiklander tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rop->store))); 399c6744caaSJens Wiklander free(rop->hashes); 400c6744caaSJens Wiklander } 401c6744caaSJens Wiklander 402ee546289SJens Wiklander static void rop_free(struct fobj *fobj) 403ee546289SJens Wiklander { 404ee546289SJens Wiklander struct fobj_rop *rop = to_rop(fobj); 405ee546289SJens Wiklander 406c6744caaSJens Wiklander rop_uninit(rop); 407ee546289SJens Wiklander free(rop); 408ee546289SJens Wiklander } 409ee546289SJens Wiklander 410c6744caaSJens Wiklander static TEE_Result rop_load_page_helper(struct fobj_rop *rop, 411c6744caaSJens Wiklander unsigned int page_idx, void *va) 412c6744caaSJens 
Wiklander { 413c6744caaSJens Wiklander const uint8_t *hash = rop->hashes + page_idx * TEE_SHA256_HASH_SIZE; 414c6744caaSJens Wiklander const uint8_t *src = rop->store + page_idx * SMALL_PAGE_SIZE; 415c6744caaSJens Wiklander 416c6744caaSJens Wiklander assert(refcount_val(&rop->fobj.refc)); 417c6744caaSJens Wiklander assert(page_idx < rop->fobj.num_pages); 418c6744caaSJens Wiklander memcpy(va, src, SMALL_PAGE_SIZE); 419c6744caaSJens Wiklander 420c6744caaSJens Wiklander return hash_sha256_check(hash, va, SMALL_PAGE_SIZE); 421c6744caaSJens Wiklander } 422c6744caaSJens Wiklander 423ee546289SJens Wiklander static TEE_Result rop_load_page(struct fobj *fobj, unsigned int page_idx, 424ee546289SJens Wiklander void *va) 425ee546289SJens Wiklander { 426c6744caaSJens Wiklander return rop_load_page_helper(to_rop(fobj), page_idx, va); 427ee546289SJens Wiklander } 4283639b55fSJerome Forissier DECLARE_KEEP_PAGER(rop_load_page); 429ee546289SJens Wiklander 430ee546289SJens Wiklander static TEE_Result rop_save_page(struct fobj *fobj __unused, 431ee546289SJens Wiklander unsigned int page_idx __unused, 432ee546289SJens Wiklander const void *va __unused) 433ee546289SJens Wiklander { 434ee546289SJens Wiklander return TEE_ERROR_GENERIC; 435ee546289SJens Wiklander } 4363639b55fSJerome Forissier DECLARE_KEEP_PAGER(rop_save_page); 437ee546289SJens Wiklander 438c6744caaSJens Wiklander static const struct fobj_ops ops_ro_paged __rodata_unpaged = { 439ee546289SJens Wiklander .free = rop_free, 440ee546289SJens Wiklander .load_page = rop_load_page, 441ee546289SJens Wiklander .save_page = rop_save_page, 442ee546289SJens Wiklander }; 443ee546289SJens Wiklander 444c6744caaSJens Wiklander #ifdef CFG_CORE_ASLR 445c6744caaSJens Wiklander /* 446c6744caaSJens Wiklander * When using relocated pages the relocation information must be applied 447c6744caaSJens Wiklander * before the pages can be used. 
With read-only paging the content is only 448c6744caaSJens Wiklander * integrity protected so relocation cannot be applied on pages in the less 449c6744caaSJens Wiklander * secure "store" or the load_address selected by ASLR could be given away. 450c6744caaSJens Wiklander * This means that each time a page has been loaded and verified it has to 451c6744caaSJens Wiklander * have its relocation information applied before it can be used. 452c6744caaSJens Wiklander * 453c6744caaSJens Wiklander * Only the relative relocations are supported, this allows a rather compact 454c6744caaSJens Wiklander * represenation of the needed relocation information in this struct. 455c6744caaSJens Wiklander * r_offset is replaced with the offset into the page that need to be updated, 456c6744caaSJens Wiklander * this number can never be larger than SMALL_PAGE_SIZE so a uint16_t can be 457c6744caaSJens Wiklander * used to represent it. 458c6744caaSJens Wiklander * 459c6744caaSJens Wiklander * All relocations are converted and stored in @relocs. @page_reloc_idx is 460c6744caaSJens Wiklander * an array of length @rop.fobj.num_pages with an entry for each page. If 461c6744caaSJens Wiklander * @page_reloc_idx[page_idx] isn't UINT16_MAX it's an index into @relocs. 
462c6744caaSJens Wiklander */ 463c6744caaSJens Wiklander struct fobj_ro_reloc_paged { 464c6744caaSJens Wiklander uint16_t *page_reloc_idx; 465c6744caaSJens Wiklander uint16_t *relocs; 466c6744caaSJens Wiklander unsigned int num_relocs; 467c6744caaSJens Wiklander struct fobj_rop rop; 468c6744caaSJens Wiklander }; 469c6744caaSJens Wiklander 470c6744caaSJens Wiklander static const struct fobj_ops ops_ro_reloc_paged; 471c6744caaSJens Wiklander 472c6744caaSJens Wiklander static unsigned int get_num_rels(unsigned int num_pages, 473c6744caaSJens Wiklander unsigned int reloc_offs, 474c6744caaSJens Wiklander const uint32_t *reloc, unsigned int num_relocs) 475c6744caaSJens Wiklander { 476c6744caaSJens Wiklander const unsigned int align_mask __maybe_unused = sizeof(long) - 1; 477c6744caaSJens Wiklander unsigned int nrels = 0; 478c6744caaSJens Wiklander unsigned int n = 0; 479c6744caaSJens Wiklander vaddr_t offs = 0; 480c6744caaSJens Wiklander 481c6744caaSJens Wiklander /* 482c6744caaSJens Wiklander * Count the number of relocations which are needed for these 483c6744caaSJens Wiklander * pages. Also check that the data is well formed, only expected 484c6744caaSJens Wiklander * relocations and sorted in order of address which it applies to. 
485c6744caaSJens Wiklander */ 486c6744caaSJens Wiklander for (; n < num_relocs; n++) { 487c6744caaSJens Wiklander assert(ALIGNMENT_IS_OK(reloc[n], unsigned long)); 488c6744caaSJens Wiklander assert(offs < reloc[n]); /* check that it's sorted */ 489c6744caaSJens Wiklander offs = reloc[n]; 490c6744caaSJens Wiklander if (offs >= reloc_offs && 491c6744caaSJens Wiklander offs <= reloc_offs + num_pages * SMALL_PAGE_SIZE) 492c6744caaSJens Wiklander nrels++; 493c6744caaSJens Wiklander } 494c6744caaSJens Wiklander 495c6744caaSJens Wiklander return nrels; 496c6744caaSJens Wiklander } 497c6744caaSJens Wiklander 498c6744caaSJens Wiklander static void init_rels(struct fobj_ro_reloc_paged *rrp, unsigned int reloc_offs, 499c6744caaSJens Wiklander const uint32_t *reloc, unsigned int num_relocs) 500c6744caaSJens Wiklander { 501c6744caaSJens Wiklander unsigned int npg = rrp->rop.fobj.num_pages; 502c6744caaSJens Wiklander unsigned int pg_idx = 0; 503c6744caaSJens Wiklander unsigned int reln = 0; 504c6744caaSJens Wiklander unsigned int n = 0; 505c6744caaSJens Wiklander uint32_t r = 0; 506c6744caaSJens Wiklander 507c6744caaSJens Wiklander for (n = 0; n < npg; n++) 508c6744caaSJens Wiklander rrp->page_reloc_idx[n] = UINT16_MAX; 509c6744caaSJens Wiklander 510c6744caaSJens Wiklander for (n = 0; n < num_relocs ; n++) { 511c6744caaSJens Wiklander if (reloc[n] < reloc_offs) 512c6744caaSJens Wiklander continue; 513c6744caaSJens Wiklander 514c6744caaSJens Wiklander /* r is the offset from beginning of this fobj */ 515c6744caaSJens Wiklander r = reloc[n] - reloc_offs; 516c6744caaSJens Wiklander 517c6744caaSJens Wiklander pg_idx = r / SMALL_PAGE_SIZE; 518c6744caaSJens Wiklander if (pg_idx >= npg) 519c6744caaSJens Wiklander break; 520c6744caaSJens Wiklander 521c6744caaSJens Wiklander if (rrp->page_reloc_idx[pg_idx] == UINT16_MAX) 522c6744caaSJens Wiklander rrp->page_reloc_idx[pg_idx] = reln; 523c6744caaSJens Wiklander rrp->relocs[reln] = r - pg_idx * SMALL_PAGE_SIZE; 524c6744caaSJens Wiklander 
reln++; 525c6744caaSJens Wiklander } 526c6744caaSJens Wiklander 527c6744caaSJens Wiklander assert(reln == rrp->num_relocs); 528c6744caaSJens Wiklander } 529c6744caaSJens Wiklander 530c6744caaSJens Wiklander struct fobj *fobj_ro_reloc_paged_alloc(unsigned int num_pages, void *hashes, 531c6744caaSJens Wiklander unsigned int reloc_offs, 532c6744caaSJens Wiklander const void *reloc, 533c6744caaSJens Wiklander unsigned int reloc_len, void *store) 534c6744caaSJens Wiklander { 535c6744caaSJens Wiklander struct fobj_ro_reloc_paged *rrp = NULL; 536c6744caaSJens Wiklander const unsigned int num_relocs = reloc_len / sizeof(uint32_t); 537c6744caaSJens Wiklander unsigned int nrels = 0; 538c6744caaSJens Wiklander 539c6744caaSJens Wiklander assert(ALIGNMENT_IS_OK(reloc, uint32_t)); 540c6744caaSJens Wiklander assert(ALIGNMENT_IS_OK(reloc_len, uint32_t)); 541c6744caaSJens Wiklander assert(num_pages && hashes && store); 542c6744caaSJens Wiklander if (!reloc_len) { 543c6744caaSJens Wiklander assert(!reloc); 544c6744caaSJens Wiklander return fobj_ro_paged_alloc(num_pages, hashes, store); 545c6744caaSJens Wiklander } 546c6744caaSJens Wiklander assert(reloc); 547c6744caaSJens Wiklander 548c6744caaSJens Wiklander nrels = get_num_rels(num_pages, reloc_offs, reloc, num_relocs); 549c6744caaSJens Wiklander if (!nrels) 550c6744caaSJens Wiklander return fobj_ro_paged_alloc(num_pages, hashes, store); 551c6744caaSJens Wiklander 552c6744caaSJens Wiklander rrp = calloc(1, sizeof(*rrp) + num_pages * sizeof(uint16_t) + 553c6744caaSJens Wiklander nrels * sizeof(uint16_t)); 554c6744caaSJens Wiklander if (!rrp) 555c6744caaSJens Wiklander return NULL; 556c6744caaSJens Wiklander rop_init(&rrp->rop, &ops_ro_reloc_paged, num_pages, hashes, store); 557c6744caaSJens Wiklander rrp->page_reloc_idx = (uint16_t *)(rrp + 1); 558c6744caaSJens Wiklander rrp->relocs = rrp->page_reloc_idx + num_pages; 559c6744caaSJens Wiklander rrp->num_relocs = nrels; 560c6744caaSJens Wiklander init_rels(rrp, reloc_offs, reloc, 
			  num_relocs);

	return &rrp->rop.fobj;
}

/* Recover the enclosing fobj_ro_reloc_paged from its embedded fobj. */
static struct fobj_ro_reloc_paged *to_rrp(struct fobj *fobj)
{
	assert(fobj->ops == &ops_ro_reloc_paged);

	return container_of(fobj, struct fobj_ro_reloc_paged, rop.fobj);
}

/* Release a read-only relocation-paged fobj and its backing state. */
static void rrp_free(struct fobj *fobj)
{
	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);

	rop_uninit(&rrp->rop);
	free(rrp);
}

/*
 * Load page @page_idx of @fobj into @va, then apply the ASLR relocations
 * belonging to that page by adding the boot-time load offset to each
 * recorded location.
 */
static TEE_Result rrp_load_page(struct fobj *fobj, unsigned int page_idx,
				void *va)
{
	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
	unsigned int end_rel = rrp->num_relocs;
	TEE_Result res = TEE_SUCCESS;
	unsigned long *where = NULL;
	unsigned int n = 0;

	res = rop_load_page_helper(&rrp->rop, page_idx, va);
	if (res)
		return res;

	/* Find the reloc index of the next page to tell when we're done */
	for (n = page_idx + 1; n < fobj->num_pages; n++) {
		if (rrp->page_reloc_idx[n] != UINT16_MAX) {
			end_rel = rrp->page_reloc_idx[n];
			break;
		}
	}

	/* Patch each recorded location within this page. */
	for (n = rrp->page_reloc_idx[page_idx]; n < end_rel; n++) {
		where = (void *)((vaddr_t)va + rrp->relocs[n]);
		*where += boot_mmu_config.load_offset;
	}

	return TEE_SUCCESS;
}
/* Referenced by the pager at page-fault time: must stay unpaged. */
DECLARE_KEEP_PAGER(rrp_load_page);

static const struct fobj_ops ops_ro_reloc_paged __rodata_unpaged = {
	.free = rrp_free,
	.load_page = rrp_load_page,
	.save_page = rop_save_page, /* Direct reuse */
};
#endif /*CFG_CORE_ASLR*/

static const struct fobj_ops ops_locked_paged;

/*
 * Allocate a locked-paged fobj of @num_pages pages: pages are delivered
 * zero-filled on load and are never saved back (see lop_save_page()).
 * Returns NULL on allocation failure.
 */
struct fobj *fobj_locked_paged_alloc(unsigned int num_pages)
{
	struct fobj *f = NULL;

	assert(num_pages);

	f = calloc(1, sizeof(*f));
	if (!f)
		return NULL;

	fobj_init(f, &ops_locked_paged, num_pages);

	return f;
}

static void lop_free(struct fobj *fobj)
{
	assert(fobj->ops == &ops_locked_paged);
	fobj_uninit(fobj);
	free(fobj);
}

/* Locked pages have no backing store: loading produces a zeroed page. */
static TEE_Result lop_load_page(struct fobj *fobj __maybe_unused,
				unsigned int page_idx __maybe_unused,
				void *va)
{
	assert(fobj->ops == &ops_locked_paged);
	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	memset(va, 0, SMALL_PAGE_SIZE);

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(lop_load_page);

/*
 * Saving a locked page is never expected to happen; always fail so any
 * such attempt is caught.
 */
static TEE_Result lop_save_page(struct fobj *fobj __unused,
				unsigned int page_idx __unused,
				const void *va __unused)
{
	return TEE_ERROR_GENERIC;
}
DECLARE_KEEP_PAGER(lop_save_page);

static const struct fobj_ops ops_locked_paged __rodata_unpaged = {
	.free = lop_free,
	.load_page = lop_load_page,
	.save_page = lop_save_page,
};
#endif /*CFG_WITH_PAGER*/

#ifndef CFG_PAGED_USER_TA

/* fobj backed directly by physically contiguous secure DDR (TA RAM). */
struct fobj_sec_mem {
	tee_mm_entry_t *mm;	/* Physical allocation from tee_mm_sec_ddr */
	struct fobj fobj;
};

static const struct fobj_ops ops_sec_mem;

/*
 * Allocate a secure-memory fobj of @num_pages pages from tee_mm_sec_ddr
 * and zero it. Returns NULL on overflow or allocation/mapping failure.
 */
struct fobj *fobj_sec_mem_alloc(unsigned int num_pages)
{
	struct fobj_sec_mem *f = calloc(1, sizeof(*f));
	size_t size = 0;
	void *va = NULL;

	if (!f)
		return NULL;

	/* Guard against num_pages * SMALL_PAGE_SIZE wrapping around */
	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
		goto err;

	f->mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
	if (!f->mm)
		goto err;

	va = phys_to_virt(tee_mm_get_smem(f->mm), MEM_AREA_TA_RAM);
	if (!va)
		goto err;

	memset(va, 0, size);
	f->fobj.ops = &ops_sec_mem;
	f->fobj.num_pages = num_pages;
	refcount_set(&f->fobj.refc, 1);

	return &f->fobj;
err:
	/* f->mm may still be NULL here; tee_mm_free() must tolerate that */
	tee_mm_free(f->mm);
	free(f);

	return NULL;
}

/* Recover the enclosing fobj_sec_mem from its embedded fobj. */
static struct fobj_sec_mem *to_sec_mem(struct fobj *fobj)
{
	assert(fobj->ops == &ops_sec_mem);

	return container_of(fobj, struct fobj_sec_mem, fobj);
}

static void sec_mem_free(struct fobj *fobj)
{
	struct fobj_sec_mem *f = to_sec_mem(fobj);

	assert(!refcount_val(&fobj->refc));
	tee_mm_free(f->mm);
	free(f);
}

/*
 * Return the physical address of page @page_idx; valid because the
 * backing store is physically contiguous.
 */
static paddr_t sec_mem_get_pa(struct fobj *fobj, unsigned int page_idx)
{
	struct fobj_sec_mem *f = to_sec_mem(fobj);

	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	return tee_mm_get_smem(f->mm) + page_idx * SMALL_PAGE_SIZE;
}

/* No load_page/save_page: secure-memory fobjs are never paged. */
static const struct fobj_ops ops_sec_mem __rodata_unpaged = {
	.free = sec_mem_free,
	.get_pa = sec_mem_get_pa,
};

#endif /*PAGED_USER_TA*/