// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2022, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_msg.h>
#include <stdlib.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

struct mobj *mobj_sec_ddr;
struct mobj *mobj_tee_ram_rx;
struct mobj *mobj_tee_ram_rw;

/*
 * mobj_phys implementation
 */

struct mobj_phys {
	struct mobj mobj;
	enum buf_is_attr battr;
	/* Defined by TEE_MATTR_MEM_TYPE_* in tee_mmu_types.h */
	uint32_t mem_type;
	vaddr_t va;
	paddr_t pa;
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj);

static void *mobj_phys_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!moph->va || !mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return (void *)(moph->va + offset);
}

static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	paddr_t p;

	if (!pa)
		return TEE_ERROR_GENERIC;

	p = moph->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_phys_get_pa);

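/*
 * Worked example (illustrative only, the addresses are hypothetical):
 * for a mobj_phys whose buffer starts at pa 0x40100000, a call such as
 *
 *	mobj_get_pa(mobj, 0x1234, SMALL_PAGE_SIZE, &p);
 *
 * first computes 0x40101234 and then masks it down to the page
 * boundary, so p ends up as 0x40101000. Passing granule == 0 skips the
 * masking and returns the exact physical address at the given offset.
 */
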
static TEE_Result mobj_phys_get_mem_type(struct mobj *mobj, uint32_t *mem_type)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = moph->mem_type;
	return TEE_SUCCESS;
}

static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	enum buf_is_attr a;

	a = moph->battr;

	switch (attr) {
	case CORE_MEM_SEC:
		return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
		       a == CORE_MEM_TA_RAM || a == CORE_MEM_SDP_MEM;
	case CORE_MEM_NON_SEC:
		return a == CORE_MEM_NSEC_SHM;
	case CORE_MEM_TEE_RAM:
	case CORE_MEM_TA_RAM:
	case CORE_MEM_NSEC_SHM:
	case CORE_MEM_SDP_MEM:
		return attr == a;
	default:
		return false;
	}
}

static void mobj_phys_free(struct mobj *mobj)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	free(moph);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_phys_ops
__weak __relrodata_unpaged("mobj_phys_ops") = {
	.get_va = mobj_phys_get_va,
	.get_pa = mobj_phys_get_pa,
	.get_phys_offs = NULL, /* only offset 0 */
	.get_mem_type = mobj_phys_get_mem_type,
	.matches = mobj_phys_matches,
	.free = mobj_phys_free,
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_phys_ops);
	return container_of(mobj, struct mobj_phys, mobj);
}

static struct mobj *mobj_phys_init(paddr_t pa, size_t size, uint32_t mem_type,
				   enum buf_is_attr battr,
				   enum teecore_memtypes area_type)
{
	void *va = NULL;
	struct mobj_phys *moph = NULL;
	struct tee_mmap_region *map = NULL;

	if ((pa & CORE_MMU_USER_PARAM_MASK) ||
	    (size & CORE_MMU_USER_PARAM_MASK)) {
		DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
		return NULL;
	}

	if (pa) {
		va = phys_to_virt(pa, area_type, size);
	} else {
		map = core_mmu_find_mapping_exclusive(area_type, size);
		if (!map)
			return NULL;

		pa = map->pa;
		va = (void *)map->va;
	}

	/* Only SDP memory may not have a virtual address */
	if (!va && battr != CORE_MEM_SDP_MEM)
		return NULL;

	moph = calloc(1, sizeof(*moph));
	if (!moph)
		return NULL;

	moph->battr = battr;
	moph->mem_type = mem_type;
	moph->mobj.size = size;
	moph->mobj.ops = &mobj_phys_ops;
	refcount_set(&moph->mobj.refc, 1);
	moph->pa = pa;
	moph->va = (vaddr_t)va;

	return &moph->mobj;
}

struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t mem_type,
			     enum buf_is_attr battr)
{
	enum teecore_memtypes area_type;

	switch (battr) {
	case CORE_MEM_TEE_RAM:
		area_type = MEM_AREA_TEE_RAM_RW_DATA;
		break;
	case CORE_MEM_TA_RAM:
		area_type = MEM_AREA_TA_RAM;
		break;
	case CORE_MEM_NSEC_SHM:
		area_type = MEM_AREA_NSEC_SHM;
		break;
	case CORE_MEM_SDP_MEM:
		area_type = MEM_AREA_SDP_MEM;
		break;
	default:
		DMSG("can't allocate with specified attribute");
		return NULL;
	}

	return mobj_phys_init(pa, size, mem_type, battr, area_type);
}

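/*
 * Usage sketch (illustrative only; the address range is hypothetical):
 * a caller registering a Secure Data Path pool could do
 *
 *	struct mobj *m = mobj_phys_alloc(0x42000000, 0x100000,
 *					 TEE_MATTR_MEM_TYPE_CACHED,
 *					 CORE_MEM_SDP_MEM);
 *
 * Note that for CORE_MEM_SDP_MEM the object may be created without a
 * virtual address, in which case mobj_get_va() returns NULL.
 */
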
/*
 * mobj_virt implementation
 */

static void mobj_virt_assert_type(struct mobj *mobj);

static void *mobj_virt_get_va(struct mobj *mobj, size_t offset,
			      size_t len __maybe_unused)
{
	mobj_virt_assert_type(mobj);
	assert(mobj_check_offset_and_len(mobj, offset, len));

	return (void *)(vaddr_t)offset;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_virt_ops
__weak __relrodata_unpaged("mobj_virt_ops") = {
	.get_va = mobj_virt_get_va,
};

static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
{
	assert(mobj->ops == &mobj_virt_ops);
}

struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };

/*
 * mobj_shm implementation. A mobj_shm represents a buffer in the
 * predefined non-secure shared memory region:
 * - it is physically contiguous.
 * - it is identified in the static physical layout as MEM_AREA_NSEC_SHM.
 * - its mobjs match both the specific CORE_MEM_NSEC_SHM attribute and
 *   the generic non-secure CORE_MEM_NON_SEC attribute.
 */

struct mobj_shm {
	struct mobj mobj;
	paddr_t pa;
	uint64_t cookie;
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj);

static void *mobj_shm_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	if (!mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return phys_to_virt(m->pa + offset, MEM_AREA_NSEC_SHM,
			    mobj->size - offset);
}

static TEE_Result mobj_shm_get_pa(struct mobj *mobj, size_t offs,
				  size_t granule, paddr_t *pa)
{
	struct mobj_shm *m = to_mobj_shm(mobj);
	paddr_t p;

	if (!pa || offs >= mobj->size)
		return TEE_ERROR_GENERIC;

	p = m->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_shm_get_pa);

static size_t mobj_shm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	assert(IS_POWER_OF_TWO(granule));
	return to_mobj_shm(mobj)->pa & (granule - 1);
}

static bool mobj_shm_matches(struct mobj *mobj __unused, enum buf_is_attr attr)
{
	return attr == CORE_MEM_NSEC_SHM || attr == CORE_MEM_NON_SEC;
}

static TEE_Result mobj_shm_get_mem_type(struct mobj *mobj __unused,
					uint32_t *mem_type)
{
	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static void mobj_shm_free(struct mobj *mobj)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	free(m);
}

static uint64_t mobj_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_shm(mobj)->cookie;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_shm_ops
__weak __relrodata_unpaged("mobj_shm_ops") = {
	.get_va = mobj_shm_get_va,
	.get_pa = mobj_shm_get_pa,
	.get_phys_offs = mobj_shm_get_phys_offs,
	.get_mem_type = mobj_shm_get_mem_type,
	.matches = mobj_shm_matches,
	.free = mobj_shm_free,
	.get_cookie = mobj_shm_get_cookie,
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_shm_ops);
	return container_of(mobj, struct mobj_shm, mobj);
}

struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie)
{
	struct mobj_shm *m;

	if (!core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_shm_ops;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&m->mobj.refc, 1);
	m->pa = pa;
	m->cookie = cookie;

	return &m->mobj;
}

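/*
 * Worked example (illustrative only, the addresses are hypothetical and
 * must lie in the non-secure shared memory region): for a mobj created
 * with mobj_shm_alloc(0x80001200, 0x800, cookie),
 * mobj_get_phys_offs(m, SMALL_PAGE_SIZE) returns 0x200, i.e. the offset
 * of the buffer within its first small page, while mobj_get_pa(m, 0, 0, &p)
 * returns the unrounded start address 0x80001200.
 */
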
struct mobj_with_fobj {
	struct fobj *fobj;
	struct file *file;
	struct mobj mobj;
	uint8_t mem_type;
};

const struct mobj_ops mobj_with_fobj_ops;

struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file,
				  uint32_t mem_type)
{
	struct mobj_with_fobj *m = NULL;

	assert(!(mem_type & ~TEE_MATTR_MEM_TYPE_MASK));

	if (!fobj)
		return NULL;
	if (mem_type > UINT8_MAX)
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_with_fobj_ops;
	refcount_set(&m->mobj.refc, 1);
	m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	m->fobj = fobj_get(fobj);
	m->file = file_get(file);
	m->mem_type = mem_type;

	return &m->mobj;
}

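/*
 * Usage sketch (illustrative only; assumes one of the fobj allocators
 * from <mm/fobj.h>, for instance fobj_locked_paged_alloc()): the mobj
 * takes its own reference on the fobj, so the caller drops the initial
 * one once the mobj has been created.
 *
 *	struct fobj *f = fobj_locked_paged_alloc(num_pages);
 *	struct mobj *m = NULL;
 *
 *	if (f) {
 *		m = mobj_with_fobj_alloc(f, NULL,
 *					 TEE_MATTR_MEM_TYPE_CACHED);
 *		fobj_put(f);
 *	}
 */
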
static struct mobj_with_fobj *to_mobj_with_fobj(struct mobj *mobj)
{
	assert(mobj && mobj->ops == &mobj_with_fobj_ops);

	return container_of(mobj, struct mobj_with_fobj, mobj);
}

static bool mobj_with_fobj_matches(struct mobj *mobj __maybe_unused,
				   enum buf_is_attr attr)
{
	assert(to_mobj_with_fobj(mobj));

	/*
	 * All fobjs are supposed to be mapped secure so classify them as
	 * CORE_MEM_SEC. Stay out of CORE_MEM_TEE_RAM etc; if that
	 * information is needed it can probably be carried in another
	 * way than putting the burden directly on fobj.
	 */
	return attr == CORE_MEM_SEC;
}

static void mobj_with_fobj_free(struct mobj *mobj)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	fobj_put(m->fobj);
	file_put(m->file);
	free(m);
}

static struct fobj *mobj_with_fobj_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_with_fobj(mobj)->fobj);
}

static TEE_Result mobj_with_fobj_get_mem_type(struct mobj *mobj,
					      uint32_t *mem_type)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = m->mem_type;

	return TEE_SUCCESS;
}

static TEE_Result mobj_with_fobj_get_pa(struct mobj *mobj, size_t offs,
					size_t granule, paddr_t *pa)
{
	struct mobj_with_fobj *f = to_mobj_with_fobj(mobj);
	paddr_t p = 0;

	if (!f->fobj->ops->get_pa) {
		assert(mobj_is_paged(mobj));
		return TEE_ERROR_NOT_SUPPORTED;
	}

	p = f->fobj->ops->get_pa(f->fobj, offs / SMALL_PAGE_SIZE) +
	    offs % SMALL_PAGE_SIZE;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_with_fobj_get_pa);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_with_fobj_ops
__weak __relrodata_unpaged("mobj_with_fobj_ops") = {
	.matches = mobj_with_fobj_matches,
	.free = mobj_with_fobj_free,
	.get_fobj = mobj_with_fobj_get_fobj,
	.get_mem_type = mobj_with_fobj_get_mem_type,
	.get_pa = mobj_with_fobj_get_pa,
};

#ifdef CFG_PAGED_USER_TA
bool mobj_is_paged(struct mobj *mobj)
{
	if (mobj->ops == &mobj_with_fobj_ops &&
	    !to_mobj_with_fobj(mobj)->fobj->ops->get_pa)
		return true;

	return false;
}
#endif /*CFG_PAGED_USER_TA*/

static TEE_Result mobj_init(void)
{
	mobj_sec_ddr = mobj_phys_alloc(tee_mm_sec_ddr.lo,
				       tee_mm_sec_ddr.size,
				       TEE_MATTR_MEM_TYPE_CACHED,
				       CORE_MEM_TA_RAM);
	if (!mobj_sec_ddr)
		panic("Failed to register secure ta ram");

	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
		mobj_tee_ram_rx = mobj_phys_init(0,
						 VCORE_UNPG_RX_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RX);
		if (!mobj_tee_ram_rx)
			panic("Failed to register tee ram rx");

		mobj_tee_ram_rw = mobj_phys_init(0,
						 VCORE_UNPG_RW_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram rw");
	} else {
		mobj_tee_ram_rw = mobj_phys_init(TEE_RAM_START,
						 VCORE_UNPG_RW_PA +
						 VCORE_UNPG_RW_SZ -
						 TEE_RAM_START,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram");

		mobj_tee_ram_rx = mobj_tee_ram_rw;
	}

	return TEE_SUCCESS;
}

driver_init_late(mobj_init);