/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */
#ifndef __MM_VM_H
#define __MM_VM_H

#include <tee_api_types.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/user_ta.h>

/* Allocate context resources like ASID and MMU table information */
TEE_Result vm_info_init(struct user_mode_ctx *uctx, struct ts_ctx *ts_ctx);

/* Release context resources like ASID */
void vm_info_final(struct user_mode_ctx *uctx);

/*
 * Creates a memory map of a mobj.
 * A desired virtual address can be specified in @va, otherwise @va must be
 * initialized to 0 and the next available address is chosen.
 *
 * @pad_begin and @pad_end specify how much extra free space should be kept
 * before and after the mapping. This allows mapping, for instance, the
 * first part of an ELF file while guaranteeing that a later mapping of the
 * next part, which must be placed at a fixed offset from the first, will
 * also succeed.
 */
TEE_Result vm_map_pad(struct user_mode_ctx *uctx, vaddr_t *va, size_t len,
		      uint32_t prot, uint32_t flags, struct mobj *mobj,
		      size_t offs, size_t pad_begin, size_t pad_end,
		      size_t align);

/*
 * Creates a memory map of a mobj.
 * A desired virtual address can be specified in @va, otherwise @va must be
 * initialized to 0 and the next available address is chosen.
 */
static inline TEE_Result vm_map(struct user_mode_ctx *uctx, vaddr_t *va,
				size_t len, uint32_t prot, uint32_t flags,
				struct mobj *mobj, size_t offs)
{
	return vm_map_pad(uctx, va, len, prot, flags, mobj, offs, 0, 0, 0);
}
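
/*
 * Illustrative usage sketch (not part of this header): how a loader might
 * combine vm_map_pad() and vm_map() to place two related mobjs at a fixed
 * relative offset. All identifiers except the vm_*() functions and the
 * TEE_MATTR_* protection bits are hypothetical, and the padding arithmetic
 * is an assumption made for the example.
 *
 *	vaddr_t ro_va = 0;
 *	vaddr_t rw_va = 0;
 *	TEE_Result res = TEE_SUCCESS;
 *
 *	// Map the read-only part anywhere, but reserve enough free space
 *	// after it for the read-write part at offset @rw_offs.
 *	res = vm_map_pad(uctx, &ro_va, ro_size, TEE_MATTR_UR | TEE_MATTR_UX,
 *			 0, ro_mobj, 0, 0, rw_offs - ro_size + rw_size, 0);
 *	if (res)
 *		return res;
 *
 *	// Map the read-write part at the required offset from the first map.
 *	rw_va = ro_va + rw_offs;
 *	res = vm_map(uctx, &rw_va, rw_size, TEE_MATTR_UR | TEE_MATTR_UW,
 *		     0, rw_mobj, 0);
 *	if (res)
 *		return res;
 */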

TEE_Result vm_remap(struct user_mode_ctx *uctx, vaddr_t *new_va, vaddr_t old_va,
		    size_t len, size_t pad_begin, size_t pad_end);

TEE_Result vm_get_flags(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
			uint32_t *flags);

TEE_Result vm_get_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
		       uint16_t *prot);

TEE_Result vm_set_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
		       uint32_t prot);

TEE_Result vm_unmap(struct user_mode_ctx *uctx, vaddr_t va, size_t len);

/* Map parameters for a user TA */
TEE_Result vm_map_param(struct user_mode_ctx *uctx, struct tee_ta_param *param,
			void *param_va[TEE_NUM_PARAMS]);
void vm_clean_param(struct user_mode_ctx *uctx);

/*
 * User mode private memory is defined as the user mode image static
 * segments (code, ro/rw static data, heap, stack). The only other virtual
 * memory mapped to user mode is memref parameters. The latter are
 * considered outside user mode private memory as they may be accessed by
 * both the user mode context and its client(s).
 */
bool vm_buf_is_inside_um_private(const struct user_mode_ctx *uctx,
				 const void *va, size_t size);

bool vm_buf_intersects_um_private(const struct user_mode_ctx *uctx,
				  const void *va, size_t size);

TEE_Result vm_buf_to_mboj_offs(const struct user_mode_ctx *uctx,
			       const void *va, size_t size,
			       struct mobj **mobj, size_t *offs);

/* Helper function for virt_to_phys(), shouldn't be used directly elsewhere */
TEE_Result vm_va2pa(const struct user_mode_ctx *uctx, void *ua, paddr_t *pa);

/* Helper function for phys_to_virt(), shouldn't be used directly elsewhere */
void *vm_pa2va(const struct user_mode_ctx *uctx, paddr_t pa, size_t pa_size);

/*
 * Returns TEE_SUCCESS or TEE_ERROR_ACCESS_DENIED when the buffer exists,
 * or another TEE_Result code on other errors.
 */
TEE_Result vm_check_access_rights(const struct user_mode_ctx *uctx,
				  uint32_t flags, uaddr_t uaddr, size_t len);

/* Set user context @ctx or core privileged context if @ctx is NULL */
void vm_set_ctx(struct ts_ctx *ctx);

struct mobj *vm_get_mobj(struct user_mode_ctx *uctx, vaddr_t va, size_t *len,
			 uint16_t *prot, size_t *offs);
#endif /*__MM_VM_H*/