/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 */
#ifndef MM_PGT_CACHE_H
#define MM_PGT_CACHE_H

/*
 * Size in bytes of one translation table and the number of tables that
 * fit in one memory page. With LPAE each table occupies a full 4 KiB
 * page; without LPAE a table is 1 KiB so four tables share one page.
 */
#ifdef CFG_WITH_LPAE
#define PGT_SIZE	(4 * 1024)
#define PGT_NUM_PGT_PER_PAGE	1
#else
#define PGT_SIZE	(1 * 1024)
#define PGT_NUM_PGT_PER_PAGE	4
#endif

#include <assert.h>
#include <kernel/tee_ta_manager.h>
#include <sys/queue.h>
#include <types_ext.h>
#include <util.h>

/* Forward declaration: this header only uses pointers to struct ts_ctx. */
struct ts_ctx;

/*
 * struct pgt - one cached translation table
 * @tbl:	the translation table memory itself
 * @vabase:	virtual address associated with this table, presumably the
 *		base of the range it maps — confirm against mm/pgt_cache.c
 * @ctx:	the context this table currently belongs to, if any
 * @populated:	NOTE(review): looks like it flags that the table has been
 *		filled with valid entries — verify in the implementation
 * @num_used_entries: count of entries in use, maintained only with
 *		CFG_PAGED_USER_TA (see the inc/dec/set helpers below)
 * @parent:	back-pointer to the page holding this sub-page-sized table,
 *		only needed when pager is enabled and tables are smaller
 *		than a page (non-LPAE)
 * @link:	linkage for membership in a struct pgt_cache SLIST
 */
struct pgt {
	void *tbl;
	vaddr_t vabase;
	struct ts_ctx *ctx;
	bool populated;
#if defined(CFG_PAGED_USER_TA)
	uint16_t num_used_entries;
#endif
#if defined(CFG_WITH_PAGER) && !defined(CFG_WITH_LPAE)
	struct pgt_parent *parent;
#endif
	SLIST_ENTRY(pgt) link;
};

/*
 * A proper value for PGT_CACHE_SIZE depends on many factors: CFG_WITH_LPAE,
 * CFG_TA_ASLR, size of TA, size of memrefs passed to TA, CFG_ULIBS_SHARED and
 * possibly others. The value is based on the number of threads as an indicator
 * on how large the system might be.
 */
#if CFG_NUM_THREADS < 2
#define PGT_CACHE_SIZE	4
#elif (CFG_NUM_THREADS == 2 && !defined(CFG_WITH_LPAE))
#define PGT_CACHE_SIZE	8
#else
/* Rounded up so the cache is always a whole number of pages worth of tables */
#define PGT_CACHE_SIZE	ROUNDUP(CFG_NUM_THREADS * 2, PGT_NUM_PGT_PER_PAGE)
#endif

/* Declares struct pgt_cache, a singly-linked list head of struct pgt */
SLIST_HEAD(pgt_cache, pgt);

/*
 * pgt_check_avail() - check translation table availability for a mapping
 * @vm_info: VM map to check against
 *
 * NOTE(review): presumably returns true when enough tables can be provided
 * to cover @vm_info — confirm semantics in mm/pgt_cache.c.
 */
bool pgt_check_avail(struct vm_info *vm_info);

/*
 * pgt_get_all() - makes all needed translation tables available
 * @pgt_cache: list of translation tables for the owning context
 * @owning_ctx: the context to own the tables
 * @vm_info: VM map for the context
 *
 * Guaranteed to succeed, but may need to sleep for a while to get all the
 * needed translation tables.
 */
void pgt_get_all(struct pgt_cache *pgt_cache, struct ts_ctx *owning_ctx,
		 struct vm_info *vm_info);

/*
 * pgt_put_all() - informs the translation table manager that these tables
 * will not be needed for a while
 * @pgt_cache: list of translation tables to make inactive
 */
void pgt_put_all(struct pgt_cache *pgt_cache);

/*
 * pgt_clear_ctx_range() - clear table entries for a VA range of a context
 * @pgt_cache: list of translation tables for @ctx
 * @ctx: owning context
 * @begin: start of the virtual address range
 * @end: end of the virtual address range
 */
void pgt_clear_ctx_range(struct pgt_cache *pgt_cache, struct ts_ctx *ctx,
			 vaddr_t begin, vaddr_t end);

/*
 * pgt_flush_ctx_range() - flush tables covering a VA range of a context
 * @pgt_cache: list of translation tables for @ctx
 * @ctx: owning context
 * @begin: start of the virtual address range
 * @last: last address of the range — NOTE(review): named @last here while
 *	  pgt_clear_ctx_range() uses @end; confirm whether it is inclusive
 */
void pgt_flush_ctx_range(struct pgt_cache *pgt_cache, struct ts_ctx *ctx,
			 vaddr_t begin, vaddr_t last);

/*
 * Take a table matching (@vabase, @ctx) from the global cache list, or
 * return it to the cache. Exact caching policy lives in mm/pgt_cache.c.
 */
struct pgt *pgt_pop_from_cache_list(vaddr_t vabase, struct ts_ctx *ctx);
void pgt_push_to_cache_list(struct pgt *pgt);

/* One-time initialization of the translation table cache */
void pgt_init(void);

/* Release/flush all translation tables owned by @ctx */
void pgt_flush_ctx(struct ts_ctx *ctx);

#if defined(CFG_PAGED_USER_TA)
/*
 * Helpers maintaining pgt::num_used_entries when paged user TAs are
 * enabled. The counter is a uint16_t; the asserts catch wrap-around.
 */

/* Increment the used-entry count; assert fires on wrap to 0 (overflow). */
static inline void pgt_inc_used_entries(struct pgt *pgt)
{
	pgt->num_used_entries++;
	assert(pgt->num_used_entries);
}

/* Decrement the used-entry count; assert fires on underflow below 0. */
static inline void pgt_dec_used_entries(struct pgt *pgt)
{
	assert(pgt->num_used_entries);
	pgt->num_used_entries--;
}

/* Set the used-entry count directly (e.g. after (re)populating a table). */
static inline void pgt_set_used_entries(struct pgt *pgt, size_t val)
{
	pgt->num_used_entries = val;
}

#else
/*
 * Without CFG_PAGED_USER_TA the counter does not exist; provide no-op
 * stubs so callers need no #ifdefs.
 */
static inline void pgt_inc_used_entries(struct pgt *pgt __unused)
{
}

static inline void pgt_dec_used_entries(struct pgt *pgt __unused)
{
}

static inline void pgt_set_used_entries(struct pgt *pgt __unused,
					size_t val __unused)
{
}

#endif

#endif /*MM_PGT_CACHE_H*/