xref: /optee_os/core/include/mm/pgt_cache.h (revision bfdeae238814352fb0922611c41f1ad6f04d7862)
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 */
#ifndef MM_PGT_CACHE_H
#define MM_PGT_CACHE_H

#ifdef CFG_WITH_LPAE
#define PGT_SIZE	(4 * 1024)
#define PGT_NUM_PGT_PER_PAGE	1
#else
#define PGT_SIZE	(1 * 1024)
#define PGT_NUM_PGT_PER_PAGE	4
#endif
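
/*
 * Worked example of the sizing above, assuming the usual 4 KiB small page
 * size: with LPAE each translation table is a full 4 KiB page, so only one
 * table fits per page. Without LPAE the short-descriptor level-2 tables are
 * 1 KiB, so 4096 / PGT_SIZE = 4 tables are carved out of each page, which is
 * what PGT_NUM_PGT_PER_PAGE expresses.
 */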

#include <assert.h>
#include <kernel/tee_ta_manager.h>
#include <sys/queue.h>
#include <types_ext.h>
#include <util.h>

struct ts_ctx;

struct pgt {
	void *tbl;		/* The translation table itself */
	vaddr_t vabase;		/* Base VA translated by this table */
#if !defined(CFG_CORE_PREALLOC_EL0_TBLS)
	struct ts_ctx *ctx;	/* Context owning this table */
#endif
	bool populated;		/* True if the table content is valid */
#if defined(CFG_PAGED_USER_TA)
	uint16_t num_used_entries;	/* Used entries in this table */
#endif
#if defined(CFG_CORE_PREALLOC_EL0_TBLS) || \
	(defined(CFG_WITH_PAGER) && !defined(CFG_WITH_LPAE))
	/* Parent this table was allocated from */
	struct pgt_parent *parent;
#endif
	SLIST_ENTRY(pgt) link;
};

/*
 * A proper value for PGT_CACHE_SIZE depends on many factors: CFG_WITH_LPAE,
 * CFG_TA_ASLR, the size of the TA, the size of memrefs passed to the TA,
 * CFG_ULIBS_SHARED and possibly others. The value is based on the number of
 * threads as an indicator of how large the system might be.
 */
#if CFG_NUM_THREADS < 2
#define PGT_CACHE_SIZE	4
#elif (CFG_NUM_THREADS == 2 && !defined(CFG_WITH_LPAE))
#define PGT_CACHE_SIZE	8
#else
#define PGT_CACHE_SIZE	ROUNDUP(CFG_NUM_THREADS * 2, PGT_NUM_PGT_PER_PAGE)
#endif
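
/*
 * For example, just to illustrate the formula above: a non-LPAE build with
 * CFG_NUM_THREADS = 4 gets PGT_CACHE_SIZE = ROUNDUP(4 * 2, 4) = 8 cached
 * tables, while an LPAE build with CFG_NUM_THREADS = 8 gets
 * ROUNDUP(8 * 2, 1) = 16.
 */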

SLIST_HEAD(pgt_cache, pgt);

/*
 * pgt_check_avail() - check if all needed translation tables are available
 * @pgt_cache:	list of translation tables for the owning context
 * @vm_info:	VM map for the context
 *
 * Returns true if the translation tables needed to map @vm_info are
 * available.
 */
bool pgt_check_avail(struct pgt_cache *pgt_cache, struct vm_info *vm_info);

/*
 * pgt_get_all() - makes all needed translation tables available
 * @pgt_cache:	list of translation tables for the owning context
 * @owning_ctx:	the context to own the tables
 * @vm_info:	VM map for the context
 *
 * Guaranteed to succeed, but may need to sleep for a while to get all the
 * needed translation tables.
 */
#if defined(CFG_CORE_PREALLOC_EL0_TBLS)
static inline void pgt_get_all(struct pgt_cache *pgt_cache __unused,
			       struct ts_ctx *owning_ctx __unused,
			       struct vm_info *vm_info __unused) { }
#else
void pgt_get_all(struct pgt_cache *pgt_cache, struct ts_ctx *owning_ctx,
		 struct vm_info *vm_info);
#endif

/*
 * pgt_put_all() - informs the translation table manager that these tables
 *		   will not be needed for a while
 * @pgt_cache:	list of translation tables to make inactive
 */
#if defined(CFG_CORE_PREALLOC_EL0_TBLS)
static inline void pgt_put_all(struct pgt_cache *pgt_cache __unused) { }
#else
void pgt_put_all(struct pgt_cache *pgt_cache);
#endif
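
/*
 * Minimal sketch of the intended pgt_get_all()/pgt_put_all() call pattern
 * (illustrative only, "utc" and its members stand in for whatever user mode
 * context structure the caller actually has):
 *
 *	pgt_get_all(&utc->pgt_cache, &utc->ts_ctx, &utc->vm_info);
 *	... activate the mapping and run the context ...
 *	pgt_put_all(&utc->pgt_cache);
 */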

/*
 * pgt_clear_ctx_range() - clear translation table entries in a VA range
 * pgt_flush_ctx_range() - free translation tables covering a VA range
 *
 * Both operate on tables owned by @ctx in @pgt_cache.
 */
void pgt_clear_ctx_range(struct pgt_cache *pgt_cache, struct ts_ctx *ctx,
			 vaddr_t begin, vaddr_t end);
void pgt_flush_ctx_range(struct pgt_cache *pgt_cache, struct ts_ctx *ctx,
			 vaddr_t begin, vaddr_t last);

#if defined(CFG_CORE_PREALLOC_EL0_TBLS)
static inline struct pgt *pgt_pop_from_cache_list(vaddr_t vabase __unused,
						  struct ts_ctx *ctx __unused)
{ return NULL; }
static inline void pgt_push_to_cache_list(struct pgt *pgt __unused) { }
#else
struct pgt *pgt_pop_from_cache_list(vaddr_t vabase, struct ts_ctx *ctx);
void pgt_push_to_cache_list(struct pgt *pgt);
#endif
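
/*
 * Sketch of how the cache list pair above is meant to be used (illustrative
 * only): a table that is not currently active can be pushed to the cache
 * list and later reclaimed, content intact, if it is still there:
 *
 *	struct pgt *pgt = pgt_pop_from_cache_list(vabase, ctx);
 *
 *	if (!pgt)
 *		... fall back to allocating and populating a new table ...
 *
 * With CFG_CORE_PREALLOC_EL0_TBLS the stubs above make the lookup always
 * miss and the push a no-op.
 */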

/* Initializes the translation table cache */
#if defined(CFG_CORE_PREALLOC_EL0_TBLS)
static inline void pgt_init(void) { }
#else
void pgt_init(void);
#endif

/* Frees all translation tables assigned to @ctx */
void pgt_flush_ctx(struct ts_ctx *ctx);

#if defined(CFG_PAGED_USER_TA)
static inline void pgt_inc_used_entries(struct pgt *pgt)
{
	pgt->num_used_entries++;
	/* Catch wrap-around of the 16-bit counter */
	assert(pgt->num_used_entries);
}

static inline void pgt_dec_used_entries(struct pgt *pgt)
{
	/* Catch underflow of the counter */
	assert(pgt->num_used_entries);
	pgt->num_used_entries--;
}

static inline void pgt_set_used_entries(struct pgt *pgt, size_t val)
{
	pgt->num_used_entries = val;
}

#else
static inline void pgt_inc_used_entries(struct pgt *pgt __unused)
{
}

static inline void pgt_dec_used_entries(struct pgt *pgt __unused)
{
}

static inline void pgt_set_used_entries(struct pgt *pgt __unused,
					size_t val __unused)
{
}

#endif
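
/*
 * Sketch of how the counters above are meant to be kept balanced by the
 * pager (illustrative only, the entry manipulation itself happens
 * elsewhere):
 *
 *	set a translation table entry   -> pgt_inc_used_entries(pgt);
 *	clear a translation table entry -> pgt_dec_used_entries(pgt);
 *
 * Once the counter reaches zero the table no longer maps anything and may be
 * recycled.
 */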

#endif /*MM_PGT_CACHE_H*/