/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */
#ifndef __MM_CORE_MEMPROT_H
#define __MM_CORE_MEMPROT_H

#include <mm/core_mmu.h>
#include <types_ext.h>

/*
 * "pbuf_is" support.
 *
 * core_vbuf_is()/core_pbuf_is() can be used to check whether a teecore
 * mapped virtual address or a physical address is "Secure", "Non-secure",
 * "external RAM" or has some other attribute.
 *
 * DO NOT use 'buf_is(Secure, buffer) == false' as an assumption that the
 * buffer is Non-secure! That is NOT a valid assumption! A buffer is known
 * to be Non-secure only if 'buf_is(NonSecure, buffer) == true'.
 */
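
/*
 * Example (illustrative sketch of caller-side usage; pa, size and the
 * hypothetical use_as_nonsecure() helper are not part of this API):
 *
 *	// Correct: accept the buffer only once it is proven non-secure.
 *	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, size))
 *		return TEE_ERROR_SECURITY;
 *
 *	// WRONG: a negative "secure" check does not prove the buffer is
 *	// non-secure, it may for instance straddle regions or be unmapped.
 *	if (!core_pbuf_is(CORE_MEM_SEC, pa, size))
 *		use_as_nonsecure(pa, size);
 */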

/* memory attributes */
enum buf_is_attr {
	CORE_MEM_CACHED,
	CORE_MEM_NSEC_SHM,
	CORE_MEM_NON_SEC,
	CORE_MEM_SEC,
	CORE_MEM_TEE_RAM,
	CORE_MEM_SDP_MEM,
	CORE_MEM_REG_SHM,
};

/* redirect legacy tee_vbuf_is() and tee_pbuf_is() to our routines */
#define tee_pbuf_is     core_pbuf_is
#define tee_vbuf_is     core_vbuf_is

/* Convenience macros */
#define tee_pbuf_is_non_sec(buf, len) \
		core_pbuf_is(CORE_MEM_NON_SEC, (paddr_t)(buf), (len))

#define tee_pbuf_is_sec(buf, len) \
		core_pbuf_is(CORE_MEM_SEC, (paddr_t)(buf), (len))

#define tee_vbuf_is_non_sec(buf, len) \
		core_vbuf_is(CORE_MEM_NON_SEC, (void *)(buf), (len))

#define tee_vbuf_is_sec(buf, len) \
		core_vbuf_is(CORE_MEM_SEC, (void *)(buf), (len))

/*
 * This function returns true if the buffer complies with the supplied flags.
 * If it returns false the buffer either doesn't comply with the supplied
 * flags or something went wrong.
 *
 * Note that returning false doesn't guarantee that the buffer complies with
 * the complement of the supplied flags.
 */
bool core_pbuf_is(uint32_t flags, paddr_t pbuf, size_t len);

/*
 * Translates the supplied virtual address to a physical address and uses
 * core_pbuf_is() to check the compliance of the buffer.
 */
bool core_vbuf_is(uint32_t flags, const void *vbuf, size_t len);
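
/*
 * Example (illustrative sketch; va, len and the chosen attribute are
 * hypothetical):
 *
 *	if (!core_vbuf_is(CORE_MEM_NON_SEC, va, len))
 *		return TEE_ERROR_BAD_PARAMETERS;
 */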

/*
 * Translates a physical address to a virtual address using the specified
 * mapping type. It looks for a mapping of that type which also covers the
 * requested length of data starting at the given physical address.
 * The len parameter can be set to 1 if the caller knows that (pa + len)
 * doesn't cross a mapping granule boundary.
 * Returns NULL on failure or a valid virtual address on success.
 */
void *phys_to_virt(paddr_t pa, enum teecore_memtypes m, size_t len);
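
/*
 * Example (illustrative sketch; FOO_BASE and FOO_REG_SIZE stand in for
 * platform constants of a device mapped as secure I/O):
 *
 *	vaddr_t va = (vaddr_t)phys_to_virt(FOO_BASE, MEM_AREA_IO_SEC,
 *					   FOO_REG_SIZE);
 *	if (!va)
 *		panic();
 */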

/*
 * Translates a physical address to a virtual address, trying MEM_AREA_IO_SEC
 * first and falling back to MEM_AREA_IO_NSEC if no mapping is found. Like
 * phys_to_virt(), it requires the mapping to cover len bytes.
 * Returns NULL on failure or a valid virtual address on success.
 */
void *phys_to_virt_io(paddr_t pa, size_t len);
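
/*
 * Example (illustrative sketch; UART_BASE and UART_REG_SIZE stand in for
 * platform constants):
 *
 *	vaddr_t base = (vaddr_t)phys_to_virt_io(UART_BASE, UART_REG_SIZE);
 */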

/*
 * Translates a virtual address to a physical address.
 * Returns 0 on failure or a valid physical address on success.
 */
paddr_t virt_to_phys(void *va);

static inline paddr_t vaddr_to_phys(vaddr_t va)
{
	return virt_to_phys((void *)va);
}
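
/*
 * Example (illustrative sketch; buf is a hypothetical core-mapped buffer
 * whose physical address is needed, e.g. to program a DMA engine):
 *
 *	paddr_t pa = virt_to_phys(buf);
 *	if (!pa)
 *		return TEE_ERROR_GENERIC;
 */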

/*
 * Returns an address usable at runtime, irrespective of whether the MMU is
 * enabled or not. When the MMU is enabled it is also checked that the
 * requested amount of data is covered by the mapping that was found.
 */
vaddr_t core_mmu_get_va(paddr_t pa, enum teecore_memtypes type, size_t len);
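
/*
 * Example (illustrative sketch; CONSOLE_UART_BASE and CONSOLE_UART_SIZE
 * stand in for platform constants, usable both before and after the MMU is
 * enabled):
 *
 *	vaddr_t base = core_mmu_get_va(CONSOLE_UART_BASE, MEM_AREA_IO_NSEC,
 *				       CONSOLE_UART_SIZE);
 */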

/*
 * is_unpaged() - report unpaged status of an address
 * @va:		virtual address
 *
 * Returns true if @va is non-NULL and, when paging is enabled, lies in the
 * unpaged area, else false.
 */
#ifdef CFG_WITH_PAGER
bool is_unpaged(const void *va);
#else
static inline bool is_unpaged(const void *va) { return va; }
#endif
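
/*
 * Example (illustrative sketch; desc is a hypothetical object that must
 * stay resident, e.g. because it is accessed in interrupt context):
 *
 *	assert(is_unpaged(desc));
 */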

/*
 * is_nexus() - report nexus status of an address
 * @va:		virtual address
 *
 * Returns true if @va is non-NULL and, when NS-virtualization is enabled,
 * lies in the nexus memory area, else false.
 */
#ifdef CFG_NS_VIRTUALIZATION
bool is_nexus(const void *va);
#else
static inline bool is_nexus(const void *va) { return va; }
#endif
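
/*
 * Example (illustrative sketch; obj is a hypothetical object that is
 * expected to be shared across partitions and thus live in nexus memory):
 *
 *	assert(is_nexus(obj));
 */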

struct io_pa_va {
	paddr_t pa;
	vaddr_t va;
};

/*
 * Helper functions to return a physical or virtual address for a device,
 * depending on whether the MMU is enabled or not. Once translated, the
 * virtual address is cached in the va field of struct io_pa_va.
 * io_pa_or_va() uses secure mapped IO memory if found, or falls back to
 * non-secure mapped IO memory.
 */
vaddr_t io_pa_or_va_secure(struct io_pa_va *p, size_t len);
vaddr_t io_pa_or_va_nsec(struct io_pa_va *p, size_t len);
vaddr_t io_pa_or_va(struct io_pa_va *p, size_t len);
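
/*
 * Example (illustrative sketch of the usual driver pattern; FOO_UART_BASE
 * and FOO_UART_REG_SIZE stand in for platform constants):
 *
 *	static struct io_pa_va uart_base = { .pa = FOO_UART_BASE };
 *
 *	vaddr_t base = io_pa_or_va(&uart_base, FOO_UART_REG_SIZE);
 */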

#endif /* __MM_CORE_MEMPROT_H */