xref: /optee_os/core/include/mm/core_memprot.h (revision 77bdbf67c42209142ef43129e01113d29d9c62f6)
1 /* SPDX-License-Identifier: BSD-2-Clause */
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 #ifndef CORE_MEMPROT_H
7 #define CORE_MEMPROT_H
8 
9 #include <mm/core_mmu.h>
10 #include <types_ext.h>
11 
12 /*
13  * "pbuf_is" support.
14  *
15  * core_vbuf_is()/core_pbuf_is() can be used to check if a teecore mapped
16  * virtual address or a physical address is "Secure", "Unsecure", "external
17  * RAM" and some other fancy attributes.
18  *
 19  * DO NOT use 'buf_is(Secure, buffer)==false' as an assumption that a buffer
 20  * is unsecure! This is NOT a valid assumption! A buffer is certified unsecure
 21  * only if 'buf_is(UnSecure, buffer)==true'.
22  */
23 
 24 /* Memory attributes a buffer can be checked against with core_pbuf_is()/core_vbuf_is() */
 25 enum buf_is_attr {
 26 	CORE_MEM_CACHED,	/* buffer lies in cached memory */
 27 	CORE_MEM_NSEC_SHM,	/* buffer lies in non-secure shared memory */
 28 	CORE_MEM_NON_SEC,	/* buffer is non-secure */
 29 	CORE_MEM_SEC,		/* buffer is secure */
 30 	CORE_MEM_TEE_RAM,	/* buffer lies in TEE (core) RAM */
 31 	CORE_MEM_TA_RAM,	/* buffer lies in TA RAM */
 32 	CORE_MEM_SDP_MEM,	/* buffer lies in SDP (secure data path) memory */
 33 	CORE_MEM_REG_SHM,	/* buffer lies in registered shared memory */
 34 };
35 
36 /* redirect legacy tee_vbuf_is() and tee_pbuf_is() to our routines */
37 #define tee_pbuf_is     core_pbuf_is
38 #define tee_vbuf_is     core_vbuf_is
39 
40 /* Convenience macros */
41 #define tee_pbuf_is_non_sec(buf, len) \
42 		core_pbuf_is(CORE_MEM_NON_SEC, (paddr_t)(buf), (len))
43 
44 #define tee_pbuf_is_sec(buf, len) \
45 		core_pbuf_is(CORE_MEM_SEC, (paddr_t)(buf), (len))
46 
47 #define tee_vbuf_is_non_sec(buf, len) \
48 		core_vbuf_is(CORE_MEM_NON_SEC, (void *)(buf), (len))
49 
50 #define tee_vbuf_is_sec(buf, len) \
51 		core_vbuf_is(CORE_MEM_SEC, (void *)(buf), (len))
52 
53 /*
 54  * Returns true if the buffer complies with the supplied flags.
 55  * If this function returns false, the buffer doesn't comply with the
 56  * supplied flags or something went wrong.
57  *
58  * Note that returning false doesn't guarantee that buf complies with
59  * the complement of the supplied flags.
60  */
61 bool core_pbuf_is(uint32_t flags, paddr_t pbuf, size_t len);
62 
63 /*
64  * Translates the supplied virtual address to a physical address and uses
 65  * core_pbuf_is() to check the compliance of the buffer.
66  */
67 bool core_vbuf_is(uint32_t flags, const void *vbuf, size_t len);
68 
69 /*
70  * Translate physical address to virtual address using specified mapping
71  * Returns NULL on failure or a valid virtual address on success.
72  */
73 void *phys_to_virt(paddr_t pa, enum teecore_memtypes m);
74 
75 /*
76  * Translate physical address to virtual address trying MEM_AREA_IO_SEC
77  * first then MEM_AREA_IO_NSEC if not found.
78  * Returns NULL on failure or a valid virtual address on success.
79  */
80 void *phys_to_virt_io(paddr_t pa);
81 
82 /*
83  * Translate virtual address to physical address
84  * Returns 0 on failure or a valid physical address on success.
85  */
86 paddr_t virt_to_phys(void *va);
87 
88 /*
89  * Return runtime usable address, irrespective of whether
90  * the MMU is enabled or not.
91  */
92 vaddr_t core_mmu_get_va(paddr_t pa, enum teecore_memtypes type);
93 
94 /* Return true if @va relates to a unpaged section else false */
95 bool is_unpaged(void *va);
96 
 97 struct io_pa_va { /* physical/virtual address pair for a device IO region */
 98 	paddr_t pa; /* physical address of the IO region */
 99 	vaddr_t va; /* corresponding virtual address, when mapped (see io_pa_or_va()) */
100 };
101 
102 /*
103  * Helper function to return a physical or virtual address for a device,
104  * depending on whether the MMU is enabled or not
105  * io_pa_or_va() uses secure mapped IO memory if found or fallback to
106  * non-secure mapped IO memory.
107  */
108 vaddr_t io_pa_or_va_secure(struct io_pa_va *p);
109 vaddr_t io_pa_or_va_nsec(struct io_pa_va *p);
110 vaddr_t io_pa_or_va(struct io_pa_va *p);
111 
112 #endif /* CORE_MEMPROT_H */
113