xref: /optee_os/core/arch/arm/tee/svc_cache.c (revision c2f5808039471d8cb9ac43385b63fb8dc6aa8ac4)
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <types_ext.h>
#include <utee_types.h>
#include <kernel/tee_ta_manager.h>
#include <mm/tee_mmu.h>
#include <mm/core_memprot.h>

#include "svc_cache.h"

/*
 * cache_operation - dynamic cache clean/invalidate request from a TA
 * It follows the ARM recommendation:
 *     http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0246d/Beicdhde.html
 * Note that this implementation assumes the dsb operations are part of
 * cache_maintenance_l1(), and that the L2 cache sync is part of
 * cache_maintenance_l2().
 */
static TEE_Result cache_operation(struct tee_ta_session *sess,
			enum utee_cache_operation op, void *va, size_t len)
{
	TEE_Result ret;
	paddr_t pa = 0;
	struct user_ta_ctx *utc = to_user_ta_ctx(sess->ctx);

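	/* Only TAs created with TA_FLAG_CACHE_MAINTENANCE may request this */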
	if ((sess->ctx->flags & TA_FLAG_CACHE_MAINTENANCE) == 0)
		return TEE_ERROR_NOT_SUPPORTED;

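	/* The TA must have write access to the whole [va, va + len) range */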
	ret = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_WRITE,
					  (tee_uaddr_t)va, len);
	if (ret != TEE_SUCCESS)
		return TEE_ERROR_ACCESS_DENIED;

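	/*
	 * cache_maintenance_l1() below works on virtual addresses while
	 * cache_maintenance_l2() takes a physical address, so translate
	 * the start of the buffer first.
	 */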
	pa = virt_to_phys(va);
	if (!pa)
		return TEE_ERROR_ACCESS_DENIED;

	switch (op) {
	case TEE_CACHEFLUSH:
		/* Clean L1, Flush L2, Flush L1 */
		ret = cache_maintenance_l1(DCACHE_AREA_CLEAN, va, len);
		if (ret != TEE_SUCCESS)
			return ret;
		ret = cache_maintenance_l2(L2CACHE_AREA_CLEAN_INV, pa, len);
		if (ret != TEE_SUCCESS)
			return ret;
		return cache_maintenance_l1(DCACHE_AREA_CLEAN_INV, va, len);

	case TEE_CACHECLEAN:
		/* Clean L1, Clean L2 */
		ret = cache_maintenance_l1(DCACHE_AREA_CLEAN, va, len);
		if (ret != TEE_SUCCESS)
			return ret;
		return cache_maintenance_l2(L2CACHE_AREA_CLEAN, pa, len);

	case TEE_CACHEINVALIDATE:
		/* Inval L2, Inval L1 */
		ret = cache_maintenance_l2(L2CACHE_AREA_INVALIDATE, pa, len);
		if (ret != TEE_SUCCESS)
			return ret;
		return cache_maintenance_l1(DCACHE_AREA_INVALIDATE, va, len);

	default:
		return TEE_ERROR_NOT_SUPPORTED;
	}
}

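/*
 * syscall_cache_operation - syscall entry point for TA cache requests
 * Looks up the calling TA session, checks that the TA is allowed to do
 * cache maintenance (TA_FLAG_CACHE_MAINTENANCE) and forwards the request
 * to cache_operation().
 */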
TEE_Result syscall_cache_operation(void *va, size_t len, unsigned long op)
{
	TEE_Result res;
	struct tee_ta_session *s = NULL;

	res = tee_ta_get_current_session(&s);
	if (res != TEE_SUCCESS)
		return res;

	if ((s->ctx->flags & TA_FLAG_CACHE_MAINTENANCE) == 0)
		return TEE_ERROR_NOT_SUPPORTED;

	return cache_operation(s, op, va, len);
}