xref: /optee_os/core/drivers/crypto/caam/caam_sm.c (revision 68045ae95313b78298e98fe4646a161e135ec17b)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2019, 2023 NXP
 */
#include <caam_common.h>
#include <caam_hal_ctrl.h>
#include <caam_hal_sm.h>
#include <caam_jr.h>
#include <caam_status.h>
#include <caam_sm.h>
#include <caam_utils_status.h>
#include <tee_api_defines.h>

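/*
 * Typical usage of this module (a sketch, not a normative sequence):
 * caam_sm_init() is called once with the Job Ring configuration, then a
 * partition is obtained with caam_sm_alloc(), its access rights are
 * restricted with caam_sm_set_access_perm(), and it is released with
 * caam_sm_free().
 */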
/*
 * Secure memory module private data
 */
static struct sm_privdata {
	vaddr_t baseaddr;  /* Secure memory base address */
	vaddr_t ctrl_addr; /* CAAM base address */
	vaddr_t jr_addr;   /* Job Ring base address */
	paddr_t jr_offset; /* Job Ring offset */
} sm_privdata;

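/*
 * Allocate the Secure Memory pages described by @page_desc and return the
 * physical and virtual addresses of the first allocated page in @page_addr.
 */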
enum caam_status caam_sm_alloc(const struct caam_sm_page_desc *page_desc,
			       struct caam_sm_page_addr *page_addr)
{
	enum caam_status ret = CAAM_FAILURE;

	if (!page_desc || !page_addr)
		return CAAM_BAD_PARAM;

	ret = caam_hal_sm_check_page_partition(sm_privdata.jr_addr, page_desc);
	if (ret != CAAM_NO_ERROR) {
		SM_TRACE("Pages %u to %u or partition %u are out of bounds",
			 page_desc->page,
			 page_desc->page + page_desc->page_count - 1,
			 page_desc->partition);
		return ret;
	}

	/* Check if partition is already allocated */
	if (!caam_hal_sm_prtn_is_free(sm_privdata.jr_addr,
				      page_desc->partition)) {
		SM_TRACE("Partition %u not available", page_desc->partition);
		return CAAM_BUSY;
	}

	/* Open secure memory partition to all groups */
	caam_hal_sm_open_access_perm(sm_privdata.jr_addr, page_desc->partition);
	caam_hal_sm_set_access_all_group(sm_privdata.jr_addr,
					 page_desc->partition);

	ret = caam_hal_sm_allocate_page(sm_privdata.jr_addr, page_desc);
	if (ret != CAAM_NO_ERROR) {
		SM_TRACE("Error allocating pages %u to %u in partition %u",
			 page_desc->page,
			 page_desc->page + page_desc->page_count - 1,
			 page_desc->partition);

		/* Free any pages that were allocated before the failure */
		caam_hal_sm_deallocate_pages(sm_privdata.jr_addr, page_desc);

		return ret;
	}

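	/*
	 * The partition addresses are the Secure Memory base (physical or
	 * virtual) plus the offset of the first allocated page within
	 * Secure Memory.
	 */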
	page_addr->paddr = caam_hal_ctrl_get_smvaddr(sm_privdata.ctrl_addr,
						     sm_privdata.jr_offset) +
			   caam_hal_sm_get_pages_size(sm_privdata.jr_addr,
						      page_desc->page);
	page_addr->vaddr = sm_privdata.baseaddr +
			   caam_hal_sm_get_pages_size(sm_privdata.jr_addr,
						      page_desc->page);

	SM_TRACE("Partition %u Pages %u to %u allocated @0x%" PRIxVA
		 " (phys @0x%" PRIxPA ")",
		 page_desc->partition, page_desc->page,
		 page_desc->page + page_desc->page_count - 1, page_addr->vaddr,
		 page_addr->paddr);

	return CAAM_NO_ERROR;
}

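/*
 * Release the Secure Memory pages described by @page_desc and de-allocate
 * the partition owning them. The partition must be owned by the used Job
 * Ring.
 */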
enum caam_status caam_sm_free(const struct caam_sm_page_desc *page_desc)
{
	enum caam_status ret = CAAM_FAILURE;

	if (!page_desc)
		return CAAM_BAD_PARAM;

	SM_TRACE("Free Secure Memory pages %u to %u from partition %u",
		 page_desc->page, page_desc->page + page_desc->page_count - 1,
		 page_desc->partition);

	/*
	 * De-allocate the partition. This automatically releases the
	 * partition's pages to the pool of available pages. If the partition
	 * is marked as CSP, the pages are zeroized. If the partition is
	 * marked as PSP, neither the partition nor its pages are
	 * de-allocated and an error is returned.
	 */
	if (!caam_hal_sm_prtn_is_owned(sm_privdata.jr_addr,
				       page_desc->partition)) {
		SM_TRACE("Partition %u not owned by the used JR",
			 page_desc->partition);
		return CAAM_FAILURE;
	}

	ret = caam_hal_sm_deallocate_pages(sm_privdata.jr_addr, page_desc);
	if (ret) {
		SM_TRACE("De-alloc pages %u to %u error 0x%" PRIx32,
			 page_desc->page,
			 page_desc->page + page_desc->page_count - 1, ret);

		return ret;
	}

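	/* Release the partition itself so that it can be re-allocated */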
	ret = caam_hal_sm_deallocate_partition(sm_privdata.jr_addr,
					       page_desc->partition);
	if (ret) {
		SM_TRACE("De-alloc partition %u error 0x%" PRIx32,
			 page_desc->partition, ret);
		return ret;
	}

	return CAAM_NO_ERROR;
}

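/*
 * Set the access groups and access permissions of the partition described
 * by @page_desc. A non-zero @grp1_perm (resp. @grp2_perm) ties access
 * group 1 (resp. group 2) to the Job Ring owner.
 */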
enum caam_status
caam_sm_set_access_perm(const struct caam_sm_page_desc *page_desc,
			unsigned int grp1_perm, unsigned int grp2_perm)
{
	uint32_t grp1 = UINT32_MAX;
	uint32_t grp2 = UINT32_MAX;

	if (!page_desc)
		return CAAM_BAD_PARAM;

	/* Check that the partition is owned by the used Job Ring */
	if (!caam_hal_sm_prtn_is_owned(sm_privdata.jr_addr,
				       page_desc->partition)) {
		SM_TRACE("Partition %u not owned by current JR",
			 page_desc->partition);
		return CAAM_FAILURE;
	}

	/*
	 * Grant ourselves access to Secure Memory group 1 and/or group 2
	 * when @grp1_perm and/or @grp2_perm is non-zero.
	 *
	 * The Access Group is derived from the Job Ring owner setting,
	 * without the Secure Bit, which is already managed by the Job Ring.
	 */
	if (grp1_perm)
		grp1 = JROWN_ARM_NS;

	if (grp2_perm)
		grp2 = JROWN_ARM_NS;

	caam_hal_sm_set_access_group(sm_privdata.jr_addr, page_desc->partition,
				     grp1, grp2);
	caam_hal_sm_set_access_perm(sm_privdata.jr_addr, page_desc->partition,
				    grp1_perm, grp2_perm);

	return CAAM_NO_ERROR;
}

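/*
 * Initialize the Secure Memory module private data from the Job Ring
 * configuration @jrcfg and retrieve the Secure Memory base address.
 */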
enum caam_status caam_sm_init(struct caam_jrcfg *jrcfg)
{
	if (!jrcfg)
		return CAAM_BAD_PARAM;

	sm_privdata.ctrl_addr = jrcfg->base;
	sm_privdata.jr_addr = jrcfg->base + jrcfg->offset;
	sm_privdata.jr_offset = jrcfg->offset;
	sm_privdata.baseaddr = caam_hal_sm_get_base();

	if (!sm_privdata.baseaddr)
		return CAAM_FAILURE;

	SM_TRACE("Secure Memory Base address = 0x%" PRIxVA,
		 sm_privdata.baseaddr);
	SM_TRACE("CAAM controller address = 0x%" PRIxVA, sm_privdata.ctrl_addr);
	SM_TRACE("CAAM Job Ring address = 0x%" PRIxVA, sm_privdata.jr_addr);

	return CAAM_NO_ERROR;
}