xref: /optee_os/core/drivers/crypto/versal/pki/ecc_pki_engine.c (revision d10103ea9c410fa29b3ea69732f561cb370dac98)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (C) 2023 ProvenRun S.A.S
4  */
5 
6 #include <config.h>
7 #include <crypto/crypto_impl.h>
8 #include <drivers/versal_trng.h>
9 #include <drvcrypt.h>
10 #include <drvcrypt_acipher.h>
11 #include <ecc.h>
12 #include <ecc_pki.h>
13 #include <initcall.h>
14 #include <io.h>
15 #include <kernel/delay.h>
16 #include <mm/core_memprot.h>
17 #include <mm/core_mmu.h>
18 #include <stdint.h>
19 #include <string.h>
20 #include <tee/cache.h>
21 #include <tee/tee_cryp_utl.h>
22 #include <util.h>
23 
#ifdef CFG_VERSAL_RNG_DRV
/*
 * PKI Engine, first TRNG instance.
 * The TRNG lives in the PKI control/status region, one 4KB page past the
 * CSR base. It is used as the entropy source for private/ephemeral key
 * generation in this driver.
 */
#define FPD_PKI_CSR_BASE	UINT64_C(0x20400050000)
#define FPD_PKI_TRNG0_BASE	(FPD_PKI_CSR_BASE + 0x1000)
#define FPD_PKI_TRNG0_SIZE	0x1000

/* TRNG instance handled by the generic versal_trng driver (v2 IP) */
static struct versal_trng ecc_pki_trng = {
	.cfg.base = FPD_PKI_TRNG0_BASE,
	.cfg.len = FPD_PKI_TRNG0_SIZE,
	.cfg.version = TRNG_V2,
};
35 
/*
 * One-time hardware init of the PKI TRNG instance.
 * Returns the result of versal_trng_hw_init() on the configured instance.
 */
static TEE_Result versal_ecc_trng_init(void)
{
	/* configure in hybrid mode with derivative function enabled */
	struct trng_usr_cfg usr_cfg = {
		.mode = TRNG_HRNG,
		.seed_life = CFG_VERSAL_TRNG_SEED_LIFE,
		.predict_en = false,
		.df_disable = false,	/* keep the derivation function on */
		.dfmul = CFG_VERSAL_TRNG_DF_MUL,
		.iseed_en = false,	/* no initial seed injected by SW */
		.pstr_en = true,	/* personalization string below */
	};

	/*
	 * NOTE(review): assumes sizeof(usr_cfg.pstr) >=
	 * sizeof(trng_pers_str) — confirm against struct trng_usr_cfg.
	 */
	memcpy(usr_cfg.pstr, trng_pers_str, sizeof(trng_pers_str));
	return versal_trng_hw_init(&ecc_pki_trng, &usr_cfg);
}
52 
/* Fill @buf with @len random bytes from the dedicated PKI TRNG instance */
static inline TEE_Result versal_ecc_trng_get_random_bytes(void *buf, size_t len)
{
	return versal_trng_get_random_bytes(&ecc_pki_trng, buf, len);
}
#else
/* Without CFG_VERSAL_RNG_DRV there is no PKI TRNG instance to set up */
static TEE_Result versal_ecc_trng_init(void)
{
	return TEE_ERROR_NOT_SUPPORTED;
}
62 
/* Fallback: draw entropy from the core RNG when the TRNG driver is absent */
static inline TEE_Result versal_ecc_trng_get_random_bytes(void *buf, size_t len)
{
	return crypto_rng_read(buf, len);
}
#endif
68 
/* FPD system-level control registers (write-protected mux selection) */
#define FPD_SLCR_BASEADDR		0xEC8C0000
#define FPD_SLCR_SIZE			0x4000

#define FPD_SLCR_WPROT0_OFFSET		0x00000000
#define FPD_SLCR_PKI_MUX_SEL_OFFSET	0x00002000

#define FPD_CLEAR_WRITE_PROTECT		0
#define FPD_ENABLE_WRITE_PROTECT	1

#define PKI_MUX_SEL_MASK		0x00000001
#define PKI_MUX_SELECT			0x00000001

/* Reset control register for the PKI block */
#define PSX_CRF_RST_PKI			0xEC200340

#define PKI_ASSERT_RESET		1
#define PKI_DEASSERT_RESET		0

/* PKI crypto (queue/DMA) and control/status register windows */
#define FPD_PKI_CRYPTO_BASEADDR		UINT64_C(0x20400000000)
#define FPD_PKI_CTRLSTAT_BASEADDR	UINT64_C(0x20400050000)

#define FPD_PKI_SIZE			0x30000

#define PKI_ENGINE_GEN_CTRL_OFFSET	UINT64_C(0x00000000)
/* TZ bit: cleared to mark engine transactions as secure */
#define PKI_ENGINE_GEN_CTRL_TZ		0x100

#define PKI_ENGINE_CTRL_OFFSET		UINT64_C(0x00000C00)
/* Counter-measures control bit, see versal_pki_engine_config() */
#define PKI_ENGINE_CTRL_CM_MASK		0x1

/* Request queue (RQ) / completion queue (CQ) programming registers */
#define PKI_CRYPTO_SOFT_RESET_OFFSET	UINT64_C(0x00000038)
#define PKI_CRYPTO_IRQ_STATUS_OFFSET	UINT64_C(0x00000088)
#define PKI_CRYPTO_IRQ_ENABLE_OFFSET	UINT64_C(0x00000090)
#define PKI_CRYPTO_IRQ_RESET_OFFSET	UINT64_C(0x000000A0)
#define PKI_RQ_CFG_PAGE_ADDR_IN_OFFSET	UINT64_C(0x00000100)
#define PKI_RQ_CFG_PAGE_ADDR_OUT_OFFSET	UINT64_C(0x00000108)
#define PKI_RQ_CFG_PAGE_SIZE_OFFSET	UINT64_C(0x00000120)
#define PKI_RQ_CFG_CQID_OFFSET		UINT64_C(0x00000128)
#define PKI_RQ_CFG_PERMISSIONS_OFFSET	UINT64_C(0x00000130)
#define PKI_RQ_CFG_QUEUE_DEPTH_OFFSET	UINT64_C(0x00000140)
#define PKI_CQ_CFG_ADDR_OFFSET		UINT64_C(0x00001100)
#define PKI_CQ_CFG_SIZE_OFFSET		UINT64_C(0x00001108)
#define PKI_CQ_CFG_IRQ_IDX_OFFSET	UINT64_C(0x00001110)
#define PKI_RQ_CTL_NEW_REQUEST_OFFSET	UINT64_C(0x00002000)
#define PKI_CQ_CTL_TRIGPOS_OFFSET	UINT64_C(0x00002028)

#define PKI_RQ_CFG_PERMISSIONS_SAFE	0x0
#define PKI_RQ_CFG_PAGE_SIZE_4096	0xc
#define PKI_RQ_CFG_CQID			0x0
#define	PKI_CQ_CFG_SIZE_4096		0xC
#define PKI_CQ_CFG_IRQ_ID_VAL		0x0
#define PKI_RQ_CFG_QUEUE_DEPTH_VAL	0x80
#define PKI_IRQ_ENABLE_VAL		0xFFFF
#define PKI_CQ_CTL_TRIGPOS_VAL		0x201

#define PKI_IRQ_DONE_STATUS_VAL		0x1

/* Only the low 12 bits of the descriptor offset form the request value */
#define PKI_NEW_REQUEST_MASK		0x00000FFF

/* Polling iterations while waiting for operation completion */
#define PKI_MAX_RETRY_COUNT		10000

/* Each of the three queue pages is one 4KB page */
#define PKI_QUEUE_BUF_SIZE		0x1000
129 
/*
 * Driver context: virtual base of the PKI crypto registers plus the three
 * page-aligned queue buffers shared with the engine over DMA:
 * - rq_in:  request queue input page (operands followed by descriptors)
 * - rq_out: request queue output page (operation results)
 * - cq:     completion queue page (status and completion words)
 */
struct versal_pki {
	vaddr_t regs;

	uint8_t *rq_in;
	uint8_t *rq_out;
	uint8_t *cq;
};

/* Single global instance, populated by versal_ecc_hw_init() */
static struct versal_pki versal_pki;
139 
/*
 * PKI Engine Descriptors
 *
 * A request is an 8-word (32 bytes) descriptor appended after the operands
 * in the request queue page, see pki_build_descriptors().
 */

#define PKI_DESC_LEN_BYTES		0x20

/* Descriptor tags: start command, transfer-in/out sizes, notify id */
#define PKI_DESC_TAG_START		0x00000002
#define PKI_DESC_TAG_TFRI(sz)		((sz) << 16 | 0x0006)
#define PKI_DESC_TAG_TFRO(sz)		((sz) << 16 | 0x000E)
#define PKI_DESC_TAG_NTFY(id)		((id) << 16 | 0x0016)

/* Operation codes supported by this driver */
#define PKI_DESC_OPTYPE_MOD_ADD		0x01
#define PKI_DESC_OPTYPE_ECC_POINTMUL	0x22
#define PKI_DESC_OPTYPE_ECDSA_SIGN	0x30
#define PKI_DESC_OPTYPE_ECDSA_VERIFY	0x31

#define PKI_DESC_ECC_FIELD_GFP		0x0

/* Operand size encodings per curve */
#define PKI_DESC_OPSIZE_P256		0x1F
#define PKI_DESC_OPSIZE_P384		0x2F
#define PKI_DESC_OPSIZE_P521		0x41

/* Curve selector encodings (0 means "no curve", used for ModAdd) */
#define PKI_DESC_SELCURVE_P256		0x1
#define PKI_DESC_SELCURVE_P384		0x2
#define PKI_DESC_SELCURVE_P521		0x3

#define PKI_DESC_TAG_START_CMD(op, opsize, selcurve, field) \
	((op) | ((field) << 7) | ((opsize) << 8) | ((selcurve) << 20))

/* Number of key-sized operands consumed/produced per operation type */
#define PKI_SIGN_INPUT_OP_COUNT		3
#define PKI_VERIFY_INPUT_OP_COUNT	5
#define PKI_MOD_ADD_INPUT_OP_COUNT	3
#define PKI_ECC_POINTMUL_INPUT_OP_COUNT	3

#define PKI_SIGN_OUTPUT_OP_COUNT		2
#define PKI_VERIFY_OUTPUT_OP_COUNT		0
#define PKI_MOD_ADD_OUTPUT_OP_COUNT		1
#define PKI_ECC_POINTMUL_OUTPUT_OP_COUNT	2

/* Padding after P-521 operands so descriptors stay 64-bit aligned */
#define PKI_SIGN_P521_PADD_BYTES	2
#define PKI_VERIFY_P521_PADD_BYTES	6

/*
 * PKI Engine Completions
 *
 * The completion queue entry carries a 16-bit PK status word followed by a
 * value word holding the request id, see pki_check_status().
 */

#define PKI_PK_STATUS_INTERNALDEBUG		GENMASK_32(3, 0)
#define PKI_PK_STATUS_POINTNOTONCURVE		BIT(4)
#define PKI_PK_STATUS_UNUSED1			BIT(5)
#define PKI_PK_STATUS_OUTOFRANGE		BIT(6)
#define PKI_PK_STATUS_INVALIDMODULUS		BIT(7)
#define PKI_PK_STATUS_NOTIMPLEMENTED		BIT(8)
#define PKI_PK_STATUS_SIGNATURENOTVALID		BIT(9)
#define PKI_PK_STATUS_PARAMNOTVALID		BIT(10)
#define PKI_PK_STATUS_NOTINVERTIBLE		BIT(11)
#define PKI_PK_STATUS_COMPOSITE			BIT(12)
#define PKI_PK_STATUS_NOTQUADRATICRESIDUE	BIT(13)
#define PKI_PK_STATUS_UNUSED2			GENMASK_32(15, 14)
#define PKI_PK_STATUS_ERROR_MASK \
	(PKI_PK_STATUS_POINTNOTONCURVE | PKI_PK_STATUS_OUTOFRANGE | \
	 PKI_PK_STATUS_INVALIDMODULUS | PKI_PK_STATUS_NOTIMPLEMENTED | \
	 PKI_PK_STATUS_SIGNATURENOTVALID | PKI_PK_STATUS_PARAMNOTVALID | \
	 PKI_PK_STATUS_NOTINVERTIBLE | PKI_PK_STATUS_NOTQUADRATICRESIDUE)

#define PKI_CQ_VALUE_SRC		BIT(1)
#define PKI_CQ_VALUE_GEN		BIT(0)
#define PKI_CQ_VALUE_REQID_SHIFT	16
#define PKI_CQ_VALUE_REQID_MASK	\
	GENMASK_32(PKI_CQ_VALUE_REQID_SHIFT + 15, PKI_CQ_VALUE_REQID_SHIFT)

/* Fixed request id: every request uses the same notify id */
#define PKI_DEFAULT_REQID		UINT16_C(0xB04EU)

#define PKI_EXPECTED_CQ_VALUE \
	(SHIFT_U32(PKI_DEFAULT_REQID, PKI_CQ_VALUE_REQID_SHIFT) | \
	 PKI_CQ_VALUE_GEN)

#define PKI_RESET_DELAY_US		10
217 
pki_get_opsize(uint32_t curve,uint32_t op,size_t * in_sz,size_t * out_sz)218 static TEE_Result pki_get_opsize(uint32_t curve, uint32_t op, size_t *in_sz,
219 				 size_t *out_sz)
220 {
221 	TEE_Result ret = TEE_SUCCESS;
222 	size_t bits = 0;
223 	size_t bytes = 0;
224 
225 	ret = pki_ecc_get_key_size(curve, &bytes, &bits);
226 	if (ret)
227 		return ret;
228 
229 	switch (op) {
230 	case PKI_DESC_OPTYPE_ECDSA_SIGN:
231 		*in_sz = bytes * PKI_SIGN_INPUT_OP_COUNT;
232 		*out_sz = bytes * PKI_SIGN_OUTPUT_OP_COUNT;
233 		break;
234 	case PKI_DESC_OPTYPE_ECDSA_VERIFY:
235 		*in_sz = bytes * PKI_VERIFY_INPUT_OP_COUNT;
236 		*out_sz = bytes * PKI_VERIFY_OUTPUT_OP_COUNT;
237 		break;
238 	case PKI_DESC_OPTYPE_MOD_ADD:
239 		*in_sz = bytes * PKI_MOD_ADD_INPUT_OP_COUNT;
240 		*out_sz = bytes * PKI_MOD_ADD_OUTPUT_OP_COUNT;
241 		break;
242 	case PKI_DESC_OPTYPE_ECC_POINTMUL:
243 		*in_sz = bytes * PKI_ECC_POINTMUL_INPUT_OP_COUNT;
244 		*out_sz = bytes * PKI_ECC_POINTMUL_OUTPUT_OP_COUNT;
245 		break;
246 	default:
247 		return TEE_ERROR_NOT_SUPPORTED;
248 	}
249 
250 	return TEE_SUCCESS;
251 }
252 
pki_build_descriptors(uint32_t curve,uint32_t op,uint32_t * descs)253 static TEE_Result pki_build_descriptors(uint32_t curve, uint32_t op,
254 					uint32_t *descs)
255 {
256 	TEE_Result ret = TEE_SUCCESS;
257 	size_t in_sz = 0;
258 	size_t out_sz = 0;
259 	uint32_t opsize = 0;
260 	uint32_t selcurve = 0;
261 
262 	ret = pki_get_opsize(curve, op, &in_sz, &out_sz);
263 	if (ret)
264 		return ret;
265 
266 	switch (curve) {
267 	case TEE_ECC_CURVE_NIST_P256:
268 		opsize = PKI_DESC_OPSIZE_P256;
269 		selcurve = PKI_DESC_SELCURVE_P256;
270 		break;
271 	case TEE_ECC_CURVE_NIST_P384:
272 		opsize = PKI_DESC_OPSIZE_P384;
273 		selcurve = PKI_DESC_SELCURVE_P384;
274 		break;
275 	case TEE_ECC_CURVE_NIST_P521:
276 		opsize = PKI_DESC_OPSIZE_P521;
277 		selcurve = PKI_DESC_SELCURVE_P521;
278 		break;
279 	default:
280 		return TEE_ERROR_NOT_SUPPORTED;
281 	}
282 
283 	/* SelCurve must be zero for ModAdd */
284 	if (op == PKI_DESC_OPTYPE_MOD_ADD)
285 		selcurve = 0;
286 
287 	descs[0] = PKI_DESC_TAG_START;
288 	descs[1] = PKI_DESC_TAG_START_CMD(op, opsize, selcurve,
289 					  PKI_DESC_ECC_FIELD_GFP);
290 	descs[2] = PKI_DESC_TAG_TFRI(in_sz);
291 	descs[3] = 0;
292 	descs[4] = PKI_DESC_TAG_TFRO(out_sz);
293 	descs[5] = PKI_QUEUE_BUF_SIZE;
294 	descs[6] = PKI_DESC_TAG_NTFY(PKI_DEFAULT_REQID);
295 	descs[7] = 0;
296 
297 	return TEE_SUCCESS;
298 }
299 
/*
 * Program the engine queues and kick one request, then poll for completion.
 *
 * @reqval: low 12 bits of the offset of the request descriptor within the
 *          request queue page (see PKI_NEW_REQUEST_MASK at call sites)
 *
 * Returns TEE_SUCCESS once the done IRQ status is observed (and acked), or
 * TEE_ERROR_TIMEOUT after PKI_MAX_RETRY_COUNT polls.
 */
static TEE_Result pki_start_operation(uint32_t reqval)
{
	/* Soft reset */
	io_write32(versal_pki.regs + PKI_CRYPTO_SOFT_RESET_OFFSET, 1);
	udelay(PKI_RESET_DELAY_US);
	io_write32(versal_pki.regs + PKI_CRYPTO_SOFT_RESET_OFFSET, 0);

	/* Push queue contents to memory before the engine DMAs them */
	cache_operation(TEE_CACHEFLUSH, versal_pki.rq_in, PKI_QUEUE_BUF_SIZE);
	cache_operation(TEE_CACHEFLUSH, versal_pki.rq_out, PKI_QUEUE_BUF_SIZE);
	cache_operation(TEE_CACHEFLUSH, versal_pki.cq, PKI_QUEUE_BUF_SIZE);

	/* Point the engine at the three queue pages and configure them */
	io_write32(versal_pki.regs + PKI_RQ_CFG_PERMISSIONS_OFFSET,
		   PKI_RQ_CFG_PERMISSIONS_SAFE);
	io_write64(versal_pki.regs + PKI_RQ_CFG_PAGE_ADDR_IN_OFFSET,
		   virt_to_phys(versal_pki.rq_in));
	io_write64(versal_pki.regs + PKI_RQ_CFG_PAGE_ADDR_OUT_OFFSET,
		   virt_to_phys(versal_pki.rq_out));
	io_write64(versal_pki.regs + PKI_CQ_CFG_ADDR_OFFSET,
		   virt_to_phys(versal_pki.cq));
	io_write32(versal_pki.regs + PKI_RQ_CFG_PAGE_SIZE_OFFSET,
		   PKI_RQ_CFG_PAGE_SIZE_4096);
	io_write32(versal_pki.regs + PKI_RQ_CFG_CQID_OFFSET, PKI_RQ_CFG_CQID);
	io_write32(versal_pki.regs + PKI_CQ_CFG_SIZE_OFFSET,
		   PKI_CQ_CFG_SIZE_4096);
	io_write32(versal_pki.regs + PKI_CQ_CFG_IRQ_IDX_OFFSET,
		   PKI_CQ_CFG_IRQ_ID_VAL);
	io_write32(versal_pki.regs + PKI_RQ_CFG_QUEUE_DEPTH_OFFSET,
		   PKI_RQ_CFG_QUEUE_DEPTH_VAL);
	io_write64(versal_pki.regs + PKI_CRYPTO_IRQ_ENABLE_OFFSET,
		   PKI_IRQ_ENABLE_VAL);

	/* Arm the completion trigger, then submit the request */
	io_write32(versal_pki.regs + PKI_CQ_CTL_TRIGPOS_OFFSET,
		   PKI_CQ_CTL_TRIGPOS_VAL);
	io_write64(versal_pki.regs + PKI_RQ_CTL_NEW_REQUEST_OFFSET, reqval);

	/* Wait for completion */
	for (unsigned int retries = 0; retries < PKI_MAX_RETRY_COUNT;
	     retries++) {
		uint64_t irq_status = io_read64(versal_pki.regs +
						PKI_CRYPTO_IRQ_STATUS_OFFSET);
		if (irq_status == PKI_IRQ_DONE_STATUS_VAL) {
			/* Ack the done interrupt before returning */
			io_write64(versal_pki.regs +
				   PKI_CRYPTO_IRQ_RESET_OFFSET,
				   PKI_IRQ_DONE_STATUS_VAL);
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_TIMEOUT;
}
350 
/*
 * Decode the completion queue entry of the last operation.
 *
 * Validates the completion value word (GEN bit + request id), then maps the
 * PK status bits to TEE error codes. Returns TEE_SUCCESS when no error bit
 * is set.
 */
static TEE_Result pki_check_status(void)
{
	uint32_t cq_status = 0;
	uint32_t cq_value = 0;

	/*
	 * The engine wrote the completion entry by DMA; flush (clean +
	 * invalidate) so the CPU reads fresh data below.
	 */
	cache_operation(TEE_CACHEFLUSH, versal_pki.cq, PKI_QUEUE_BUF_SIZE);

	/* Word 0: PK status, word 1: completion value (GEN bit + req id) */
	cq_status = io_read32((vaddr_t)versal_pki.cq);
	cq_value = io_read32((vaddr_t)versal_pki.cq + 4);

	if (cq_value != PKI_EXPECTED_CQ_VALUE) {
		if (!(cq_value & PKI_CQ_VALUE_GEN))
			EMSG("PKI bad completion, not marked as new");
		else if ((cq_value & PKI_CQ_VALUE_REQID_MASK) !=
			 SHIFT_U32(PKI_DEFAULT_REQID, PKI_CQ_VALUE_REQID_SHIFT))
			EMSG("PKI bad completion, unexpected request id 0x%04x",
			     (cq_value & PKI_CQ_VALUE_REQID_MASK) >>
			     PKI_CQ_VALUE_REQID_SHIFT);

		if (cq_value & PKI_CQ_VALUE_SRC)
			EMSG("PKI bad completion, DMA or Scheduler error");

		EMSG("PKI bad completion, CQ VALUE: 0x%08x, STATUS: 0x%04x",
		     cq_value, cq_status);

		return TEE_ERROR_GENERIC;
	}

	if (cq_status & PKI_PK_STATUS_ERROR_MASK) {
		/* Specific errors first, before the generic parameter map */
		if (cq_status & PKI_PK_STATUS_NOTIMPLEMENTED)
			return TEE_ERROR_NOT_SUPPORTED;
		if (cq_status & PKI_PK_STATUS_SIGNATURENOTVALID)
			return TEE_ERROR_SIGNATURE_INVALID;

		EMSG("PKI Engine error: PK STATUS: 0x%04x", cq_status);

		if (cq_status & (PKI_PK_STATUS_POINTNOTONCURVE |
				 PKI_PK_STATUS_OUTOFRANGE |
				 PKI_PK_STATUS_INVALIDMODULUS |
				 PKI_PK_STATUS_PARAMNOTVALID |
				 PKI_PK_STATUS_NOTINVERTIBLE |
				 PKI_PK_STATUS_NOTQUADRATICRESIDUE))
			return TEE_ERROR_BAD_PARAMETERS;

		return TEE_ERROR_GENERIC;
	}

	return TEE_SUCCESS;
}
400 
/*
 * Verify an ECDSA signature with the PKI engine.
 *
 * @algo:    TEE algorithm id, used to hash/format the message
 * @key:     public key (curve selects P-256/P-384/P-521)
 * @msg:     message or digest to verify against
 * @msg_len: length of @msg in bytes
 * @sig:     raw signature, r || s, each sig_len / 2 bytes big-endian
 * @sig_len: total signature length in bytes
 *
 * Operand layout in the request queue: Qx, Qy, r, s, hash (each padded to
 * the curve size, byte-swapped to engine order), then the descriptor.
 * Returns TEE_ERROR_SIGNATURE_INVALID on mismatch (via pki_check_status()).
 */
TEE_Result versal_ecc_verify(uint32_t algo, struct ecc_public_key *key,
			     const uint8_t *msg, size_t msg_len,
			     const uint8_t *sig, size_t sig_len)
{
	TEE_Result ret = TEE_SUCCESS;
	size_t bits = 0;
	size_t bytes = 0;
	size_t len = 0;
	uint8_t *addr = versal_pki.rq_in;

	ret = pki_ecc_get_key_size(key->curve, &bytes, &bits);
	if (ret)
		return ret;

	/*
	 * NOTE: Amount of data written into PKI request queue
	 *       (PKI_QUEUE_BUF_SIZE) is - in case of signature verification -
	 *       at most (NIST P521):
	 *
	 *         public key (2x 66 Bytes)
	 *       + signature (2x 66 Bytes)
	 *       + message/hash (SHA2-512: 64 Bytes) + padding (2 Bytes)
	 *       + P521 padding for uint32_t (6 Bytes)
	 *       + 8 descriptor words (8x 4 Bytes)
	 *       = 368 Bytes
	 */

	/* Copy public key (byte-swapped to engine order) */
	pki_crypto_bignum_bn2bin_eswap(key->curve, key->x, addr);
	addr += bytes;
	pki_crypto_bignum_bn2bin_eswap(key->curve, key->y, addr);
	addr += bytes;

	/* Copy signature: r then s, each half of @sig, byte-swapped */
	pki_memcpy_swp(addr, sig, sig_len / 2);
	addr += sig_len / 2;
	pki_memcpy_swp(addr, sig + sig_len / 2, sig_len / 2);
	addr += sig_len / 2;

	/* Copy hash, zero-padded up to the curve size */
	ret = pki_ecc_prepare_msg(algo, msg, msg_len, &len, addr);
	if (ret)
		return ret;
	if (len < bytes)
		memset(addr + len, 0, bytes - len);
	addr += bytes;

	/* P-521 operands are not a multiple of 4; realign the descriptor */
	if (key->curve == TEE_ECC_CURVE_NIST_P521) {
		memset(addr, 0, PKI_VERIFY_P521_PADD_BYTES);
		addr += PKI_VERIFY_P521_PADD_BYTES;
	}

	/* Build descriptors */
	if (!IS_ALIGNED_WITH_TYPE(addr, uint64_t))
		return TEE_ERROR_BAD_PARAMETERS;

	ret = pki_build_descriptors(key->curve, PKI_DESC_OPTYPE_ECDSA_VERIFY,
				    (void *)addr);
	if (ret)
		return ret;

	/* Request value is the descriptor offset within the 4KB page */
	ret = pki_start_operation(PKI_NEW_REQUEST_MASK & (vaddr_t)addr);
	if (ret)
		return ret;

	ret = pki_check_status();
	if (ret)
		return ret;

	/* Clear memory */
	memset(versal_pki.rq_in, 0, PKI_QUEUE_BUF_SIZE);
	memset(versal_pki.cq, 0, PKI_QUEUE_BUF_SIZE);

	return TEE_SUCCESS;
}
476 
versal_ecc_sign(uint32_t algo,struct ecc_keypair * key,const uint8_t * msg,size_t msg_len,uint8_t * sig,size_t * sig_len)477 TEE_Result versal_ecc_sign(uint32_t algo, struct ecc_keypair *key,
478 			   const uint8_t *msg, size_t msg_len,
479 			   uint8_t *sig, size_t *sig_len)
480 {
481 	TEE_Result ret = TEE_SUCCESS;
482 	size_t bits = 0;
483 	size_t bytes = 0;
484 	struct ecc_keypair ephemeral = { };
485 
486 	ret = pki_ecc_get_key_size(key->curve, &bytes, &bits);
487 	if (ret)
488 		return ret;
489 
490 	/* Ephemeral private key */
491 	ret = drvcrypt_asym_alloc_ecc_keypair(&ephemeral,
492 					      TEE_TYPE_ECDSA_KEYPAIR, bits);
493 	if (ret) {
494 		EMSG("Versal, can't allocate the ephemeral key");
495 		return ret;
496 	}
497 
498 	ephemeral.curve = key->curve;
499 	ret = versal_ecc_gen_keypair(&ephemeral);
500 	if (ret) {
501 		EMSG("Versal, can't generate the ephemeral key");
502 		goto out;
503 	}
504 
505 	ret = versal_ecc_sign_ephemeral(algo, bytes, key, &ephemeral, msg,
506 					msg_len, sig, sig_len);
507 
508 out:
509 	crypto_bignum_free(&ephemeral.d);
510 	crypto_bignum_free(&ephemeral.x);
511 	crypto_bignum_free(&ephemeral.y);
512 
513 	return ret;
514 }
515 
versal_ecc_sign_ephemeral(uint32_t algo,size_t bytes,struct ecc_keypair * key,struct ecc_keypair * ephemeral,const uint8_t * msg,size_t msg_len,uint8_t * sig,size_t * sig_len)516 TEE_Result versal_ecc_sign_ephemeral(uint32_t algo, size_t bytes,
517 				     struct ecc_keypair *key,
518 				     struct ecc_keypair *ephemeral,
519 				     const uint8_t *msg, size_t msg_len,
520 				     uint8_t *sig, size_t *sig_len)
521 {
522 	TEE_Result ret = TEE_SUCCESS;
523 	size_t len = 0;
524 	uint8_t *addr = versal_pki.rq_in;
525 
526 	/*
527 	 * NOTE: Amount of data written into PKI request queue
528 	 *       (PKI_QUEUE_BUF_SIZE) is - in case of signature generation - at
529 	 *       most (NIST P521):
530 	 *
531 	 *         private key (66 Bytes)
532 	 *       + ephemeral key (66 Bytes)
533 	 *       + message/hash (SHA2-512: 64 Bytes) + padding (2 Bytes)
534 	 *       + P521 padding for uint32_t (2 Bytes)
535 	 *       + 8 descriptor words (8x 4 Bytes)
536 	 *       = 232 Bytes
537 	 */
538 
539 	/* Copy private key */
540 	pki_crypto_bignum_bn2bin_eswap(key->curve, key->d, addr);
541 	addr += bytes;
542 
543 	/* Copy ephemeral key */
544 	pki_crypto_bignum_bn2bin_eswap(key->curve, ephemeral->d,
545 				       (uint8_t *)addr);
546 	addr += bytes;
547 
548 	/* Copy hash */
549 	ret = pki_ecc_prepare_msg(algo, msg, msg_len, &len, addr);
550 	if (ret)
551 		return ret;
552 	if (len < bytes)
553 		memset(addr + len, 0, bytes - len);
554 	addr += bytes;
555 
556 	if (key->curve == TEE_ECC_CURVE_NIST_P521) {
557 		memset(addr, 0, PKI_SIGN_P521_PADD_BYTES);
558 		addr += PKI_SIGN_P521_PADD_BYTES;
559 	}
560 
561 	/* Build descriptors */
562 	if (!IS_ALIGNED_WITH_TYPE(addr, uint64_t))
563 		return TEE_ERROR_BAD_PARAMETERS;
564 
565 	ret = pki_build_descriptors(key->curve, PKI_DESC_OPTYPE_ECDSA_SIGN,
566 				    (void *)addr);
567 	if (ret)
568 		return ret;
569 
570 	ret = pki_start_operation(PKI_NEW_REQUEST_MASK & (vaddr_t)addr);
571 	if (ret)
572 		return ret;
573 
574 	ret = pki_check_status();
575 	if (ret)
576 		return ret;
577 
578 	/* Copy signature back */
579 	*sig_len = 2 * bytes;
580 
581 	cache_operation(TEE_CACHEFLUSH, versal_pki.rq_out, PKI_QUEUE_BUF_SIZE);
582 
583 	pki_memcpy_swp(sig, versal_pki.rq_out, bytes);
584 	pki_memcpy_swp(sig + bytes, versal_pki.rq_out + bytes, bytes);
585 
586 	/* Clear memory */
587 	memset(versal_pki.rq_in, 0, PKI_QUEUE_BUF_SIZE);
588 	memset(versal_pki.rq_out, 0, PKI_QUEUE_BUF_SIZE);
589 	memset(versal_pki.cq, 0, PKI_QUEUE_BUF_SIZE);
590 
591 	return ret;
592 }
593 
/*
 * NIST curve constants in the byte order the PKI engine consumes
 * (least-significant byte first; note the driver byte-swaps bignums with
 * pki_crypto_bignum_bn2bin_eswap()/pki_memcpy_swp() for the same reason).
 *
 * order_pXXX: group order n of the curve (FIPS 186-4 / SP 800-186).
 * ecdsa_gpoint_pXXX_gx/gy: affine coordinates of the base point G.
 */

/* Group order n of NIST P-256, LSB first */
static const uint8_t order_p256[] = {
	0x51, 0x25, 0x63, 0xfc, 0xc2, 0xca, 0xb9, 0xf3,
	0x84, 0x9e, 0x17, 0xa7, 0xad, 0xfa, 0xe6, 0xbc,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
};

/* Group order n of NIST P-384, LSB first */
static const uint8_t order_p384[] = {
	0x73, 0x29, 0xc5, 0xcc, 0x6a, 0x19, 0xec, 0xec,
	0x7a, 0xa7, 0xb0, 0x48, 0xb2, 0x0d, 0x1a, 0x58,
	0xdf, 0x2d, 0x37, 0xf4, 0x81, 0x4d, 0x63, 0xc7,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};

/* Group order n of NIST P-521, LSB first (66 bytes) */
static const uint8_t order_p521[] = {
	0x09, 0x64, 0x38, 0x91, 0x1e, 0xb7, 0x6f, 0xbb,
	0xae, 0x47, 0x9c, 0x89, 0xb8, 0xc9, 0xb5, 0x3b,
	0xd0, 0xa5, 0x09, 0xf7, 0x48, 0x01, 0xcc, 0x7f,
	0x6b, 0x96, 0x2f, 0xbf, 0x83, 0x87, 0x86, 0x51,
	0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0x01
};

/* Base point G of NIST P-256, x coordinate, LSB first */
static const uint8_t ecdsa_gpoint_p256_gx[] = {
	0x96, 0xc2, 0x98, 0xd8, 0x45, 0x39, 0xa1, 0xf4,
	0xa0, 0x33, 0xeb, 0x2d, 0x81, 0x7d, 0x03, 0x77,
	0xf2, 0x40, 0xa4, 0x63, 0xe5, 0xe6, 0xbc, 0xf8,
	0x47, 0x42, 0x2c, 0xe1, 0xf2, 0xd1, 0x17, 0x6b
};

/* Base point G of NIST P-256, y coordinate, LSB first */
static const uint8_t ecdsa_gpoint_p256_gy[] = {
	0xf5, 0x51, 0xbf, 0x37, 0x68, 0x40, 0xb6, 0xcb,
	0xce, 0x5e, 0x31, 0x6b, 0x57, 0x33, 0xce, 0x2b,
	0x16, 0x9e, 0x0f, 0x7c, 0x4a, 0xeb, 0xe7, 0x8e,
	0x9b, 0x7f, 0x1a, 0xfe, 0xe2, 0x42, 0xe3, 0x4f
};

/* Base point G of NIST P-384, x coordinate, LSB first */
static const uint8_t ecdsa_gpoint_p384_gx[] = {
	0xb7, 0x0a, 0x76, 0x72, 0x38, 0x5e, 0x54, 0x3a,
	0x6c, 0x29, 0x55, 0xbf, 0x5d, 0xf2, 0x02, 0x55,
	0x38, 0x2a, 0x54, 0x82, 0xe0, 0x41, 0xf7, 0x59,
	0x98, 0x9b, 0xa7, 0x8b, 0x62, 0x3b, 0x1d, 0x6e,
	0x74, 0xad, 0x20, 0xf3, 0x1e, 0xc7, 0xb1, 0x8e,
	0x37, 0x05, 0x8b, 0xbe, 0x22, 0xca, 0x87, 0xaa
};

/* Base point G of NIST P-384, y coordinate, LSB first */
static const uint8_t ecdsa_gpoint_p384_gy[] = {
	0x5f, 0x0e, 0xea, 0x90, 0x7c, 0x1d, 0x43, 0x7a,
	0x9d, 0x81, 0x7e, 0x1d, 0xce, 0xb1, 0x60, 0x0a,
	0xc0, 0xb8, 0xf0, 0xb5, 0x13, 0x31, 0xda, 0xe9,
	0x7c, 0x14, 0x9a, 0x28, 0xbd, 0x1d, 0xf4, 0xf8,
	0x29, 0xdc, 0x92, 0x92, 0xbf, 0x98, 0x9e, 0x5d,
	0x6f, 0x2c, 0x26, 0x96, 0x4a, 0xde, 0x17, 0x36
};

/* Base point G of NIST P-521, x coordinate, LSB first (66 bytes) */
static const uint8_t ecdsa_gpoint_p521_gx[] = {
	0x66, 0xbd, 0xe5, 0xc2, 0x31, 0x7e, 0x7e, 0xf9,
	0x9b, 0x42, 0x6a, 0x85, 0xc1, 0xb3, 0x48, 0x33,
	0xde, 0xa8, 0xff, 0xa2, 0x27, 0xc1, 0x1d, 0xfe,
	0x28, 0x59, 0xe7, 0xef, 0x77, 0x5e, 0x4b, 0xa1,
	0xba, 0x3d, 0x4d, 0x6b, 0x60, 0xaf, 0x28, 0xf8,
	0x21, 0xb5, 0x3f, 0x05, 0x39, 0x81, 0x64, 0x9c,
	0x42, 0xb4, 0x95, 0x23, 0x66, 0xcb, 0x3e, 0x9e,
	0xcd, 0xe9, 0x04, 0x04, 0xb7, 0x06, 0x8e, 0x85,
	0xc6, 0x00
};

/* Base point G of NIST P-521, y coordinate, LSB first (66 bytes) */
static const uint8_t ecdsa_gpoint_p521_gy[] = {
	0x50, 0x66, 0xd1, 0x9f, 0x76, 0x94, 0xbe, 0x88,
	0x40, 0xc2, 0x72, 0xa2, 0x86, 0x70, 0x3c, 0x35,
	0x61, 0x07, 0xad, 0x3f, 0x01, 0xb9, 0x50, 0xc5,
	0x40, 0x26, 0xf4, 0x5e, 0x99, 0x72, 0xee, 0x97,
	0x2c, 0x66, 0x3e, 0x27, 0x17, 0xbd, 0xaf, 0x17,
	0x68, 0x44, 0x9b, 0x57, 0x49, 0x44, 0xf5, 0x98,
	0xd9, 0x1b, 0x7d, 0x2c, 0xb4, 0x5f, 0x8a, 0x5c,
	0x04, 0xc0, 0x3b, 0x9a, 0x78, 0x6a, 0x29, 0x39,
	0x18, 0x01
};
677 
versal_ecc_gen_private_key(uint32_t curve,uint8_t * priv,size_t bytes)678 static TEE_Result versal_ecc_gen_private_key(uint32_t curve, uint8_t *priv,
679 					     size_t bytes)
680 {
681 	TEE_Result ret = TEE_SUCCESS;
682 	const uint8_t *order = NULL;
683 	uint8_t *addr = versal_pki.rq_in;
684 
685 	switch (curve) {
686 	case TEE_ECC_CURVE_NIST_P256:
687 		order = order_p256;
688 		break;
689 	case TEE_ECC_CURVE_NIST_P384:
690 		order = order_p384;
691 		break;
692 	case TEE_ECC_CURVE_NIST_P521:
693 		order = order_p521;
694 		break;
695 	default:
696 		return TEE_ERROR_NOT_SUPPORTED;
697 	}
698 
699 	/*
700 	 * NOTE: Amount of data written into PKI request queue
701 	 *       (PKI_QUEUE_BUF_SIZE) is - in case of signature generation - at
702 	 *       most (NIST P521):
703 	 *
704 	 *         order n (66 Bytes)
705 	 *       + random number (66 Bytes)
706 	 *       + constant 1 (66 Bytes)
707 	 *       + P521 padding for uint32_t (2 Bytes)
708 	 *       + 8 descriptor words (8x 4 Bytes)
709 	 *       = 232 Bytes
710 	 */
711 
712 	/* Copy curve order N */
713 	memcpy(addr, order, bytes);
714 	addr += bytes;
715 
716 	/* Copy A = random */
717 	ret = versal_ecc_trng_get_random_bytes(addr, bytes);
718 	if (ret)
719 		return ret;
720 	addr += bytes;
721 
722 	/* Copy B = 1 */
723 	addr[0] = 1;
724 	memset(addr + 1, 0, bytes - 1);
725 	addr += bytes;
726 
727 	if (curve == TEE_ECC_CURVE_NIST_P521) {
728 		memset(addr, 0, PKI_SIGN_P521_PADD_BYTES);
729 		addr += PKI_SIGN_P521_PADD_BYTES;
730 	}
731 
732 	/* Build descriptors */
733 	if (!IS_ALIGNED_WITH_TYPE(addr, uint64_t))
734 		return TEE_ERROR_BAD_PARAMETERS;
735 
736 	ret = pki_build_descriptors(curve, PKI_DESC_OPTYPE_MOD_ADD,
737 				    (void *)addr);
738 	if (ret)
739 		return ret;
740 
741 	/* Use PKI engine to compute A+B mod N */
742 	ret = pki_start_operation(PKI_NEW_REQUEST_MASK & (vaddr_t)addr);
743 	if (ret)
744 		return ret;
745 
746 	ret = pki_check_status();
747 	if (ret)
748 		return ret;
749 
750 	cache_operation(TEE_CACHEFLUSH, versal_pki.rq_out, PKI_QUEUE_BUF_SIZE);
751 
752 	/* Copy back result */
753 	memcpy(priv, versal_pki.rq_out, bytes);
754 
755 	return TEE_SUCCESS;
756 }
757 
/*
 * Generate an ECC keypair into @s (curve taken from s->curve).
 *
 * A private key is generated at the start of the request queue, then the
 * generator point G is appended and the engine computes Q = priv * G
 * (point multiplication). Note the aliasing: the private key lives at the
 * start of versal_pki.rq_in (both as pointmul operand and as the value
 * read back into s->d at the end), so versal_ecc_gen_private_key() must
 * not clear rq_in.
 */
TEE_Result versal_ecc_gen_keypair(struct ecc_keypair *s)
{
	TEE_Result ret = TEE_SUCCESS;
	size_t bytes = 0;
	size_t bits = 0;
	const uint8_t *gx = NULL;
	const uint8_t *gy = NULL;
	uint8_t *addr = versal_pki.rq_in;

	ret = pki_ecc_get_key_size(s->curve, &bytes, &bits);
	if (ret)
		return ret;

	switch (s->curve) {
	case TEE_ECC_CURVE_NIST_P256:
		gx = ecdsa_gpoint_p256_gx;
		gy = ecdsa_gpoint_p256_gy;
		break;
	case TEE_ECC_CURVE_NIST_P384:
		gx = ecdsa_gpoint_p384_gx;
		gy = ecdsa_gpoint_p384_gy;
		break;
	case TEE_ECC_CURVE_NIST_P521:
		gx = ecdsa_gpoint_p521_gx;
		gy = ecdsa_gpoint_p521_gy;
		break;
	default:
		return TEE_ERROR_NOT_SUPPORTED;
	}

	/*
	 * NOTE: Amount of data written into PKI request queue
	 *       (PKI_QUEUE_BUF_SIZE) is - in case of signature generation - at
	 *       most (NIST P521):
	 *
	 *         private key (66 Bytes)
	 *       + generator point (2x 66 Bytes)
	 *       + P521 padding for uint32_t (2 Bytes)
	 *       + 8 descriptor words (8x 4 Bytes)
	 *       = 232 Bytes
	 */

	/* Generate private key (written in place at the queue start) */
	ret = versal_ecc_gen_private_key(s->curve, addr, bytes);
	if (ret)
		return ret;
	addr += bytes;

	/* Copy generator point x coordinate */
	memcpy(addr, gx, bytes);
	addr += bytes;

	/* Copy generator point y coordinate */
	memcpy(addr, gy, bytes);
	addr += bytes;

	/* P-521 operands are not a multiple of 4; realign the descriptor */
	if (s->curve == TEE_ECC_CURVE_NIST_P521) {
		memset(addr, 0, PKI_SIGN_P521_PADD_BYTES);
		addr += PKI_SIGN_P521_PADD_BYTES;
	}

	/* Build descriptors */
	if (!IS_ALIGNED_WITH_TYPE(addr, uint64_t))
		return TEE_ERROR_BAD_PARAMETERS;

	ret = pki_build_descriptors(s->curve, PKI_DESC_OPTYPE_ECC_POINTMUL,
				    (void *)addr);
	if (ret)
		return ret;

	/* Use PKI engine to compute Q = priv * G */
	ret = pki_start_operation(PKI_NEW_REQUEST_MASK & (vaddr_t)addr);
	if (ret)
		return ret;

	ret = pki_check_status();
	if (ret)
		return ret;

	cache_operation(TEE_CACHEFLUSH, versal_pki.rq_out, PKI_QUEUE_BUF_SIZE);

	/* Copy private and public keys back (engine order -> bignum) */
	pki_crypto_bignum_bin2bn_eswap(versal_pki.rq_in, bytes, s->d);
	pki_crypto_bignum_bin2bn_eswap(versal_pki.rq_out, bytes, s->x);
	pki_crypto_bignum_bin2bn_eswap(versal_pki.rq_out + bytes, bytes, s->y);

	/* Clear memory */
	memset(versal_pki.rq_in, 0, PKI_QUEUE_BUF_SIZE);
	memset(versal_pki.rq_out, 0, PKI_QUEUE_BUF_SIZE);
	memset(versal_pki.cq, 0, PKI_QUEUE_BUF_SIZE);

	return TEE_SUCCESS;
}
851 
versal_pki_engine_reset(void)852 static TEE_Result versal_pki_engine_reset(void)
853 {
854 	vaddr_t reset = 0;
855 
856 	/* Reset the PKI engine */
857 	reset = (vaddr_t)core_mmu_add_mapping(MEM_AREA_IO_SEC,
858 					      PSX_CRF_RST_PKI,
859 					      SMALL_PAGE_SIZE);
860 	if (!reset)
861 		return TEE_ERROR_GENERIC;
862 
863 	io_write32(reset, PKI_ASSERT_RESET);
864 	udelay(PKI_RESET_DELAY_US);
865 	io_write32(reset, PKI_DEASSERT_RESET);
866 
867 	core_mmu_remove_mapping(MEM_AREA_IO_SEC, (void *)reset,
868 				SMALL_PAGE_SIZE);
869 
870 	return TEE_SUCCESS;
871 }
872 
versal_pki_engine_slcr_config(void)873 static TEE_Result versal_pki_engine_slcr_config(void)
874 {
875 	vaddr_t fpd_slcr = 0;
876 
877 	fpd_slcr = (vaddr_t)core_mmu_add_mapping(MEM_AREA_IO_SEC,
878 						 FPD_SLCR_BASEADDR,
879 						 FPD_SLCR_SIZE);
880 	if (!fpd_slcr)
881 		return TEE_ERROR_GENERIC;
882 
883 	/* Clear FPD SCLR write protect reg */
884 	io_write32(fpd_slcr + FPD_SLCR_WPROT0_OFFSET,
885 		   FPD_CLEAR_WRITE_PROTECT);
886 
887 	/* PKI mux selection */
888 	io_mask32(fpd_slcr + FPD_SLCR_PKI_MUX_SEL_OFFSET,
889 		  PKI_MUX_SELECT, PKI_MUX_SEL_MASK);
890 
891 	/* Re-enable FPD SCLR write protect */
892 	io_write32(fpd_slcr + FPD_SLCR_WPROT0_OFFSET,
893 		   FPD_ENABLE_WRITE_PROTECT);
894 
895 	core_mmu_remove_mapping(MEM_AREA_IO_SEC,
896 				(void *)fpd_slcr, FPD_SLCR_SIZE);
897 
898 	return TEE_SUCCESS;
899 }
900 
versal_pki_engine_config(void)901 static TEE_Result versal_pki_engine_config(void)
902 {
903 	vaddr_t regs = 0;
904 	uint64_t val = 0;
905 
906 	regs = (vaddr_t)core_mmu_add_mapping(MEM_AREA_IO_SEC,
907 					     FPD_PKI_CTRLSTAT_BASEADDR,
908 					     FPD_PKI_SIZE);
909 	if (!regs)
910 		return TEE_ERROR_GENERIC;
911 
912 	/* Counter-measures configuration */
913 	val = io_read64(regs + PKI_ENGINE_CTRL_OFFSET);
914 	if (IS_ENABLED(CFG_VERSAL_PKI_COUNTER_MEASURES))
915 		val &= ~PKI_ENGINE_CTRL_CM_MASK;
916 	else
917 		val |= PKI_ENGINE_CTRL_CM_MASK;
918 	io_write64(regs + PKI_ENGINE_CTRL_OFFSET, val);
919 
920 	/* Mark PKI engine transactions as secure */
921 	val = io_read64(regs + PKI_ENGINE_GEN_CTRL_OFFSET);
922 	val &= ~PKI_ENGINE_GEN_CTRL_TZ;
923 	io_write64(regs + PKI_ENGINE_GEN_CTRL_OFFSET, val);
924 
925 	core_mmu_remove_mapping(MEM_AREA_IO_SEC,
926 				(void *)regs, FPD_PKI_SIZE);
927 
928 	return TEE_SUCCESS;
929 }
930 
versal_ecc_hw_init(void)931 TEE_Result versal_ecc_hw_init(void)
932 {
933 	TEE_Result ret = TEE_SUCCESS;
934 
935 	ret = versal_pki_engine_slcr_config();
936 	if (ret)
937 		return ret;
938 
939 	ret = versal_pki_engine_reset();
940 	if (ret)
941 		return ret;
942 
943 	ret = versal_pki_engine_config();
944 	if (ret)
945 		return ret;
946 
947 	if (IS_ENABLED(CFG_VERSAL_RNG_DRV)) {
948 		ret = versal_ecc_trng_init();
949 		if (ret)
950 			return ret;
951 	}
952 
953 	versal_pki.regs = (vaddr_t)core_mmu_add_mapping(MEM_AREA_IO_SEC,
954 							FPD_PKI_CRYPTO_BASEADDR,
955 							FPD_PKI_SIZE);
956 	if (!versal_pki.regs)
957 		return TEE_ERROR_GENERIC;
958 
959 	/* Allocate queues */
960 	versal_pki.rq_in = memalign(PKI_QUEUE_BUF_SIZE, PKI_QUEUE_BUF_SIZE);
961 	if (!versal_pki.rq_in)
962 		goto error;
963 
964 	versal_pki.rq_out = memalign(PKI_QUEUE_BUF_SIZE, PKI_QUEUE_BUF_SIZE);
965 	if (!versal_pki.rq_out)
966 		goto error;
967 
968 	versal_pki.cq = memalign(PKI_QUEUE_BUF_SIZE, PKI_QUEUE_BUF_SIZE);
969 	if (!versal_pki.cq)
970 		goto error;
971 
972 	return TEE_SUCCESS;
973 
974 error:
975 	free(versal_pki.rq_in);
976 	free(versal_pki.rq_out);
977 
978 	core_mmu_remove_mapping(MEM_AREA_IO_SEC,
979 				(void *)versal_pki.regs, FPD_PKI_SIZE);
980 
981 	return TEE_ERROR_GENERIC;
982 }
983