1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2021, STMicroelectronics - All Rights Reserved
4  */
5 #include <assert.h>
6 #include <config.h>
7 #include <drivers/clk.h>
8 #include <drivers/clk_dt.h>
9 #include <drivers/rstctrl.h>
10 #include <initcall.h>
11 #include <io.h>
12 #include <kernel/boot.h>
13 #include <kernel/delay.h>
14 #include <kernel/dt.h>
15 #include <kernel/dt_driver.h>
16 #include <kernel/mutex.h>
17 #include <libfdt.h>
18 #include <mm/core_memprot.h>
19 #include <stdint.h>
20 #include <stm32_util.h>
21 #include <string.h>
22 #include <utee_defines.h>
23 #include <util.h>
24 
25 #include "stm32_cryp.h"
26 #include "common.h"
27 
28 /* CRYP control register */
29 #define _CRYP_CR			0x0U
30 /* CRYP status register */
31 #define _CRYP_SR			0x04U
32 /* CRYP data input register */
33 #define _CRYP_DIN			0x08U
34 /* CRYP data output register */
35 #define _CRYP_DOUT			0x0CU
36 /* CRYP DMA control register */
37 #define _CRYP_DMACR			0x10U
38 /* CRYP interrupt mask set/clear register */
39 #define _CRYP_IMSCR			0x14U
40 /* CRYP raw interrupt status register */
41 #define _CRYP_RISR			0x18U
42 /* CRYP masked interrupt status register */
43 #define _CRYP_MISR			0x1CU
44 /* CRYP key registers */
45 #define _CRYP_K0LR			0x20U
46 #define _CRYP_K0RR			0x24U
47 #define _CRYP_K1LR			0x28U
48 #define _CRYP_K1RR			0x2CU
49 #define _CRYP_K2LR			0x30U
50 #define _CRYP_K2RR			0x34U
51 #define _CRYP_K3LR			0x38U
52 #define _CRYP_K3RR			0x3CU
53 /* CRYP initialization vector registers */
54 #define _CRYP_IV0LR			0x40U
55 #define _CRYP_IV0RR			0x44U
56 #define _CRYP_IV1LR			0x48U
57 #define _CRYP_IV1RR			0x4CU
58 /* CRYP context swap GCM-CCM registers */
59 #define _CRYP_CSGCMCCM0R		0x50U
60 #define _CRYP_CSGCMCCM1R		0x54U
61 #define _CRYP_CSGCMCCM2R		0x58U
62 #define _CRYP_CSGCMCCM3R		0x5CU
63 #define _CRYP_CSGCMCCM4R		0x60U
64 #define _CRYP_CSGCMCCM5R		0x64U
65 #define _CRYP_CSGCMCCM6R		0x68U
66 #define _CRYP_CSGCMCCM7R		0x6CU
67 /* CRYP context swap GCM registers */
68 #define _CRYP_CSGCM0R			0x70U
69 #define _CRYP_CSGCM1R			0x74U
70 #define _CRYP_CSGCM2R			0x78U
71 #define _CRYP_CSGCM3R			0x7CU
72 #define _CRYP_CSGCM4R			0x80U
73 #define _CRYP_CSGCM5R			0x84U
74 #define _CRYP_CSGCM6R			0x88U
75 #define _CRYP_CSGCM7R			0x8CU
76 /* CRYP hardware configuration register */
77 #define _CRYP_HWCFGR			0x3F0U
78 /* CRYP HW version register */
79 #define _CRYP_VERR			0x3F4U
80 /* CRYP identification */
81 #define _CRYP_IPIDR			0x3F8U
82 /* CRYP HW magic ID */
83 #define _CRYP_MID			0x3FCU
84 
85 #define CRYP_TIMEOUT_US			1000000U
86 #define TIMEOUT_US_1MS			1000U
87 
88 /* CRYP control register fields */
89 #define _CRYP_CR_RESET_VALUE		0x0U
90 #define _CRYP_CR_NPBLB_MSK		GENMASK_32(23, 20)
91 #define _CRYP_CR_NPBLB_OFF		20U
92 #define _CRYP_CR_GCM_CCMPH_MSK		GENMASK_32(17, 16)
93 #define _CRYP_CR_GCM_CCMPH_OFF		16U
94 #define _CRYP_CR_GCM_CCMPH_INIT		0U
95 #define _CRYP_CR_GCM_CCMPH_HEADER	1U
96 #define _CRYP_CR_GCM_CCMPH_PAYLOAD	2U
97 #define _CRYP_CR_GCM_CCMPH_FINAL	3U
98 #define _CRYP_CR_CRYPEN			BIT(15)
99 #define _CRYP_CR_FFLUSH			BIT(14)
100 #define _CRYP_CR_KEYSIZE_MSK		GENMASK_32(9, 8)
101 #define _CRYP_CR_KEYSIZE_OFF		8U
102 #define _CRYP_CR_KSIZE_128		0U
103 #define _CRYP_CR_KSIZE_192		1U
104 #define _CRYP_CR_KSIZE_256		2U
105 #define _CRYP_CR_DATATYPE_MSK		GENMASK_32(7, 6)
106 #define _CRYP_CR_DATATYPE_OFF		6U
107 #define _CRYP_CR_DATATYPE_NONE		0U
108 #define _CRYP_CR_DATATYPE_HALF_WORD	1U
109 #define _CRYP_CR_DATATYPE_BYTE		2U
110 #define _CRYP_CR_DATATYPE_BIT		3U
111 #define _CRYP_CR_ALGOMODE_MSK		(BIT(19) | GENMASK_32(5, 3))
112 #define _CRYP_CR_ALGOMODE_OFF		3U
113 #define _CRYP_CR_ALGOMODE_TDES_ECB	0x0U
114 #define _CRYP_CR_ALGOMODE_TDES_CBC	0x1U
115 #define _CRYP_CR_ALGOMODE_DES_ECB	0x2U
116 #define _CRYP_CR_ALGOMODE_DES_CBC	0x3U
117 #define _CRYP_CR_ALGOMODE_AES_ECB	0x4U
118 #define _CRYP_CR_ALGOMODE_AES_CBC	0x5U
119 #define _CRYP_CR_ALGOMODE_AES_CTR	0x6U
120 #define _CRYP_CR_ALGOMODE_AES		0x7U
121 #define _CRYP_CR_ALGOMODE_AES_GCM	BIT(16)
122 #define _CRYP_CR_ALGOMODE_AES_CCM	(BIT(16) | BIT(0))
123 #define _CRYP_CR_ALGODIR		BIT(2)
124 #define _CRYP_CR_ALGODIR_ENC		0U
125 #define _CRYP_CR_ALGODIR_DEC		BIT(2)
126 
127 /* CRYP status register fields */
128 #define _CRYP_SR_BUSY			BIT(4)
129 #define _CRYP_SR_OFFU			BIT(3)
130 #define _CRYP_SR_OFNE			BIT(2)
131 #define _CRYP_SR_IFNF			BIT(1)
132 #define _CRYP_SR_IFEM			BIT(0)
133 
134 /* CRYP DMA control register fields */
135 #define _CRYP_DMACR_DOEN		BIT(1)
136 #define _CRYP_DMACR_DIEN		BIT(0)
137 
138 /* CRYP interrupt fields */
139 #define _CRYP_I_OUT			BIT(1)
140 #define _CRYP_I_IN			BIT(0)
141 
142 /* CRYP hardware configuration register fields */
143 #define _CRYP_HWCFGR_CFG1_MSK		GENMASK_32(3, 0)
144 #define _CRYP_HWCFGR_CFG1_OFF		0U
145 #define _CRYP_HWCFGR_CFG2_MSK		GENMASK_32(7, 4)
146 #define _CRYP_HWCFGR_CFG2_OFF		4U
147 #define _CRYP_HWCFGR_CFG3_MSK		GENMASK_32(11, 8)
148 #define _CRYP_HWCFGR_CFG3_OFF		8U
149 #define _CRYP_HWCFGR_CFG4_MSK		GENMASK_32(15, 12)
150 #define _CRYP_HWCFGR_CFG4_OFF		12U
151 
152 /* CRYP HW version register */
153 #define _CRYP_VERR_MSK			GENMASK_32(7, 0)
154 #define _CRYP_VERR_OFF			0U
155 
156 /*
157  * Macros to manipulate bits in a local variable so that the hardware
158  * register is written only once.
159  */
160 #define CLRBITS(v, bits)		((v) &= ~(bits))
161 #define SETBITS(v, bits)		((v) |= (bits))
162 
163 #define IS_ALGOMODE(cr, mod) \
164 	(((cr) & _CRYP_CR_ALGOMODE_MSK) == (_CRYP_CR_ALGOMODE_##mod << \
165 					  _CRYP_CR_ALGOMODE_OFF))
166 
167 #define SET_ALGOMODE(mod, cr) \
168 	clrsetbits(&(cr), _CRYP_CR_ALGOMODE_MSK, (_CRYP_CR_ALGOMODE_##mod << \
169 						  _CRYP_CR_ALGOMODE_OFF))
170 
171 #define GET_ALGOMODE(cr) \
172 	(((cr) & _CRYP_CR_ALGOMODE_MSK) >> _CRYP_CR_ALGOMODE_OFF)
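
/*
 * Note: the ALGOMODE field is split in CR (bits 5:3 plus bit 19), so the
 * GCM/CCM values are defined pre-shifted. For instance,
 * SET_ALGOMODE(AES_GCM, cr) writes _CRYP_CR_ALGOMODE_AES_GCM (BIT(16))
 * shifted by _CRYP_CR_ALGOMODE_OFF, which lands on CR bit 19.
 */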
173 
174 static struct stm32_cryp_platdata cryp_pdata;
175 static struct mutex cryp_lock = MUTEX_INITIALIZER;
176 
177 static void clrsetbits(uint32_t *v, uint32_t mask, uint32_t bits)
178 {
179 	*v = (*v & ~mask) | bits;
180 }
181 
182 static bool algo_mode_needs_iv(uint32_t cr)
183 {
184 	return !IS_ALGOMODE(cr, TDES_ECB) && !IS_ALGOMODE(cr, DES_ECB) &&
185 	       !IS_ALGOMODE(cr, AES_ECB);
186 }
187 
188 static bool algo_mode_is_ecb_cbc(uint32_t cr)
189 {
190 	return GET_ALGOMODE(cr) < _CRYP_CR_ALGOMODE_AES_CTR;
191 }
192 
193 static bool algo_mode_is_aes(uint32_t cr)
194 {
195 	return ((cr & _CRYP_CR_ALGOMODE_MSK) >> _CRYP_CR_ALGOMODE_OFF) >=
196 	       _CRYP_CR_ALGOMODE_AES_ECB;
197 }
198 
199 static bool is_decrypt(uint32_t cr)
200 {
201 	return (cr & _CRYP_CR_ALGODIR) == _CRYP_CR_ALGODIR_DEC;
202 }
203 
204 static bool is_encrypt(uint32_t cr)
205 {
206 	return !is_decrypt(cr);
207 }
208 
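/*
 * The number of padding bytes in the last block (NPBLB) must be programmed
 * when the padded block feeds the tag computation, that is for GCM
 * encryption and CCM decryption.
 */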
209 static bool does_need_npblb(uint32_t cr)
210 {
211 	return (IS_ALGOMODE(cr, AES_GCM) && is_encrypt(cr)) ||
212 	       (IS_ALGOMODE(cr, AES_CCM) && is_decrypt(cr));
213 }
214 
215 static TEE_Result wait_sr_bits(vaddr_t base, uint32_t bits)
216 {
217 	uint64_t timeout_ref = timeout_init_us(CRYP_TIMEOUT_US);
218 
219 	while ((io_read32(base + _CRYP_SR) & bits) != bits)
220 		if (timeout_elapsed(timeout_ref))
221 			break;
222 
223 	if ((io_read32(base + _CRYP_SR) & bits) != bits)
224 		return TEE_ERROR_BUSY;
225 
226 	return TEE_SUCCESS;
227 }
228 
229 static TEE_Result wait_end_busy(vaddr_t base)
230 {
231 	uint64_t timeout_ref = timeout_init_us(CRYP_TIMEOUT_US);
232 
233 	while (io_read32(base + _CRYP_SR) & _CRYP_SR_BUSY)
234 		if (timeout_elapsed(timeout_ref))
235 			break;
236 
237 	if (io_read32(base + _CRYP_SR) & _CRYP_SR_BUSY)
238 		return TEE_ERROR_BUSY;
239 
240 	return TEE_SUCCESS;
241 }
242 
243 static TEE_Result wait_end_enable(vaddr_t base)
244 {
245 	uint64_t timeout_ref = timeout_init_us(CRYP_TIMEOUT_US);
246 
247 	while (io_read32(base + _CRYP_CR) & _CRYP_CR_CRYPEN)
248 		if (timeout_elapsed(timeout_ref))
249 			break;
250 
251 	if (io_read32(base + _CRYP_CR) & _CRYP_CR_CRYPEN)
252 		return TEE_ERROR_BUSY;
253 
254 	return TEE_SUCCESS;
255 }
256 
257 static TEE_Result __must_check write_align_block(struct stm32_cryp_context *ctx,
258 						 uint32_t *data)
259 {
260 	TEE_Result res = TEE_SUCCESS;
261 	unsigned int i = 0;
262 
263 	res = wait_sr_bits(ctx->base, _CRYP_SR_IFNF);
264 	if (res)
265 		return res;
266 
267 	for (i = 0; i < ctx->block_u32; i++) {
268 		/* No need to htobe() as we configure the HW to swap bytes */
269 		io_write32(ctx->base + _CRYP_DIN, data[i]);
270 	}
271 
272 	return TEE_SUCCESS;
273 }
274 
275 static TEE_Result __must_check write_block(struct stm32_cryp_context *ctx,
276 					   uint8_t *data)
277 {
278 	if (!IS_ALIGNED_WITH_TYPE(data, uint32_t)) {
279 		uint32_t data_u32[MAX_BLOCK_NB_U32] = { 0 };
280 
281 		memcpy(data_u32, data, ctx->block_u32 * sizeof(uint32_t));
282 		return write_align_block(ctx, data_u32);
283 	}
284 
285 	return write_align_block(ctx, (void *)data);
286 }
287 
288 static TEE_Result __must_check read_align_block(struct stm32_cryp_context *ctx,
289 						uint32_t *data)
290 {
291 	TEE_Result res = TEE_SUCCESS;
292 	unsigned int i = 0;
293 
294 	res = wait_sr_bits(ctx->base, _CRYP_SR_OFNE);
295 	if (res)
296 		return res;
297 
298 	for (i = 0; i < ctx->block_u32; i++) {
299 		/* No need to htobe() as we configure the HW to swap bytes */
300 		data[i] = io_read32(ctx->base + _CRYP_DOUT);
301 	}
302 
303 	return TEE_SUCCESS;
304 }
305 
306 static TEE_Result __must_check read_block(struct stm32_cryp_context *ctx,
307 					  uint8_t *data)
308 {
309 	if (!IS_ALIGNED_WITH_TYPE(data, uint32_t)) {
310 		TEE_Result res = TEE_SUCCESS;
311 		uint32_t data_u32[MAX_BLOCK_NB_U32] = { 0 };
312 
313 		res = read_align_block(ctx, data_u32);
314 		if (res)
315 			return res;
316 
317 		memcpy(data, data_u32, ctx->block_u32 * sizeof(uint32_t));
318 
319 		return TEE_SUCCESS;
320 	}
321 
322 	return read_align_block(ctx, (void *)data);
323 }
324 
325 static void cryp_end(struct stm32_cryp_context *ctx, TEE_Result prev_error)
326 {
327 	if (prev_error) {
328 		if (cryp_pdata.reset &&
329 		    rstctrl_assert_to(cryp_pdata.reset, TIMEOUT_US_1MS))
330 			panic();
331 		if (cryp_pdata.reset &&
332 		    rstctrl_deassert_to(cryp_pdata.reset, TIMEOUT_US_1MS))
333 			panic();
334 	}
335 
336 	/* Disable the CRYP peripheral */
337 	io_clrbits32(ctx->base + _CRYP_CR, _CRYP_CR_CRYPEN);
338 }
339 
340 static void cryp_write_iv(struct stm32_cryp_context *ctx)
341 {
342 	if (algo_mode_needs_iv(ctx->cr)) {
343 		unsigned int i = 0;
344 
345 		/* Restore the _CRYP_IVRx */
346 		for (i = 0; i < ctx->block_u32; i++)
347 			io_write32(ctx->base + _CRYP_IV0LR + i *
348 				   sizeof(uint32_t), ctx->iv[i]);
349 	}
350 }
351 
352 static void cryp_save_suspend(struct stm32_cryp_context *ctx)
353 {
354 	unsigned int i = 0;
355 
356 	if (IS_ALGOMODE(ctx->cr, AES_GCM) || IS_ALGOMODE(ctx->cr, AES_CCM))
357 		for (i = 0; i < ARRAY_SIZE(ctx->pm_gcmccm); i++)
358 			ctx->pm_gcmccm[i] = io_read32(ctx->base +
359 						      _CRYP_CSGCMCCM0R +
360 						      i * sizeof(uint32_t));
361 
362 	if (IS_ALGOMODE(ctx->cr, AES_GCM))
363 		for (i = 0; i < ARRAY_SIZE(ctx->pm_gcm); i++)
364 			ctx->pm_gcm[i] = io_read32(ctx->base + _CRYP_CSGCM0R +
365 						   i * sizeof(uint32_t));
366 }
367 
368 static void cryp_restore_suspend(struct stm32_cryp_context *ctx)
369 {
370 	unsigned int i = 0;
371 
372 	if (IS_ALGOMODE(ctx->cr, AES_GCM) || IS_ALGOMODE(ctx->cr, AES_CCM))
373 		for (i = 0; i < ARRAY_SIZE(ctx->pm_gcmccm); i++)
374 			io_write32(ctx->base + _CRYP_CSGCMCCM0R +
375 				   i * sizeof(uint32_t), ctx->pm_gcmccm[i]);
376 
377 	if (IS_ALGOMODE(ctx->cr, AES_GCM))
378 		for (i = 0; i < ARRAY_SIZE(ctx->pm_gcm); i++)
379 			io_write32(ctx->base + _CRYP_CSGCM0R +
380 				   i * sizeof(uint32_t), ctx->pm_gcm[i]);
381 }
382 
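/*
 * Load the key into the key registers, last key word first: keys are
 * right-aligned, ending at K1RR for DES and at K3RR for AES/TDES, so
 * shorter keys leave the remaining key registers unused.
 */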
383 static void cryp_write_key(struct stm32_cryp_context *ctx)
384 {
385 	vaddr_t reg = 0;
386 	int i = 0;
387 	uint32_t algo = GET_ALGOMODE(ctx->cr);
388 
389 	if (algo == _CRYP_CR_ALGOMODE_DES_ECB ||
390 	    algo == _CRYP_CR_ALGOMODE_DES_CBC)
391 		reg = ctx->base + _CRYP_K1RR;
392 	else
393 		reg = ctx->base + _CRYP_K3RR;
394 
395 	for (i = ctx->key_size / sizeof(uint32_t) - 1;
396 	     i >= 0;
397 	     i--, reg -= sizeof(uint32_t))
398 		io_write32(reg, ctx->key[i]);
399 }
400 
401 static TEE_Result cryp_prepare_key(struct stm32_cryp_context *ctx)
402 {
403 	TEE_Result res = TEE_SUCCESS;
404 
405 	/*
406 	 * For AES ECB/CBC decryption, key preparation mode must be selected
407 	 * to populate the key.
408 	 */
409 	if (is_decrypt(ctx->cr) && (IS_ALGOMODE(ctx->cr, AES_ECB) ||
410 				    IS_ALGOMODE(ctx->cr, AES_CBC))) {
411 		/* Select Algomode "prepare key" */
412 		io_clrsetbits32(ctx->base + _CRYP_CR, _CRYP_CR_ALGOMODE_MSK,
413 				_CRYP_CR_ALGOMODE_AES << _CRYP_CR_ALGOMODE_OFF);
414 
415 		cryp_write_key(ctx);
416 
417 		/* Enable CRYP */
418 		io_setbits32(ctx->base + _CRYP_CR, _CRYP_CR_CRYPEN);
419 
420 		res = wait_end_busy(ctx->base);
421 		if (res)
422 			return res;
423 
424 		/* Reset 'real' algomode */
425 		io_clrsetbits32(ctx->base + _CRYP_CR, _CRYP_CR_ALGOMODE_MSK,
426 				ctx->cr & _CRYP_CR_ALGOMODE_MSK);
427 	} else {
428 		cryp_write_key(ctx);
429 	}
430 
431 	return TEE_SUCCESS;
432 }
433 
434 static TEE_Result save_context(struct stm32_cryp_context *ctx)
435 {
436 	/* Device should not be in a processing phase */
437 	if (io_read32(ctx->base + _CRYP_SR) & _CRYP_SR_BUSY)
438 		return TEE_ERROR_BAD_STATE;
439 
440 	/* Disable the CRYP peripheral */
441 	io_clrbits32(ctx->base + _CRYP_CR, _CRYP_CR_CRYPEN);
442 
443 	/* Save CR */
444 	ctx->cr = io_read32(ctx->base + _CRYP_CR);
445 
446 	cryp_save_suspend(ctx);
447 
448 	/* If the algo mode requires it, save the current IV */
449 	if (algo_mode_needs_iv(ctx->cr)) {
450 		unsigned int i = 0;
451 
452 		/* Save IV */
453 		for (i = 0; i < ctx->block_u32; i++)
454 			ctx->iv[i] = io_read32(ctx->base + _CRYP_IV0LR + i *
455 					       sizeof(uint32_t));
456 	}
457 
458 	return TEE_SUCCESS;
459 }
460 
461 /* To resume the processing of a message */
462 static TEE_Result restore_context(struct stm32_cryp_context *ctx)
463 {
464 	TEE_Result res = TEE_SUCCESS;
465 
466 	/* IP should be disabled */
467 	if (io_read32(ctx->base + _CRYP_CR) & _CRYP_CR_CRYPEN) {
468 		DMSG("Device is still enabled");
469 		return TEE_ERROR_BAD_STATE;
470 	}
471 
472 	/* Restore the _CRYP_CR */
473 	io_write32(ctx->base + _CRYP_CR, ctx->cr);
474 
475 	/* Write key and, in case of AES_CBC or AES_ECB decrypt, prepare it */
476 	res = cryp_prepare_key(ctx);
477 	if (res)
478 		return res;
479 
480 	cryp_restore_suspend(ctx);
481 
482 	cryp_write_iv(ctx);
483 
484 	/* Flush internal fifo */
485 	io_setbits32(ctx->base + _CRYP_CR, _CRYP_CR_FFLUSH);
486 
487 	/* Enable the CRYP peripheral */
488 	io_setbits32(ctx->base + _CRYP_CR, _CRYP_CR_CRYPEN);
489 
490 	return TEE_SUCCESS;
491 }
492 
493 /*
494  * Translate a byte index in an array of BE uint32_t into the index of the
495  * same byte in the corresponding LE uint32_t array.
496  */
497 static size_t be_index(size_t index)
498 {
499 	return (index & ~0x3) + 3 - (index & 0x3);
500 }
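
/* For instance, be_index(0) == 3 and be_index(5) == 6. */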
501 
502 static TEE_Result ccm_first_context(struct stm32_cryp_context *ctx)
503 {
504 	TEE_Result res = TEE_SUCCESS;
505 	uint32_t b0[AES_BLOCK_NB_U32] = { 0 };
506 	uint8_t *iv = (uint8_t *)ctx->iv;
507 	size_t l = 0;
508 	size_t i = 15;
509 
510 	/* IP should be disabled */
511 	if (io_read32(ctx->base + _CRYP_CR) & _CRYP_CR_CRYPEN)
512 		return TEE_ERROR_BAD_STATE;
513 
514 	/* Write the _CRYP_CR */
515 	io_write32(ctx->base + _CRYP_CR, ctx->cr);
516 
517 	/* Write key */
518 	res = cryp_prepare_key(ctx);
519 	if (res)
520 		return res;
521 
522 	/* Save the full IV, which is the B0 block */
523 	memcpy(b0, iv, sizeof(b0));
524 
525 	/*
526 	 * Update IV to become CTR0/1 before setting it.
527 	 * IV is saved as LE uint32_t[4] as expected by hardware,
528 	 * but CCM RFC defines bytes to update in a BE array.
529 	 */
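	/*
	 * Illustration (as defined by the CCM spec, RFC 3610): with a 2-byte
	 * length field, B0 is | flags | 13-byte nonce | 2-byte msg length |
	 * and CTR0 keeps only the 3 low flag bits and the nonce, with the
	 * counter field set to 0.
	 */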
530 	/* Set flag bits to 0 (5 higher bits), keep 3 low bits */
531 	iv[be_index(0)] &= 0x7;
532 	/* Get size of length field (can be from 2 to 8) */
533 	l = iv[be_index(0)] + 1;
534 	/* Set Q to 0 */
535 	for (i = 15; i >= 15 - l + 1; i--)
536 		iv[be_index(i)] = 0;
537 	/* Save CTR0 */
538 	memcpy(ctx->ctr0_ccm, iv, sizeof(b0));
539 	/* Increment Q */
540 	iv[be_index(15)] |= 0x1;
541 
542 	cryp_write_iv(ctx);
543 
544 	/* Enable the CRYP peripheral */
545 	io_setbits32(ctx->base + _CRYP_CR, _CRYP_CR_CRYPEN);
546 
547 	res = write_align_block(ctx, b0);
548 
549 	return res;
550 }
551 
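/*
 * GCM/CCM processing walks through the GCM_CCMPH phases INIT, HEADER,
 * PAYLOAD and FINAL. The helpers below restore the saved context and move
 * the peripheral from its previous phase to the requested one.
 */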
552 static TEE_Result do_from_init_to_phase(struct stm32_cryp_context *ctx,
553 					uint32_t new_phase)
554 {
555 	TEE_Result res = TEE_SUCCESS;
556 
557 	/*
558 	 * The init phase has not run yet.
559 	 * CCM needs a specific context restore sequence for the init phase.
560 	 */
561 	if (IS_ALGOMODE(ctx->cr, AES_CCM))
562 		res = ccm_first_context(ctx);
563 	else
564 		res = restore_context(ctx);
565 
566 	if (res)
567 		return res;
568 
569 	res = wait_end_enable(ctx->base);
570 	if (res)
571 		return res;
572 
573 	/* Move to 'new_phase' */
574 	io_clrsetbits32(ctx->base + _CRYP_CR, _CRYP_CR_GCM_CCMPH_MSK,
575 			new_phase << _CRYP_CR_GCM_CCMPH_OFF);
576 
577 	/* Enable the CRYP peripheral (init disabled it) */
578 	io_setbits32(ctx->base + _CRYP_CR, _CRYP_CR_CRYPEN);
579 
580 	return TEE_SUCCESS;
581 }
582 
583 static TEE_Result do_from_header_to_phase(struct stm32_cryp_context *ctx,
584 					  uint32_t new_phase)
585 {
586 	TEE_Result res = TEE_SUCCESS;
587 
588 	res = restore_context(ctx);
589 	if (res)
590 		return res;
591 
592 	if (ctx->extra_size) {
593 		/* Handle pending unaligned header data before moving to the next phase */
594 		memset((uint8_t *)ctx->extra + ctx->extra_size, 0,
595 		       ctx->block_u32 * sizeof(uint32_t) - ctx->extra_size);
596 
597 		res = write_align_block(ctx, ctx->extra);
598 		if (res)
599 			return res;
600 
601 		ctx->assoc_len += (ctx->extra_size) * INT8_BIT;
602 		ctx->extra_size = 0;
603 	}
604 
605 	/* Move to 'new_phase' */
606 	io_clrsetbits32(ctx->base + _CRYP_CR, _CRYP_CR_GCM_CCMPH_MSK,
607 			new_phase << _CRYP_CR_GCM_CCMPH_OFF);
608 
609 	return TEE_SUCCESS;
610 }
611 
612 /**
613  * @brief Start an AES or DES/TDES computation.
614  * @param ctx: CRYP process context
615  * @param is_dec: true if decryption, false if encryption
616  * @param algo: algorithm mode to use
617  * @param key: pointer to the key
618  * @param key_size: key size in bytes
619  * @param iv: pointer to the initialization vector (unused if algo is ECB)
620  * @param iv_size: IV size in bytes
621  * @note this function does not access the hardware, it only fills @ctx
622  *
623  * @retval TEE_SUCCESS if OK.
624  */
625 TEE_Result stm32_cryp_init(struct stm32_cryp_context *ctx, bool is_dec,
626 			   enum stm32_cryp_algo_mode algo,
627 			   const void *key, size_t key_size, const void *iv,
628 			   size_t iv_size)
629 {
630 	unsigned int i = 0;
631 	const uint32_t *iv_u32 = NULL;
632 	uint32_t local_iv[4] = { 0 };
633 	const uint32_t *key_u32 = NULL;
634 	uint32_t local_key[8] = { 0 };
635 
636 	ctx->assoc_len = 0;
637 	ctx->load_len = 0;
638 	ctx->extra_size = 0;
639 	ctx->lock = &cryp_lock;
640 
641 	ctx->base = io_pa_or_va(&cryp_pdata.base, 1);
642 	ctx->cr = _CRYP_CR_RESET_VALUE;
643 
644 	/* We want the key and IV buffers to be u32 aligned */
645 	if (IS_ALIGNED_WITH_TYPE(key, uint32_t)) {
646 		key_u32 = key;
647 	} else {
648 		memcpy(local_key, key, key_size);
649 		key_u32 = local_key;
650 	}
651 
652 	if (IS_ALIGNED_WITH_TYPE(iv, uint32_t)) {
653 		iv_u32 = iv;
654 	} else {
655 		memcpy(local_iv, iv, iv_size);
656 		iv_u32 = local_iv;
657 	}
658 
659 	if (is_dec)
660 		SETBITS(ctx->cr, _CRYP_CR_ALGODIR);
661 	else
662 		CLRBITS(ctx->cr, _CRYP_CR_ALGODIR);
663 
664 	/* Save algo mode */
665 	switch (algo) {
666 	case STM32_CRYP_MODE_TDES_ECB:
667 		SET_ALGOMODE(TDES_ECB, ctx->cr);
668 		break;
669 	case STM32_CRYP_MODE_TDES_CBC:
670 		SET_ALGOMODE(TDES_CBC, ctx->cr);
671 		break;
672 	case STM32_CRYP_MODE_DES_ECB:
673 		SET_ALGOMODE(DES_ECB, ctx->cr);
674 		break;
675 	case STM32_CRYP_MODE_DES_CBC:
676 		SET_ALGOMODE(DES_CBC, ctx->cr);
677 		break;
678 	case STM32_CRYP_MODE_AES_ECB:
679 		SET_ALGOMODE(AES_ECB, ctx->cr);
680 		break;
681 	case STM32_CRYP_MODE_AES_CBC:
682 		SET_ALGOMODE(AES_CBC, ctx->cr);
683 		break;
684 	case STM32_CRYP_MODE_AES_CTR:
685 		SET_ALGOMODE(AES_CTR, ctx->cr);
686 		break;
687 	case STM32_CRYP_MODE_AES_GCM:
688 		SET_ALGOMODE(AES_GCM, ctx->cr);
689 		break;
690 	case STM32_CRYP_MODE_AES_CCM:
691 		SET_ALGOMODE(AES_CCM, ctx->cr);
692 		break;
693 	default:
694 		return TEE_ERROR_BAD_PARAMETERS;
695 	}
696 
697 	/*
698 	 * We use the hardware byte swap (_CRYP_CR_DATATYPE_BYTE) for data,
699 	 * so there is no need to call
700 	 * TEE_U32_TO_BIG_ENDIAN() before writing to the DIN register,
701 	 * nor
702 	 * TEE_U32_FROM_BIG_ENDIAN() after reading from the DOUT register.
703 	 */
704 	clrsetbits(&ctx->cr, _CRYP_CR_DATATYPE_MSK,
705 		   _CRYP_CR_DATATYPE_BYTE << _CRYP_CR_DATATYPE_OFF);
706 
707 	/*
708 	 * Configure the key size for AES algorithms
709 	 * and save the block size.
710 	 */
711 	if (algo_mode_is_aes(ctx->cr)) {
712 		switch (key_size) {
713 		case AES_KEYSIZE_128:
714 			clrsetbits(&ctx->cr, _CRYP_CR_KEYSIZE_MSK,
715 				   _CRYP_CR_KSIZE_128 << _CRYP_CR_KEYSIZE_OFF);
716 			break;
717 		case AES_KEYSIZE_192:
718 			clrsetbits(&ctx->cr, _CRYP_CR_KEYSIZE_MSK,
719 				   _CRYP_CR_KSIZE_192 << _CRYP_CR_KEYSIZE_OFF);
720 			break;
721 		case AES_KEYSIZE_256:
722 			clrsetbits(&ctx->cr, _CRYP_CR_KEYSIZE_MSK,
723 				   _CRYP_CR_KSIZE_256 << _CRYP_CR_KEYSIZE_OFF);
724 			break;
725 		default:
726 			return TEE_ERROR_BAD_PARAMETERS;
727 		}
728 
729 		/* And set block size */
730 		ctx->block_u32 = AES_BLOCK_NB_U32;
731 	} else {
732 		/* And set DES/TDES block size */
733 		ctx->block_u32 = DES_BLOCK_NB_U32;
734 	}
735 
736 	/* Save key in HW order */
737 	ctx->key_size = key_size;
738 	for (i = 0; i < key_size / sizeof(uint32_t); i++)
739 		ctx->key[i] = TEE_U32_TO_BIG_ENDIAN(key_u32[i]);
740 
741 	/* Save IV */
742 	if (algo_mode_needs_iv(ctx->cr)) {
743 		if (!iv || iv_size != ctx->block_u32 * sizeof(uint32_t))
744 			return TEE_ERROR_BAD_PARAMETERS;
745 
746 		/*
747 		 * We save the IV in the byte order expected by the
748 		 * IV registers.
749 		 */
750 		for (i = 0; i < ctx->block_u32; i++)
751 			ctx->iv[i] = TEE_U32_TO_BIG_ENDIAN(iv_u32[i]);
752 	}
753 
754 	/* Reset suspend registers */
755 	memset(ctx->pm_gcmccm, 0, sizeof(ctx->pm_gcmccm));
756 	memset(ctx->pm_gcm, 0, sizeof(ctx->pm_gcm));
757 
758 	return TEE_SUCCESS;
759 }
760 
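/*
 * Illustrative call sequence for a plain cipher operation (sketch only, not
 * part of the driver; key, iv, in, out and size are hypothetical buffers):
 *
 *	struct stm32_cryp_context ctx = { };
 *
 *	stm32_cryp_init(&ctx, false, STM32_CRYP_MODE_AES_CBC,
 *			key, 16, iv, 16);
 *	stm32_cryp_update(&ctx, true, in, out, size);
 *
 * For GCM/CCM, stm32_cryp_update_assodata(), stm32_cryp_update_load() and
 * stm32_cryp_final() are used instead of stm32_cryp_update().
 */
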
761 /**
762  * @brief Update (or start) an AES authentication process with
763  *        associated data (CCM or GCM).
764  * @param ctx: CRYP process context
765  * @param data: pointer to the associated data
766  * @param data_size: data size in bytes
767  * @retval TEE_SUCCESS if OK.
768  */
769 TEE_Result stm32_cryp_update_assodata(struct stm32_cryp_context *ctx,
770 				      uint8_t *data, size_t data_size)
771 {
772 	TEE_Result res = TEE_SUCCESS;
773 	unsigned int i = 0;
774 	uint32_t previous_phase = 0;
775 
776 	/* If no associated data, nothing to do */
777 	if (!data || !data_size)
778 		return TEE_SUCCESS;
779 
780 	mutex_lock(ctx->lock);
781 
782 	previous_phase = (ctx->cr & _CRYP_CR_GCM_CCMPH_MSK) >>
783 			 _CRYP_CR_GCM_CCMPH_OFF;
784 
785 	switch (previous_phase) {
786 	case _CRYP_CR_GCM_CCMPH_INIT:
787 		res = do_from_init_to_phase(ctx, _CRYP_CR_GCM_CCMPH_HEADER);
788 		break;
789 	case _CRYP_CR_GCM_CCMPH_HEADER:
790 		/*
791 		 * Function update_assodata was already called.
792 		 * We only need to restore the context.
793 		 */
794 		res = restore_context(ctx);
795 		break;
796 	default:
797 		assert(0);
798 		res = TEE_ERROR_BAD_STATE;
799 	}
800 
801 	if (res)
802 		goto out;
803 
804 	/* Handle data remaining from a previous update_assodata() call */
805 	if (ctx->extra_size &&
806 	    (ctx->extra_size + data_size >=
807 	     ctx->block_u32 * sizeof(uint32_t))) {
808 		uint32_t block[MAX_BLOCK_NB_U32] = { 0 };
809 
810 		memcpy(block, ctx->extra, ctx->extra_size);
811 		memcpy((uint8_t *)block + ctx->extra_size, data,
812 		       ctx->block_u32 * sizeof(uint32_t) - ctx->extra_size);
813 
814 		res = write_align_block(ctx, block);
815 		if (res)
816 			goto out;
817 
818 		i += ctx->block_u32 * sizeof(uint32_t) - ctx->extra_size;
819 		ctx->extra_size = 0;
820 		ctx->assoc_len += ctx->block_u32 * sizeof(uint32_t) * INT8_BIT;
821 	}
822 
823 	while (data_size - i >= ctx->block_u32 * sizeof(uint32_t)) {
824 		res = write_block(ctx, data + i);
825 		if (res)
826 			goto out;
827 
828 		/* Process next block */
829 		i += ctx->block_u32 * sizeof(uint32_t);
830 		ctx->assoc_len += ctx->block_u32 * sizeof(uint32_t) * INT8_BIT;
831 	}
832 
833 	/*
834 	 * Handle the last block if the size is not a block size multiple:
835 	 * save the remaining data to process it later (potentially with
836 	 * new associated data).
837 	 */
838 	if (i < data_size) {
839 		memcpy((uint8_t *)ctx->extra + ctx->extra_size, data + i,
840 		       data_size - i);
841 		ctx->extra_size += data_size - i;
842 	}
843 
844 	res = save_context(ctx);
845 out:
846 	if (res)
847 		cryp_end(ctx, res);
848 
849 	mutex_unlock(ctx->lock);
850 
851 	return res;
852 }
853 
854 /**
855  * @brief Update (or start) an AES authenticated de/encryption with
856  *        payload data (CCM or GCM).
857  * @param ctx: CRYP process context
858  * @param data_in: pointer to the payload
859  * @param data_out: pointer where the de/encrypted payload is saved
860  * @param data_size: payload size in bytes
861  *
862  * @retval TEE_SUCCESS if OK.
863  */
864 TEE_Result stm32_cryp_update_load(struct stm32_cryp_context *ctx,
865 				  uint8_t *data_in, uint8_t *data_out,
866 				  size_t data_size)
867 {
868 	TEE_Result res = TEE_SUCCESS;
869 	unsigned int i = 0;
870 	uint32_t previous_phase = 0;
871 
872 	if (!data_in || !data_size)
873 		return TEE_SUCCESS;
874 
875 	mutex_lock(ctx->lock);
876 
877 	previous_phase = (ctx->cr & _CRYP_CR_GCM_CCMPH_MSK) >>
878 			 _CRYP_CR_GCM_CCMPH_OFF;
879 
880 	switch (previous_phase) {
881 	case _CRYP_CR_GCM_CCMPH_INIT:
882 		res = do_from_init_to_phase(ctx, _CRYP_CR_GCM_CCMPH_PAYLOAD);
883 		break;
884 	case _CRYP_CR_GCM_CCMPH_HEADER:
885 		res = do_from_header_to_phase(ctx, _CRYP_CR_GCM_CCMPH_PAYLOAD);
886 		break;
887 	case _CRYP_CR_GCM_CCMPH_PAYLOAD:
888 		/* New update_load() call, we only need to restore the context */
889 		res = restore_context(ctx);
890 		break;
891 	default:
892 		assert(0);
893 		res = TEE_ERROR_BAD_STATE;
894 	}
895 
896 	if (res)
897 		goto out;
898 
899 	/* Handle an incomplete block left from a previous update_load() call */
900 	if (ctx->extra_size &&
901 	    (ctx->extra_size + data_size >=
902 	     ctx->block_u32 * sizeof(uint32_t))) {
903 		uint32_t block_out[MAX_BLOCK_NB_U32] = { 0 };
904 
905 		memcpy((uint8_t *)ctx->extra + ctx->extra_size, data_in + i,
906 		       ctx->block_u32 * sizeof(uint32_t) - ctx->extra_size);
907 
908 		res = write_align_block(ctx, ctx->extra);
909 		if (res)
910 			goto out;
911 
912 		res = read_align_block(ctx, block_out);
913 		if (res)
914 			goto out;
915 
916 		memcpy(data_out + i, (uint8_t *)block_out + ctx->extra_size,
917 		       ctx->block_u32 * sizeof(uint32_t) - ctx->extra_size);
918 
919 		i += ctx->block_u32 * sizeof(uint32_t) - ctx->extra_size;
920 		ctx->extra_size = 0;
921 
922 		ctx->load_len += ctx->block_u32 * sizeof(uint32_t) * INT8_BIT;
923 	}
924 
925 	while (data_size - i >= ctx->block_u32 * sizeof(uint32_t)) {
926 		res = write_block(ctx, data_in + i);
927 		if (res)
928 			goto out;
929 
930 		res = read_block(ctx, data_out + i);
931 		if (res)
932 			goto out;
933 
934 		/* Process next block */
935 		i += ctx->block_u32 * sizeof(uint32_t);
936 		ctx->load_len += ctx->block_u32 * sizeof(uint32_t) * INT8_BIT;
937 	}
938 
939 	res = save_context(ctx);
940 	if (res)
941 		goto out;
942 
943 	/*
944 	 * Handle the last block if the size is not a block size multiple.
945 	 * The context is already saved: pad the block with zeros and send
946 	 * it to the CRYP peripheral to get the {en,de}crypted data, then
947 	 * store the input so it can be resent as the last block in final()
948 	 * or used to complete the next update_load() and get a correct tag.
949 	 */
950 	if (i < data_size) {
951 		uint32_t block_out[MAX_BLOCK_NB_U32] = { 0 };
952 		size_t prev_extra_size = ctx->extra_size;
953 
954 		/* Re-enable the CRYP peripheral */
955 		io_setbits32(ctx->base + _CRYP_CR, _CRYP_CR_CRYPEN);
956 
957 		memcpy((uint8_t *)ctx->extra + ctx->extra_size, data_in + i,
958 		       data_size - i);
959 		ctx->extra_size += data_size - i;
960 		memset((uint8_t *)ctx->extra + ctx->extra_size, 0,
961 		       ctx->block_u32 * sizeof(uint32_t) - ctx->extra_size);
962 
963 		res = write_align_block(ctx, ctx->extra);
964 		if (res)
965 			goto out;
966 
967 		res = read_align_block(ctx, block_out);
968 		if (res)
969 			goto out;
970 
971 		memcpy(data_out + i, (uint8_t *)block_out + prev_extra_size,
972 		       data_size - i);
973 
974 		/* Disable the CRYP peripheral */
975 		io_clrbits32(ctx->base + _CRYP_CR, _CRYP_CR_CRYPEN);
976 	}
977 
978 out:
979 	if (res)
980 		cryp_end(ctx, res);
981 
982 	mutex_unlock(ctx->lock);
983 
984 	return res;
985 }
986 
987 /**
988  * @brief Get authentication tag for AES authenticated algorithms (CCM or GCM).
989  * @param ctx: CRYP process context
990  * @param tag: pointer where the tag is saved
991  * @param tag_size: tag size in bytes
992  *
993  * @retval TEE_SUCCESS if OK.
994  */
995 TEE_Result stm32_cryp_final(struct stm32_cryp_context *ctx, uint8_t *tag,
996 			    size_t tag_size)
997 {
998 	TEE_Result res = TEE_SUCCESS;
999 	uint32_t tag_u32[4] = { 0 };
1000 	uint32_t previous_phase = 0;
1001 
1002 	mutex_lock(ctx->lock);
1003 
1004 	previous_phase = (ctx->cr & _CRYP_CR_GCM_CCMPH_MSK) >>
1005 			 _CRYP_CR_GCM_CCMPH_OFF;
1006 
1007 	switch (previous_phase) {
1008 	case _CRYP_CR_GCM_CCMPH_INIT:
1009 		res = do_from_init_to_phase(ctx, _CRYP_CR_GCM_CCMPH_FINAL);
1010 		break;
1011 	case _CRYP_CR_GCM_CCMPH_HEADER:
1012 		res = do_from_header_to_phase(ctx, _CRYP_CR_GCM_CCMPH_FINAL);
1013 		break;
1014 	case _CRYP_CR_GCM_CCMPH_PAYLOAD:
1015 		res = restore_context(ctx);
1016 		if (res)
1017 			break;
1018 
1019 		/* Handle an incomplete block left from a previous update_load() */
1020 		if (ctx->extra_size) {
1021 			uint32_t block_out[MAX_BLOCK_NB_U32] = { 0 };
1022 			size_t sz = ctx->block_u32 * sizeof(uint32_t) -
1023 				    ctx->extra_size;
1024 
1025 			if (does_need_npblb(ctx->cr)) {
1026 				io_clrsetbits32(ctx->base + _CRYP_CR,
1027 						_CRYP_CR_NPBLB_MSK,
1028 						sz << _CRYP_CR_NPBLB_OFF);
1029 			}
1030 
1031 			memset((uint8_t *)ctx->extra + ctx->extra_size, 0, sz);
1032 
1033 			res = write_align_block(ctx, ctx->extra);
1034 			if (res)
1035 				break;
1036 
1037 			/* Discard {en,de}crypted data, already returned by update_load() */
1038 			res = read_align_block(ctx, block_out);
1039 			if (res)
1040 				break;
1041 
1042 			ctx->load_len += (ctx->extra_size * INT8_BIT);
1043 			ctx->extra_size = 0;
1044 		}
1045 
1046 		/* Move to final phase */
1047 		io_clrsetbits32(ctx->base + _CRYP_CR, _CRYP_CR_GCM_CCMPH_MSK,
1048 				_CRYP_CR_GCM_CCMPH_FINAL <<
1049 				_CRYP_CR_GCM_CCMPH_OFF);
1050 		break;
1051 	default:
1052 		assert(0);
1053 		res = TEE_ERROR_BAD_STATE;
1054 	}
1055 
1056 	if (res)
1057 		goto out;
1058 
1059 	if (IS_ALGOMODE(ctx->cr, AES_GCM)) {
1060 		/* No need to htobe() as we configure the HW to swap bytes */
1061 		io_write32(ctx->base + _CRYP_DIN, 0U);
1062 		io_write32(ctx->base + _CRYP_DIN, ctx->assoc_len);
1063 		io_write32(ctx->base + _CRYP_DIN, 0U);
1064 		io_write32(ctx->base + _CRYP_DIN, ctx->load_len);
1065 	} else if (IS_ALGOMODE(ctx->cr, AES_CCM)) {
1066 		/* No need to htobe() in this phase */
1067 		res = write_align_block(ctx, ctx->ctr0_ccm);
1068 		if (res)
1069 			goto out;
1070 	}
1071 
1072 	res = read_align_block(ctx, tag_u32);
1073 	if (res)
1074 		goto out;
1075 
1076 	memcpy(tag, tag_u32, MIN(sizeof(tag_u32), tag_size));
1077 
1078 out:
1079 	cryp_end(ctx, res);
1080 	mutex_unlock(ctx->lock);
1081 
1082 	return res;
1083 }
1084 
1085 /**
1086  * @brief Update (or start) a de/encrypt process.
1087  * @param ctx: CRYP process context
1088  * @param last_block: true if last payload data block
1089  * @param data_in: pointer to payload
1090  * @param data_out: pointer where to save de/encrypted payload
1091  * @param data_size: payload size
1092  *
1093  * @retval TEE_SUCCESS if OK.
1094  */
1095 TEE_Result stm32_cryp_update(struct stm32_cryp_context *ctx, bool last_block,
1096 			     uint8_t *data_in, uint8_t *data_out,
1097 			     size_t data_size)
1098 {
1099 	TEE_Result res = TEE_SUCCESS;
1100 	unsigned int i = 0;
1101 
1102 	mutex_lock(ctx->lock);
1103 
1104 	/*
1105 	 * In CBC and ECB encryption, the last 2 blocks need specific
1106 	 * handling when the total size is not aligned to a block size.
1107 	 * This is not implemented yet, so return TEE_ERROR_NOT_IMPLEMENTED.
1108 	 * Moreover, as the last 2 blocks are needed, return
1109 	 * TEE_ERROR_BAD_STATE if unaligned and fewer than two blocks given.
1110 	 */
1111 	if (last_block && algo_mode_is_ecb_cbc(ctx->cr) &&
1112 	    is_encrypt(ctx->cr) &&
1113 	    (ROUNDDOWN(data_size, ctx->block_u32 * sizeof(uint32_t)) !=
1114 	     data_size)) {
1115 		if (data_size < ctx->block_u32 * sizeof(uint32_t) * 2) {
1116 			/*
1117 			 * In CBC, the size of the last part should be
1118 			 * at least 2 * BLOCK_SIZE.
1119 			 */
1120 			EMSG("Unexpected last block size");
1121 			res = TEE_ERROR_BAD_STATE;
1122 			goto out;
1123 		}
1124 		/*
1125 		 * Moreover, the ECB/CBC specific padding for encryption is
1126 		 * not implemented yet, and is not used in OP-TEE.
1127 		 */
1128 		res = TEE_ERROR_NOT_IMPLEMENTED;
1129 		goto out;
1130 	}
1131 
1132 	/* Handle the remaining CTR mask from a previous update call */
1133 	if (IS_ALGOMODE(ctx->cr, AES_CTR) && ctx->extra_size) {
1134 		unsigned int j = 0;
1135 		uint8_t *mask = (uint8_t *)ctx->extra;
1136 
1137 		for (j = 0; j < ctx->extra_size && i < data_size; j++, i++)
1138 			data_out[i] = data_in[i] ^ mask[j];
1139 
1140 		if (j != ctx->extra_size) {
1141 			/*
1142 			 * We did not consume all of the saved mask,
1143 			 * but there is no more input data.
1144 			 */
1145 
1146 			/* We save remaining mask and its new size */
1147 			memmove(ctx->extra, ctx->extra + j,
1148 				ctx->extra_size - j);
1149 			ctx->extra_size -= j;
1150 
1151 			/*
1152 			 * We do not need to save the HW context as we
1153 			 * did not modify the HW state.
1154 			 */
1155 			res = TEE_SUCCESS;
1156 			goto out;
1157 		}
1158 
1159 		/* All extra mask consumed */
1160 		ctx->extra_size = 0;
1161 	}
1162 
1163 	res = restore_context(ctx);
1164 	if (res)
1165 		goto out;
1166 
1167 	while (data_size - i >= ctx->block_u32 * sizeof(uint32_t)) {
1168 		/*
1169 		 * We only write/read one block at a time, even though the
1170 		 * CRYP input (and output) FIFO holds 8 * uint32_t.
1171 		 */
1172 		res = write_block(ctx, data_in + i);
1173 		if (res)
1174 			goto out;
1175 
1176 		res = read_block(ctx, data_out + i);
1177 		if (res)
1178 			goto out;
1179 
1180 		/* Process next block */
1181 		i += ctx->block_u32 * sizeof(uint32_t);
1182 	}
1183 
1184 	/* Handle the last block if the size is not a block size multiple */
1185 	if (i < data_size) {
1186 		uint32_t block_in[MAX_BLOCK_NB_U32] = { 0 };
1187 		uint32_t block_out[MAX_BLOCK_NB_U32] = { 0 };
1188 
1189 		if (!IS_ALGOMODE(ctx->cr, AES_CTR)) {
1190 			/*
1191 			 * Algorithms other than CTR can only handle sizes
1192 			 * that are a multiple of the block size.
1193 			 */
1194 			res = TEE_ERROR_BAD_PARAMETERS;
1195 			goto out;
1196 		}
1197 
1198 		/*
1199 		 * For CTR we save the generated mask to use it at the
1200 		 * next update call.
1201 		 */
1202 		memcpy(block_in, data_in + i, data_size - i);
1203 
1204 		res = write_align_block(ctx, block_in);
1205 		if (res)
1206 			goto out;
1207 
1208 		res = read_align_block(ctx, block_out);
1209 		if (res)
1210 			goto out;
1211 
1212 		memcpy(data_out + i, block_out, data_size - i);
1213 
1214 		/* Save the mask for a possible next call */
1215 		ctx->extra_size = ctx->block_u32 * sizeof(uint32_t) -
1216 			(data_size - i);
1217 		memcpy(ctx->extra, (uint8_t *)block_out + data_size - i,
1218 		       ctx->extra_size);
1219 	}
1220 
1221 	if (!last_block)
1222 		res = save_context(ctx);
1223 
1224 out:
1225 	/* If last block or error, end of CRYP process */
1226 	if (last_block || res)
1227 		cryp_end(ctx, res);
1228 
1229 	mutex_unlock(ctx->lock);
1230 
1231 	return res;
1232 }
1233 
1234 static TEE_Result stm32_cryp_probe(const void *fdt, int node,
1235 				   const void *compt_data __unused)
1236 {
1237 	TEE_Result res = TEE_SUCCESS;
1238 	struct dt_node_info dt_cryp = { };
1239 	struct rstctrl *rstctrl = NULL;
1240 	struct clk *clk = NULL;
1241 
1242 	fdt_fill_device_info(fdt, &dt_cryp, node);
1243 
1244 	if (dt_cryp.reg == DT_INFO_INVALID_REG ||
1245 	    dt_cryp.reg_size == DT_INFO_INVALID_REG_SIZE)
1246 		panic();
1247 
1248 	res = clk_dt_get_by_index(fdt, node, 0, &clk);
1249 	if (res)
1250 		return res;
1251 
1252 	res = rstctrl_dt_get_by_index(fdt, node, 0, &rstctrl);
1253 	if (res != TEE_SUCCESS && res != TEE_ERROR_ITEM_NOT_FOUND)
1254 		return res;
1255 
1256 	cryp_pdata.clock = clk;
1257 	cryp_pdata.reset = rstctrl;
1258 	cryp_pdata.base.pa = dt_cryp.reg;
1259 
1260 	io_pa_or_va_secure(&cryp_pdata.base, dt_cryp.reg_size);
1261 	if (!cryp_pdata.base.va)
1262 		panic();
1263 
1264 	stm32mp_register_secure_periph_iomem(cryp_pdata.base.pa);
1265 
1266 	if (clk_enable(cryp_pdata.clock))
1267 		panic();
1268 
1269 	if (rstctrl && rstctrl_assert_to(cryp_pdata.reset, TIMEOUT_US_1MS))
1270 		panic();
1271 
1272 	if (rstctrl && rstctrl_deassert_to(cryp_pdata.reset, TIMEOUT_US_1MS))
1273 		panic();
1274 
1275 	if (IS_ENABLED(CFG_CRYPTO_DRV_AUTHENC)) {
1276 		res = stm32_register_authenc();
1277 		if (res) {
1278 			EMSG("Failed to register to authenc: %#"PRIx32, res);
1279 			panic();
1280 		}
1281 	}
1282 
1283 	if (IS_ENABLED(CFG_CRYPTO_DRV_CIPHER)) {
1284 		res = stm32_register_cipher(CRYP_IP);
1285 		if (res) {
1286 			EMSG("Failed to register to cipher: %#"PRIx32, res);
1287 			panic();
1288 		}
1289 	}
1290 
1291 	return TEE_SUCCESS;
1292 }
1293 
1294 static const struct dt_device_match stm32_cryp_match_table[] = {
1295 	{ .compatible = "st,stm32mp1-cryp" },
1296 	{ }
1297 };
1298 
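/*
 * Example of a matching device tree node (illustrative only, the address
 * and clock/reset phandles depend on the platform):
 *
 *	cryp1: cryp@54001000 {
 *		compatible = "st,stm32mp1-cryp";
 *		reg = <0x54001000 0x400>;
 *		clocks = <&rcc CRYP1>;
 *		resets = <&rcc CRYP1_R>;
 *	};
 */
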
1299 DEFINE_DT_DRIVER(stm32_cryp_dt_driver) = {
1300 	.name = "stm32-cryp",
1301 	.match_table = stm32_cryp_match_table,
1302 	.probe = stm32_cryp_probe,
1303 };
1304