// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2020 NXP
 */

#include <assert.h>
#include <config.h>
#include <dcp_utils.h>
#include <drivers/imx/dcp.h>
#include <imx-regs.h>
#include <io.h>
#include <kernel/boot.h>
#include <kernel/dt.h>
#include <kernel/mutex.h>
#include <kernel/spinlock.h>
#include <libfdt.h>
#include <local.h>
#include <mm/core_memprot.h>
#include <string.h>
#include <tee/cache.h>
#include <utee_defines.h>
#include <util.h>

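/*
 * Digests of the zero-length message, returned by dcp_sha_do_final() when
 * no input data was fed into the hash operation.
 */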
static const uint8_t sha1_null_msg[] = {
	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, 0x55,
	0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, 0x07, 0x09,
};

static const uint8_t sha256_null_msg[] = {
	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4,
	0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b,
	0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55,
};

static vaddr_t dcp_base;
static bool driver_initialized;
static unsigned int clk_refcount;
static unsigned int key_store_spinlock = SPINLOCK_UNLOCK;
static unsigned int clock_spinlock = SPINLOCK_UNLOCK;
static struct dcp_align_buf hw_context_buffer;

static struct mutex lock_channel[DCP_NB_CHANNELS] = {
	[DCP_CHANN0] = MUTEX_INITIALIZER,
	[DCP_CHANN1] = MUTEX_INITIALIZER,
	[DCP_CHANN2] = MUTEX_INITIALIZER,
	[DCP_CHANN3] = MUTEX_INITIALIZER,
};

static const struct dcp_hashalg hash_alg[2] = {
	[DCP_SHA1] = {
		.type = DCP_CONTROL1_HASH_SELECT_SHA1,
		.size = TEE_SHA1_HASH_SIZE,
	},
	[DCP_SHA256] = {
		.type = DCP_CONTROL1_HASH_SELECT_SHA256,
		.size = TEE_SHA256_HASH_SIZE,
	},
};

/*
 * Enable/disable the DCP clock.
 *
 * @enable   Enable the clock if true, disable it if false
 */
static void dcp_clk_enable(bool enable)
{
	vaddr_t ccm_base = core_mmu_get_va(CCM_BASE, MEM_AREA_IO_SEC);
	uint32_t clock_except = cpu_spin_lock_xsave(&clock_spinlock);

	if (enable) {
		if (clk_refcount > 0) {
			clk_refcount++;
			goto out;
		} else {
			clk_refcount++;
			io_setbits32(ccm_base + CCM_CCGR0, DCP_CLK_ENABLE_MASK);
		}
	} else {
		assert(clk_refcount != 0);

		clk_refcount--;
		if (clk_refcount > 0)
			goto out;
		else
			io_clrbits32(ccm_base + CCM_CCGR0, DCP_CLK_ENABLE_MASK);
	}
out:
	cpu_spin_unlock_xrestore(&clock_spinlock, clock_except);
}

/*
 * Try to lock the given channel with a mutex.
 *
 * @chan   DCP channel to lock
 */
static TEE_Result dcp_lock_known_channel(enum dcp_channel chan)
{
	if (mutex_trylock(&lock_channel[chan]))
		return TEE_SUCCESS;
	else
		return TEE_ERROR_BUSY;
}

/*
 * Lock the first available DCP channel.
 *
 * @channel   [out] DCP channel that was locked
 */
static TEE_Result dcp_lock_channel(enum dcp_channel *channel)
{
	TEE_Result ret = TEE_ERROR_BUSY;
	enum dcp_channel chan = DCP_CHANN0;

	for (chan = DCP_CHANN0; chan < DCP_NB_CHANNELS; chan++) {
		ret = dcp_lock_known_channel(chan);
		if (ret == TEE_SUCCESS) {
			*channel = chan;
			return ret;
		}
	}

	EMSG("All channels are busy");

	return ret;
}

/*
 * Unlock the given channel.
 *
 * @chan   DCP channel to unlock
 */
static void dcp_unlock_channel(enum dcp_channel chan)
{
	mutex_unlock(&lock_channel[chan]);
}

/*
 * Start the DCP operation.
 *
 * @dcp_data   Structure containing the DCP descriptor configuration and the
 *	       channel to use
 */
static TEE_Result dcp_run(struct dcp_data *dcp_data)
{
	TEE_Result ret = TEE_SUCCESS;
	unsigned int timeout = 0;
	uint32_t val = 0;

	dcp_data->desc.next = 0;
	cache_operation(TEE_CACHEFLUSH, &dcp_data->desc,
			sizeof(dcp_data->desc));

	/* Enable the clock if it is not already enabled */
	dcp_clk_enable(true);

	/* Clear the DCP_STAT IRQ field for the channel used by the operation */
	io_clrbits32(dcp_base + DCP_STAT, BIT32(dcp_data->channel));

	/* Clear CH_N_STAT to clear IRQ and error codes */
	io_write32(dcp_base + DCP_CH_N_STAT(dcp_data->channel), 0x0);

	/* Set the descriptor to be processed for the channel */
	io_write32(dcp_base + DCP_CH_N_CMDPTR(dcp_data->channel),
		   virt_to_phys(&dcp_data->desc));

	/* Increment the semaphore to start the transfer */
	io_write32(dcp_base + DCP_CH_N_SEMA(dcp_data->channel), 0x1);

	for (timeout = 0; timeout < DCP_MAX_TIMEOUT; timeout++) {
		dcp_udelay(10);
		val = io_read32(dcp_base + DCP_STAT);
		if (val & BIT32(dcp_data->channel))
			break;
	}

	if (timeout == DCP_MAX_TIMEOUT) {
		EMSG("Timeout elapsed before the operation completed");
		ret = TEE_ERROR_GENERIC;
		goto out;
	}

	val = io_read32(dcp_base + DCP_CH_N_STAT(dcp_data->channel));
	if (val & DCP_CH_STAT_ERROR_MASK) {
		EMSG("Operation error, status 0x%" PRIx32, val);
		ret = TEE_ERROR_GENERIC;
	}

out:
	dcp_clk_enable(false);

	return ret;
}

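/*
 * Generate the two CMAC subkeys K1 and K2 as described in NIST SP 800-38B:
 * L is the encryption of the all-zero block, and each subkey is derived
 * from it by a left shift, XORed with the constant Rb (0x87 in the last
 * byte) when the shifted-out bit is set.
 *
 * @init   Cipher initialization parameters used for the CMAC operation
 * @k1     [out] Subkey K1, applied when the last block is complete
 * @k2     [out] Subkey K2, applied when the last block must be padded
 */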
static TEE_Result dcp_cmac_subkey_generation(struct dcp_cipher_init *init,
					     uint8_t *k1, uint8_t *k2)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	struct dcp_cipher_data data = { };
	uint8_t l[16] = { };
	uint8_t tmp[16] = { };
	uint8_t const_zero[16] = { };
	uint8_t const_rb[16] = { [15] = 0x87 };

	ret = dcp_cipher_do_init(&data, init);
	if (ret != TEE_SUCCESS)
		return ret;

	ret = dcp_cipher_do_update(&data, const_zero, l, sizeof(l));
	if (ret != TEE_SUCCESS)
		goto out;

	if ((l[0] & BIT(7)) == 0) {
		dcp_left_shift_buffer(l, k1, 16);
	} else {
		dcp_left_shift_buffer(l, tmp, 16);
		dcp_xor(tmp, const_rb, k1, 16);
	}

	if ((k1[0] & BIT(7)) == 0) {
		dcp_left_shift_buffer(k1, k2, 16);
	} else {
		dcp_left_shift_buffer(k1, tmp, 16);
		dcp_xor(tmp, const_rb, k2, 16);
	}

	ret = TEE_SUCCESS;
out:
	dcp_cipher_do_final(&data);

	return ret;
}

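/*
 * Store an AES-128 key in the DCP SRAM key storage.
 *
 * @key     Key to store, as DCP_SRAM_KEY_NB_SUBWORD 32-bit words
 * @index   Index of the key storage slot
 */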
TEE_Result dcp_store_key(uint32_t *key, unsigned int index)
{
	uint32_t val = 0;
	unsigned int i = 0;
	uint32_t key_store_except = 0;

	if (!key)
		return TEE_ERROR_BAD_PARAMETERS;

	if (index > DCP_SRAM_KEY_NB_SUBWORD - 1) {
		EMSG("Bad parameters, index must be < %u",
		     DCP_SRAM_KEY_NB_SUBWORD);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	key_store_except = cpu_spin_lock_xsave(&key_store_spinlock);

	dcp_clk_enable(true);

	val = DCP_SRAM_KEY_INDEX(index);
	io_write32(dcp_base + DCP_KEY, val);

	/*
	 * The key is stored as four uint32_t values, starting with subword0
	 * (least significant word)
	 */
	for (i = 0; i < DCP_SRAM_KEY_NB_SUBWORD; i++) {
		val = TEE_U32_TO_BIG_ENDIAN(key[i]);
		io_write32(dcp_base + DCP_KEYDATA, val);
	}

	dcp_clk_enable(false);

	cpu_spin_unlock_xrestore(&key_store_spinlock, key_store_except);

	return TEE_SUCCESS;
}

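/*
 * Compute an AES-128 CMAC (NIST SP 800-38B) over the input buffer.
 *
 * @init         Cipher initialization parameters (key selection or value)
 * @input        Message to authenticate, may be NULL if input_size is zero
 * @input_size   Message size in bytes
 * @output       [out] 16-byte CMAC tag
 *
 * Minimal usage sketch (illustrative only; the variable names and field
 * values are assumptions, not taken from an in-tree caller):
 *
 *	struct dcp_cipher_init init = {
 *		.op = DCP_ENCRYPT,
 *		.key_mode = DCP_PAYLOAD,
 *		.key = key128,
 *	};
 *	uint8_t mac[DCP_AES128_BLOCK_SIZE] = { };
 *
 *	res = dcp_cmac(&init, msg, msg_size, mac);
 */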
TEE_Result dcp_cmac(struct dcp_cipher_init *init, uint8_t *input,
		    size_t input_size, uint8_t *output)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	uint8_t key1[DCP_AES128_KEY_SIZE] = { };
	uint8_t key2[DCP_AES128_KEY_SIZE] = { };
	unsigned int nb_blocks = 0;
	bool block_complete = false;
	struct dcp_cipher_data data = { };
	uint8_t y[DCP_AES128_BLOCK_SIZE] = { };
	uint8_t x[DCP_AES128_BLOCK_SIZE] = { };
	uint8_t last[DCP_AES128_BLOCK_SIZE] = { };
	unsigned int i = 0;
	size_t offset = 0;

	if (!output || !init)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!input && input_size)
		return TEE_ERROR_BAD_PARAMETERS;

	ret = dcp_cipher_do_init(&data, init);
	if (ret != TEE_SUCCESS)
		return ret;

	/* Generate the CMAC subkeys */
	ret = dcp_cmac_subkey_generation(init, key1, key2);
	if (ret != TEE_SUCCESS)
		goto out;

	/* Get the number of blocks */
	nb_blocks = ROUNDUP(input_size, DCP_AES128_BLOCK_SIZE) /
		    DCP_AES128_BLOCK_SIZE;

	block_complete = nb_blocks && !(input_size % DCP_AES128_BLOCK_SIZE);
	if (nb_blocks == 0)
		nb_blocks = 1;

	for (i = 0; i < nb_blocks - 1; i++) {
		dcp_xor(x, input + offset, y, DCP_AES128_BLOCK_SIZE);
		ret = dcp_cipher_do_update(&data, y, x,
					   DCP_AES128_BLOCK_SIZE);
		if (ret)
			goto out;
		offset += DCP_AES128_BLOCK_SIZE;
	}

	/* Process the last block */
	memcpy(last, input + offset, input_size - offset);

	if (block_complete) {
		dcp_xor(last, key1, last, DCP_AES128_BLOCK_SIZE);
	} else {
		dcp_cmac_padding(last, input_size % DCP_AES128_BLOCK_SIZE);
		dcp_xor(last, key2, last, DCP_AES128_BLOCK_SIZE);
	}

	dcp_xor(x, last, y, DCP_AES128_BLOCK_SIZE);
	ret = dcp_cipher_do_update(&data, y, x, DCP_AES128_BLOCK_SIZE);
	if (ret)
		goto out;

	memcpy(output, x, DCP_AES128_BLOCK_SIZE);

out:
	dcp_cipher_do_final(&data);

	return ret;
}

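/*
 * Initialize an AES-128 cipher operation: reserve a DCP channel, build the
 * work packet (descriptor) and save the key and IV for the operation.
 *
 * @data   [out] Cipher operation context
 * @init   Operation parameters: direction, mode, key selection and IV
 */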
TEE_Result dcp_cipher_do_init(struct dcp_cipher_data *data,
			      struct dcp_cipher_init *init)
{
	struct dcp_descriptor *desc = NULL;
	TEE_Result ret = TEE_ERROR_GENERIC;

	if (!init || !data)
		return TEE_ERROR_BAD_PARAMETERS;

	ret = dcp_lock_channel(&data->dcp_data.channel);
	if (ret != TEE_SUCCESS)
		return ret;

	desc = &data->dcp_data.desc;

	desc->ctrl0 = DCP_CONTROL0_DECR_SEMAPHORE | DCP_CONTROL0_ENABLE_CIPHER |
		      DCP_CONTROL0_INTERRUPT_ENABLE;
	desc->ctrl1 = DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (init->op == DCP_ENCRYPT)
		desc->ctrl0 |= DCP_CONTROL0_CIPHER_ENCRYPT;

	if (init->key_mode == DCP_OTP) {
		desc->ctrl0 |= DCP_CONTROL0_OTP_KEY;
		desc->ctrl1 |= DCP_CONTROL1_KEY_SELECT_OTP_CRYPTO;
	} else if (init->key_mode == DCP_PAYLOAD) {
		desc->ctrl0 |= DCP_CONTROL0_PAYLOAD_KEY;
		if (!init->key) {
			ret = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}
		memcpy(data->key, init->key, DCP_AES128_KEY_SIZE);
	} else {
		desc->ctrl1 |= SHIFT_U32(init->key_mode, 8);
	}

	if (init->mode == DCP_CBC) {
		desc->ctrl0 |= DCP_CONTROL0_CIPHER_INIT;
		desc->ctrl1 |= DCP_CONTROL1_CIPHER_MODE_CBC;
		if (!init->iv) {
			ret = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}
		memcpy(data->iv, init->iv, DCP_AES128_IV_SIZE);
	}

	/* Allocate an aligned buffer for the DCP key and IV payload */
	ret = dcp_calloc_align_buf(&data->payload,
				   DCP_AES128_IV_SIZE + DCP_AES128_KEY_SIZE);
	if (ret != TEE_SUCCESS)
		goto err;

	desc->src_buffer = 0;
	desc->dest_buffer = 0;
	desc->status = 0;
	desc->buff_size = 0;
	desc->next = virt_to_phys(desc);

	data->initialized = true;

	return TEE_SUCCESS;

err:
	/* Do not leave the channel locked on error */
	dcp_unlock_channel(data->dcp_data.channel);

	return ret;
}

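/*
 * Run one cipher update through the DCP. The size must be a multiple of
 * the AES-128 block size.
 *
 * @data   Cipher operation context
 * @src    Input buffer
 * @dst    [out] Output buffer of at least size bytes
 * @size   Number of bytes to process
 */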
TEE_Result dcp_cipher_do_update(struct dcp_cipher_data *data,
				const uint8_t *src, uint8_t *dst, size_t size)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	struct dcp_align_buf output = { };
	struct dcp_align_buf input = { };
	struct dcp_descriptor *desc = NULL;

	if (!data || !src || !dst)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!data->initialized) {
		EMSG("Error, please call dcp_cipher_do_init() first");
		return TEE_ERROR_BAD_STATE;
	}

	if (size % DCP_AES128_BLOCK_SIZE) {
		EMSG("Input size has to be a multiple of %zu bytes",
		     (size_t)DCP_AES128_BLOCK_SIZE);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	ret = dcp_calloc_align_buf(&output, size);
	if (ret != TEE_SUCCESS)
		goto out;

	ret = dcp_calloc_align_buf(&input, size);
	if (ret != TEE_SUCCESS)
		goto out;

	desc = &data->dcp_data.desc;

	/* Copy the input data */
	memcpy(input.data, src, size);

	/* Copy the key and, for the first update of a CBC operation, the IV */
	memcpy(data->payload.data, data->key, DCP_AES128_KEY_SIZE);
	data->payload_size = DCP_AES128_KEY_SIZE;
	if (desc->ctrl0 & DCP_CONTROL0_CIPHER_INIT) {
		memcpy(data->payload.data + DCP_AES128_KEY_SIZE, data->iv,
		       DCP_AES128_IV_SIZE);
		data->payload_size += DCP_AES128_IV_SIZE;
	}

	desc->src_buffer = input.paddr;
	desc->dest_buffer = output.paddr;
	desc->payload = data->payload.paddr;
	desc->buff_size = size;

	cache_operation(TEE_CACHECLEAN, data->payload.data,
			data->payload_size);
	cache_operation(TEE_CACHECLEAN, input.data, size);
	cache_operation(TEE_CACHEINVALIDATE, output.data, size);

	ret = dcp_run(&data->dcp_data);
	if (ret)
		goto out;

	cache_operation(TEE_CACHEINVALIDATE, output.data, size);

	/* The IV is only passed with the first update */
	desc->ctrl0 &= ~DCP_CONTROL0_CIPHER_INIT;

	memcpy(dst, output.data, size);
out:
	dcp_free(&output);
	dcp_free(&input);

	return ret;
}

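/*
 * Terminate the cipher operation: free the payload buffer and release the
 * DCP channel reserved by dcp_cipher_do_init().
 *
 * @data   Cipher operation context
 */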
void dcp_cipher_do_final(struct dcp_cipher_data *data)
{
	if (!data)
		return;

	data->initialized = false;

	dcp_free(&data->payload);
	dcp_unlock_channel(data->dcp_data.channel);
}
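
/*
 * Typical cipher call sequence (illustrative sketch only; key128, iv,
 * buf_in, buf_out and their sizes are assumptions, not an in-tree caller):
 *
 *	struct dcp_cipher_data data = { };
 *	struct dcp_cipher_init init = {
 *		.op = DCP_ENCRYPT,
 *		.mode = DCP_CBC,
 *		.key_mode = DCP_PAYLOAD,
 *		.key = key128,
 *		.iv = iv,
 *	};
 *
 *	res = dcp_cipher_do_init(&data, &init);
 *	if (res == TEE_SUCCESS) {
 *		res = dcp_cipher_do_update(&data, buf_in, buf_out,
 *					   DCP_AES128_BLOCK_SIZE);
 *		dcp_cipher_do_final(&data);
 *	}
 */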
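/*
 * Initialize a hash operation: build the work packet, reserve a DCP channel
 * and allocate the context buffer holding partial blocks between updates.
 *
 * @hashdata   Hash operation context, with the alg field set by the caller
 */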
TEE_Result dcp_sha_do_init(struct dcp_hash_data *hashdata)
{
	struct dcp_descriptor *desc = NULL;
	TEE_Result ret = TEE_ERROR_GENERIC;

	if (!hashdata) {
		EMSG("Bad parameters, hashdata is NULL");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	desc = &hashdata->dcp_data.desc;

	/* DCP descriptor init */
	desc->status = 0;
	desc->payload = 0;
	desc->dest_buffer = 0;
	desc->ctrl0 = DCP_CONTROL0_ENABLE_HASH | DCP_CONTROL0_INTERRUPT_ENABLE |
		      DCP_CONTROL0_DECR_SEMAPHORE | DCP_CONTROL0_HASH_INIT;
	desc->ctrl1 = hash_alg[hashdata->alg].type;
	desc->buff_size = 0;
	desc->next = 0;
	desc->src_buffer = 0;

	ret = dcp_lock_channel(&hashdata->dcp_data.channel);
	if (ret != TEE_SUCCESS) {
		EMSG("Channel is busy, can't start operation now");
		return ret;
	}

	/* Allocate the context data */
	ret = dcp_calloc_align_buf(&hashdata->ctx, DCP_SHA_BLOCK_SIZE);
	if (ret != TEE_SUCCESS) {
		/* Do not leave the channel locked on error */
		dcp_unlock_channel(hashdata->dcp_data.channel);
		return ret;
	}

	hashdata->initialized = true;
	hashdata->ctx_size = 0;

	return ret;
}

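/*
 * Update the hash operation with len bytes of data. Complete blocks are
 * processed by the DCP, any remainder is saved in the context buffer for
 * the next call.
 *
 * @hashdata   Hash operation context
 * @data       Input data
 * @len        Input size in bytes
 */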
TEE_Result dcp_sha_do_update(struct dcp_hash_data *hashdata,
			     const uint8_t *data, size_t len)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	struct dcp_descriptor *desc = NULL;
	struct dcp_align_buf input = { };
	uint32_t offset = 0;
	uint32_t nb_blocks = 0;
	size_t size_todo = 0;
	size_t size_left = 0;
	size_t size_total = 0;

	if (!hashdata || !data || !len)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!hashdata->initialized) {
		EMSG("hashdata is uninitialized");
		return TEE_ERROR_BAD_STATE;
	}

	/* Get the number of complete blocks to process */
	if (ADD_OVERFLOW(hashdata->ctx_size, len, &size_total))
		return TEE_ERROR_BAD_PARAMETERS;

	nb_blocks = size_total / DCP_SHA_BLOCK_SIZE;
	size_todo = nb_blocks * DCP_SHA_BLOCK_SIZE;
	size_left = len - size_todo + hashdata->ctx_size;
	desc = &hashdata->dcp_data.desc;

	if (size_todo) {
		/* Allocate a buffer as input */
		ret = dcp_calloc_align_buf(&input, size_todo);
		if (ret != TEE_SUCCESS)
			return ret;

		/* Copy the data saved by the previous update, if any */
		offset = size_todo - hashdata->ctx_size;
		memcpy(input.data, hashdata->ctx.data, hashdata->ctx_size);
		memcpy(input.data + hashdata->ctx_size, data, offset);
		hashdata->ctx_size = 0;

		desc->src_buffer = input.paddr;
		desc->buff_size = size_todo;

		cache_operation(TEE_CACHECLEAN, input.data, size_todo);

		ret = dcp_run(&hashdata->dcp_data);
		desc->ctrl0 &= ~DCP_CONTROL0_HASH_INIT;

		dcp_free(&input);
	} else {
		size_left = len;
		offset = 0;
		ret = TEE_SUCCESS;
	}

	/* Save any data left for the next update or the final step */
	memcpy(hashdata->ctx.data + hashdata->ctx_size, data + offset,
	       size_left);
	hashdata->ctx_size += size_left;

	return ret;
}

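/*
 * Terminate the hash operation: process the data left in the context
 * buffer, retrieve the digest and release the channel.
 *
 * @hashdata      Hash operation context
 * @digest        [out] Output digest
 * @digest_size   Size of the digest buffer in bytes
 */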
TEE_Result dcp_sha_do_final(struct dcp_hash_data *hashdata, uint8_t *digest,
			    size_t digest_size)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	size_t payload_size = 0;
	struct dcp_descriptor *desc = NULL;
	struct dcp_align_buf payload = { };

	if (!hashdata || !digest)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!hashdata->initialized) {
		EMSG("hashdata is uninitialized");
		return TEE_ERROR_BAD_STATE;
	}

	if (digest_size < hash_alg[hashdata->alg].size) {
		EMSG("Digest buffer size is too small, should be %" PRId32,
		     hash_alg[hashdata->alg].size);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	desc = &hashdata->dcp_data.desc;
	payload_size = hash_alg[hashdata->alg].size;

	/* Handle the case where the input message is zero-length */
	if ((desc->ctrl0 & DCP_CONTROL0_HASH_INIT) && hashdata->ctx_size == 0) {
		if (hashdata->alg == DCP_SHA1)
			memcpy(digest, sha1_null_msg, payload_size);
		if (hashdata->alg == DCP_SHA256)
			memcpy(digest, sha256_null_msg, payload_size);
		ret = TEE_SUCCESS;
	} else {
		/* Allocate a buffer for the digest */
		ret = dcp_calloc_align_buf(&payload, payload_size);
		if (ret != TEE_SUCCESS)
			goto out;

		/* Set the work packet for the last iteration */
		desc->ctrl0 |= DCP_CONTROL0_HASH_TERM;
		desc->src_buffer = hashdata->ctx.paddr;
		desc->buff_size = hashdata->ctx_size;
		desc->payload = payload.paddr;

		cache_operation(TEE_CACHECLEAN, hashdata->ctx.data,
				hashdata->ctx_size);
		cache_operation(TEE_CACHEINVALIDATE, payload.data,
				payload_size);

		ret = dcp_run(&hashdata->dcp_data);

		/* Copy the result */
		cache_operation(TEE_CACHEINVALIDATE, payload.data,
				payload_size);
		/* The DCP payload result is byte-reversed */
		dcp_reverse(payload.data, digest, payload_size);

		dcp_free(&payload);
	}

out:
	dcp_free(&hashdata->ctx);

	/* Reset the hashdata structure */
	hashdata->initialized = false;

	dcp_unlock_channel(hashdata->dcp_data.channel);

	return ret;
}

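/*
 * Typical hash call sequence (illustrative sketch only; msg and msg_size
 * are assumptions, not an in-tree caller):
 *
 *	struct dcp_hash_data hashdata = {
 *		.alg = DCP_SHA256,
 *	};
 *	uint8_t digest[TEE_SHA256_HASH_SIZE] = { };
 *
 *	res = dcp_sha_do_init(&hashdata);
 *	if (res == TEE_SUCCESS) {
 *		res = dcp_sha_do_update(&hashdata, msg, msg_size);
 *		if (res == TEE_SUCCESS)
 *			res = dcp_sha_do_final(&hashdata, digest,
 *					       sizeof(digest));
 *	}
 */
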
#ifdef CFG_DT
static const char *const dt_ctrl_match_table[] = {
	"fsl,imx28-dcp",
	"fsl,imx6sl-dcp",
};

/*
 * Fetch the DCP base address from the DT
 *
 * @base   [out] DCP base address
 */
static TEE_Result dcp_pbase(paddr_t *base)
{
	void *fdt = NULL;
	int node = -1;
	unsigned int i = 0;

	fdt = get_dt();
	if (!fdt) {
		EMSG("DTB not present");
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	for (i = 0; i < ARRAY_SIZE(dt_ctrl_match_table); i++) {
		node = fdt_node_offset_by_compatible(fdt, 0,
						     dt_ctrl_match_table[i]);
		if (node >= 0)
			break;
	}

	if (node < 0) {
		EMSG("DCP node not found, err = %d", node);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (_fdt_get_status(fdt, node) == DT_STATUS_DISABLED)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/* Force secure-status = "okay" and status = "disabled" */
	if (dt_enable_secure_status(fdt, node)) {
		EMSG("Not able to set the DCP DTB entry secure");
		return TEE_ERROR_NOT_SUPPORTED;
	}

	*base = _fdt_reg_base_address(fdt, node);
	if (*base == DT_INFO_INVALID_REG) {
		EMSG("Unable to get the DCP base address");
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	return TEE_SUCCESS;
}
#else
/* Without a DT, dcp_init() falls back to the static base address */
static TEE_Result dcp_pbase(paddr_t *base __unused)
{
	return TEE_ERROR_ITEM_NOT_FOUND;
}
#endif /* CFG_DT */

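/*
 * Initialize the DCP driver: map the registers, allocate the context
 * switching buffer, take the block out of reset and enable its channels.
 */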
TEE_Result dcp_init(void)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	paddr_t pbase = 0;

	if (driver_initialized)
		return TEE_SUCCESS;

	dcp_clk_enable(true);

	/* Fall back to the static base address if the DT lookup fails */
	ret = dcp_pbase(&pbase);
	if (ret != TEE_SUCCESS)
		pbase = DCP_BASE;

	dcp_base = core_mmu_get_va(pbase, MEM_AREA_IO_SEC);
	if (!dcp_base) {
		EMSG("Unable to map the DCP base address");
		ret = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}

	/* Context switching buffer memory allocation */
	ret = dcp_calloc_align_buf(&hw_context_buffer, DCP_CONTEXT_BUFFER_SIZE);
	if (ret != TEE_SUCCESS) {
		EMSG("hw_context_buffer allocation failed");
		goto out;
	}

	/*
	 * Initialize the control register.
	 * Enable normal DCP operation (SFTRST and CLKGATE bits cleared)
	 */
	io_write32(dcp_base + DCP_CTRL_CLR, DCP_CTRL_SFTRST | DCP_CTRL_CLKGATE);

	io_write32(dcp_base + DCP_CTRL_SET,
		   DCP_CTRL_GATHER_RESIDUAL_WRITES |
		   DCP_CTRL_ENABLE_CONTEXT_SWITCHING);

	/* Enable all DCP channels */
	io_write32(dcp_base + DCP_CHANNELCTRL,
		   DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK);

	/* Clear the DCP_STAT register */
	io_write32(dcp_base + DCP_STAT_CLR, DCP_STAT_CLEAR);

	/* Write the context switching buffer address to DCP_CONTEXT */
	io_write32(dcp_base + DCP_CONTEXT, (uint32_t)hw_context_buffer.paddr);

	driver_initialized = true;

	ret = TEE_SUCCESS;

out:
	/* Balance the clock reference taken above */
	dcp_clk_enable(false);

	return ret;
}