// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2020 NXP
 */

#include <config.h>
#include <dcp_utils.h>
#include <drivers/imx/dcp.h>
#include <imx-regs.h>
#include <io.h>
#include <kernel/boot.h>
#include <kernel/dt.h>
#include <kernel/mutex.h>
#include <kernel/spinlock.h>
#include <libfdt.h>
#include <local.h>
#include <mm/core_memprot.h>
#include <tee/cache.h>
#include <utee_defines.h>

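/*
 * Precomputed SHA-1 and SHA-256 digests of the zero-length message,
 * returned directly by dcp_sha_do_final() when nothing was hashed.
 */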
static const uint8_t sha1_null_msg[] = {
        0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, 0x55,
        0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, 0x07, 0x09,
};

static const uint8_t sha256_null_msg[] = {
        0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4,
        0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b,
        0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55,
};

static vaddr_t dcp_base;
static bool driver_initialized;
static unsigned int clk_refcount;
static unsigned int key_store_spinlock = SPINLOCK_UNLOCK;
static unsigned int clock_spinlock = SPINLOCK_UNLOCK;
static struct dcp_align_buf hw_context_buffer;

static struct mutex lock_channel[DCP_NB_CHANNELS] = {
        [DCP_CHANN0] = MUTEX_INITIALIZER,
        [DCP_CHANN1] = MUTEX_INITIALIZER,
        [DCP_CHANN2] = MUTEX_INITIALIZER,
        [DCP_CHANN3] = MUTEX_INITIALIZER,
};

static const struct dcp_hashalg hash_alg[2] = {
        [DCP_SHA1] = {
                .type = DCP_CONTROL1_HASH_SELECT_SHA1,
                .size = TEE_SHA1_HASH_SIZE,
        },
        [DCP_SHA256] = {
                .type = DCP_CONTROL1_HASH_SELECT_SHA256,
                .size = TEE_SHA256_HASH_SIZE,
        },
};

/*
 * Enable/disable the DCP clock.
 *
 * @enable Enable the clock if true, disable if false.
 */
static void dcp_clk_enable(bool enable)
{
        vaddr_t ccm_base = core_mmu_get_va(CCM_BASE, MEM_AREA_IO_SEC,
                                           CCM_CCGR0 + sizeof(uint32_t));
        uint32_t clock_except = cpu_spin_lock_xsave(&clock_spinlock);

        if (enable) {
                if (clk_refcount > 0) {
                        clk_refcount++;
                        goto out;
                } else {
                        clk_refcount++;
                        io_setbits32(ccm_base + CCM_CCGR0, DCP_CLK_ENABLE_MASK);
                }
        } else {
                assert(clk_refcount != 0);

                clk_refcount--;
                if (clk_refcount > 0)
                        goto out;
                else
                        io_clrbits32(ccm_base + CCM_CCGR0, DCP_CLK_ENABLE_MASK);
        }
out:
        cpu_spin_unlock_xrestore(&clock_spinlock, clock_except);
}

/*
 * Lock the given channel with a mutex.
 *
 * @chan DCP channel to lock
 */
static TEE_Result dcp_lock_known_channel(enum dcp_channel chan)
{
        if (mutex_trylock(&lock_channel[chan]))
                return TEE_SUCCESS;
        else
                return TEE_ERROR_BUSY;
}

/*
 * Lock the first free DCP channel.
 *
 * @channel [out] Channel that was locked for the operation
 */
static TEE_Result dcp_lock_channel(enum dcp_channel *channel)
{
        TEE_Result ret = TEE_ERROR_BUSY;
        enum dcp_channel chan = DCP_CHANN0;

        for (chan = DCP_CHANN0; chan < DCP_NB_CHANNELS; chan++) {
                ret = dcp_lock_known_channel(chan);
                if (ret == TEE_SUCCESS) {
                        *channel = chan;
                        return ret;
                }
        }

        EMSG("All channels are busy");

        return ret;
}

/*
 * Unlock the given channel.
 *
 * @chan DCP channel to unlock
 */
static void dcp_unlock_channel(enum dcp_channel chan)
{
        mutex_unlock(&lock_channel[chan]);
}

/*
 * Start a DCP operation and wait for its completion.
 *
 * @dcp_data Structure containing the DCP descriptor configuration and the
 *           channel to use.
 */
static TEE_Result dcp_run(struct dcp_data *dcp_data)
{
        TEE_Result ret = TEE_SUCCESS;
        unsigned int timeout = 0;
        uint32_t val = 0;

        dcp_data->desc.next = 0;
        cache_operation(TEE_CACHEFLUSH, &dcp_data->desc,
                        sizeof(dcp_data->desc));

        /* Enable the clock if it is not enabled yet */
        dcp_clk_enable(true);

        /* Clear the DCP_STAT IRQ field for the channel used by the operation */
        io_clrbits32(dcp_base + DCP_STAT, BIT32(dcp_data->channel));

        /* Clear CH_N_STAT to clear the IRQ and error codes */
        io_write32(dcp_base + DCP_CH_N_STAT(dcp_data->channel), 0x0);

        /* Set the descriptor to be processed by the channel */
        io_write32(dcp_base + DCP_CH_N_CMDPTR(dcp_data->channel),
                   virt_to_phys(&dcp_data->desc));

        /* Increment the channel semaphore to start the transfer */
        io_write32(dcp_base + DCP_CH_N_SEMA(dcp_data->channel), 0x1);

        for (timeout = 0; timeout < DCP_MAX_TIMEOUT; timeout++) {
                dcp_udelay(10);
                val = io_read32(dcp_base + DCP_STAT);
                if (val & BIT(dcp_data->channel))
                        break;
        }

        if (timeout == DCP_MAX_TIMEOUT) {
                EMSG("Timeout elapsed before operation completed");
                ret = TEE_ERROR_GENERIC;
                goto out;
        }

        val = io_read32(dcp_base + DCP_CH_N_STAT(dcp_data->channel));
        if (val & DCP_CH_STAT_ERROR_MASK) {
                EMSG("Operation error, status 0x%" PRIx32, val);
                ret = TEE_ERROR_GENERIC;
        }

out:
        dcp_clk_enable(false);

        return ret;
}

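/*
 * Generate the two CMAC subkeys K1 and K2 as specified in NIST SP 800-38B:
 * encrypt a zero block to get L, then derive each subkey with a one-bit
 * left shift, XORing with the constant Rb (0x87 in the last byte) when the
 * bit shifted out is one.
 *
 * @init Cipher operation parameters
 * @k1   [out] First subkey
 * @k2   [out] Second subkey
 */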
static TEE_Result dcp_cmac_subkey_generation(struct dcp_cipher_init *init,
                                             uint8_t *k1, uint8_t *k2)
{
        TEE_Result ret = TEE_ERROR_GENERIC;
        struct dcp_cipher_data data = { };
        uint8_t l[16] = { };
        uint8_t tmp[16] = { };
        uint8_t const_zero[16] = { };
        uint8_t const_rb[16] = { [15] = 0x87 };

        ret = dcp_cipher_do_init(&data, init);
        if (ret != TEE_SUCCESS)
                return ret;

        ret = dcp_cipher_do_update(&data, const_zero, l, sizeof(l));
        if (ret != TEE_SUCCESS)
                goto out;

        if ((l[0] & BIT(7)) == 0) {
                dcp_left_shift_buffer(l, k1, 16);
        } else {
                dcp_left_shift_buffer(l, tmp, 16);
                dcp_xor(tmp, const_rb, k1, 16);
        }

        if ((k1[0] & BIT(7)) == 0) {
                dcp_left_shift_buffer(k1, k2, 16);
        } else {
                dcp_left_shift_buffer(k1, tmp, 16);
                dcp_xor(tmp, const_rb, k2, 16);
        }

        ret = TEE_SUCCESS;
out:
        dcp_cipher_do_final(&data);

        return ret;
}

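/*
 * Store a 128-bit AES key in the DCP SRAM-based key storage.
 *
 * @key   Key to store, as four 32-bit words
 * @index Index of the key storage slot
 */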
TEE_Result dcp_store_key(uint32_t *key, unsigned int index)
{
        uint32_t val = 0;
        unsigned int i = 0;
        uint32_t key_store_except = 0;

        if (!key)
                return TEE_ERROR_BAD_PARAMETERS;

        if (index > DCP_SRAM_KEY_NB_SUBWORD - 1) {
                EMSG("Bad parameters, index must be < %u",
                     DCP_SRAM_KEY_NB_SUBWORD);
                return TEE_ERROR_BAD_PARAMETERS;
        }

        key_store_except = cpu_spin_lock_xsave(&key_store_spinlock);

        dcp_clk_enable(true);

        val = DCP_SRAM_KEY_INDEX(index);
        io_write32(dcp_base + DCP_KEY, val);

        /*
         * The key is stored as four uint32_t values, starting with subword0
         * (least-significant word)
         */
        for (i = 0; i < DCP_SRAM_KEY_NB_SUBWORD; i++) {
                val = TEE_U32_TO_BIG_ENDIAN(key[i]);
                io_write32(dcp_base + DCP_KEYDATA, val);
        }

        dcp_clk_enable(false);

        cpu_spin_unlock_xrestore(&key_store_spinlock, key_store_except);

        return TEE_SUCCESS;
}

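/*
 * Compute an AES-CMAC (NIST SP 800-38B) over @input: CBC-MAC chaining of
 * the full blocks, with the last block XORed with subkey K1 when complete,
 * otherwise padded and XORed with subkey K2.
 *
 * @init       Cipher operation parameters
 * @input      Message to authenticate
 * @input_size Size of the message in bytes
 * @output     [out] Resulting MAC (one AES-128 block)
 *
 * Minimal usage sketch (aes128_key, msg and msg_size are hypothetical):
 *
 *      struct dcp_cipher_init init = {
 *              .op = DCP_ENCRYPT,
 *              .key_mode = DCP_PAYLOAD,
 *              .key = aes128_key,
 *      };
 *      uint8_t mac[DCP_AES128_BLOCK_SIZE] = { };
 *
 *      res = dcp_cmac(&init, msg, msg_size, mac);
 */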
TEE_Result dcp_cmac(struct dcp_cipher_init *init, uint8_t *input,
                    size_t input_size, uint8_t *output)
{
        TEE_Result ret = TEE_ERROR_GENERIC;
        uint8_t key1[DCP_AES128_KEY_SIZE] = { };
        uint8_t key2[DCP_AES128_KEY_SIZE] = { };
        unsigned int nb_blocks = 0;
        bool block_complete = false;
        struct dcp_cipher_data data = { };
        uint8_t y[DCP_AES128_BLOCK_SIZE] = { };
        uint8_t x[DCP_AES128_BLOCK_SIZE] = { };
        uint8_t last[DCP_AES128_BLOCK_SIZE] = { };
        unsigned int i = 0;
        size_t offset = 0;

        if (!output || !init)
                return TEE_ERROR_BAD_PARAMETERS;

        if (!input && input_size)
                return TEE_ERROR_BAD_PARAMETERS;

        ret = dcp_cipher_do_init(&data, init);
        if (ret != TEE_SUCCESS) {
                ret = TEE_ERROR_OUT_OF_MEMORY;
                goto out;
        }

        /* Generate CMAC subkeys */
        ret = dcp_cmac_subkey_generation(init, key1, key2);
        if (ret != TEE_SUCCESS)
                goto out;

        /* Get the number of blocks */
        nb_blocks = ROUNDUP_DIV(input_size, DCP_AES128_BLOCK_SIZE);

        block_complete = nb_blocks && !(input_size % DCP_AES128_BLOCK_SIZE);
        if (nb_blocks == 0)
                nb_blocks = 1;

        for (i = 0; i < nb_blocks - 1; i++) {
                dcp_xor(x, input + offset, y, DCP_AES128_BLOCK_SIZE);
                ret = dcp_cipher_do_update(&data, y, x,
                                           DCP_AES128_BLOCK_SIZE);
                if (ret)
                        goto out;
                offset += DCP_AES128_BLOCK_SIZE;
        }

        /* Process the last block */
        memcpy(last, input + offset, input_size - offset);

        if (block_complete) {
                dcp_xor(last, key1, last, DCP_AES128_BLOCK_SIZE);
        } else {
                dcp_cmac_padding(last, input_size % DCP_AES128_BLOCK_SIZE);
                dcp_xor(last, key2, last, DCP_AES128_BLOCK_SIZE);
        }

        dcp_xor(x, last, y, DCP_AES128_BLOCK_SIZE);
        ret = dcp_cipher_do_update(&data, y, x, DCP_AES128_BLOCK_SIZE);
        if (ret)
                goto out;

        memcpy(output, x, DCP_AES128_BLOCK_SIZE);

out:
        dcp_cipher_do_final(&data);

        return ret;
}

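/*
 * Initialize a cipher operation: reserve a DCP channel, build the work
 * packet (descriptor) and allocate the aligned buffer carrying the key
 * and IV read by the DCP.
 *
 * @data Context of the operation
 * @init Operation parameters (mode, direction, key selection)
 */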
TEE_Result dcp_cipher_do_init(struct dcp_cipher_data *data,
                              struct dcp_cipher_init *init)
{
        struct dcp_descriptor *desc = NULL;
        TEE_Result ret = TEE_ERROR_GENERIC;

        if (!init || !data)
                return TEE_ERROR_BAD_PARAMETERS;

        ret = dcp_lock_channel(&data->dcp_data.channel);
        if (ret != TEE_SUCCESS)
                return ret;

        desc = &data->dcp_data.desc;

        desc->ctrl0 = DCP_CONTROL0_DECR_SEMAPHORE | DCP_CONTROL0_ENABLE_CIPHER |
                      DCP_CONTROL0_INTERRUPT_ENABLE;
        desc->ctrl1 = DCP_CONTROL1_CIPHER_SELECT_AES128;

        if (init->op == DCP_ENCRYPT)
                desc->ctrl0 |= DCP_CONTROL0_CIPHER_ENCRYPT;

        if (init->key_mode == DCP_OTP) {
                desc->ctrl0 &= ~DCP_CONTROL0_OTP_KEY;
                desc->ctrl1 |= DCP_CONTROL1_KEY_SELECT_OTP_CRYPTO;
        } else if (init->key_mode == DCP_PAYLOAD) {
                desc->ctrl0 |= DCP_CONTROL0_PAYLOAD_KEY;
                if (!init->key)
                        return TEE_ERROR_BAD_PARAMETERS;
                memcpy(data->key, init->key, DCP_AES128_KEY_SIZE);
        } else {
                desc->ctrl1 |= SHIFT_U32(init->key_mode, 8);
        }

        if (init->mode == DCP_CBC) {
                desc->ctrl0 |= DCP_CONTROL0_CIPHER_INIT;
                desc->ctrl1 |= DCP_CONTROL1_CIPHER_MODE_CBC;
                if (!init->iv)
                        return TEE_ERROR_BAD_PARAMETERS;
                memcpy(data->iv, init->iv, DCP_AES128_IV_SIZE);
        }

        /* Allocate an aligned buffer for the DCP key and IV */
        ret = dcp_calloc_align_buf(&data->payload,
                                   DCP_AES128_IV_SIZE + DCP_AES128_KEY_SIZE);
        if (ret != TEE_SUCCESS)
                return ret;

        desc->src_buffer = 0;
        desc->dest_buffer = 0;
        desc->status = 0;
        desc->buff_size = 0;
        desc->next = virt_to_phys(desc);

        data->initialized = true;

        return ret;
}

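/*
 * Run @size bytes of input through the cipher. @size must be a multiple
 * of the AES-128 block size.
 *
 * @data Context of the operation
 * @src  Source data
 * @dst  [out] Destination buffer
 * @size Size of the data in bytes
 */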
TEE_Result dcp_cipher_do_update(struct dcp_cipher_data *data,
                                const uint8_t *src, uint8_t *dst, size_t size)
{
        TEE_Result ret = TEE_ERROR_GENERIC;
        struct dcp_align_buf output = { };
        struct dcp_align_buf input = { };
        struct dcp_descriptor *desc = NULL;

        if (!data || !src || !dst)
                return TEE_ERROR_BAD_PARAMETERS;

        if (!data->initialized) {
                EMSG("Error, call dcp_cipher_do_init() first");
                return TEE_ERROR_BAD_STATE;
        }

        if (size % DCP_AES128_BLOCK_SIZE) {
                EMSG("Input size has to be a multiple of %zu bytes",
                     DCP_AES128_BLOCK_SIZE);
                return TEE_ERROR_BAD_PARAMETERS;
        }

        ret = dcp_calloc_align_buf(&output, size);
        if (ret != TEE_SUCCESS)
                goto out;

        ret = dcp_calloc_align_buf(&input, size);
        if (ret != TEE_SUCCESS)
                goto out;

        desc = &data->dcp_data.desc;

        /* Copy input data */
        memcpy(input.data, src, size);

        /* Copy key and IV */
        memcpy(data->payload.data, data->key, DCP_AES128_KEY_SIZE);
        data->payload_size = DCP_AES128_KEY_SIZE;
        if (desc->ctrl0 & DCP_CONTROL0_CIPHER_INIT) {
                memcpy(data->payload.data + DCP_AES128_KEY_SIZE, data->iv,
                       DCP_AES128_IV_SIZE);
                data->payload_size += DCP_AES128_IV_SIZE;
        }

        desc->src_buffer = input.paddr;
        desc->dest_buffer = output.paddr;
        desc->payload = data->payload.paddr;
        desc->buff_size = size;

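        /*
         * The DCP is a non-coherent DMA master: clean the payload and
         * source buffers to memory before the run and invalidate the
         * destination so the CPU reads what the DCP wrote.
         */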
        cache_operation(TEE_CACHECLEAN, data->payload.data,
                        data->payload_size);
        cache_operation(TEE_CACHECLEAN, input.data, size);
        cache_operation(TEE_CACHEINVALIDATE, output.data, size);

        ret = dcp_run(&data->dcp_data);
        if (ret)
                goto out;

        cache_operation(TEE_CACHEINVALIDATE, output.data, size);

        desc->ctrl0 &= ~DCP_CONTROL0_CIPHER_INIT;

        memcpy(dst, output.data, size);
out:
        dcp_free(&output);
        dcp_free(&input);

        return ret;
}

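/*
 * Terminate a cipher operation: free the payload buffer and release the
 * DCP channel.
 *
 * @data Context of the operation
 */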
void dcp_cipher_do_final(struct dcp_cipher_data *data)
{
        if (!data)
                return;

        data->initialized = false;

        dcp_free(&data->payload);
        dcp_unlock_channel(data->dcp_data.channel);
}

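/*
 * Initialize a hash operation: set up the work packet, reserve a DCP
 * channel and allocate the context buffer holding data not yet hashed.
 *
 * @hashdata Context of the operation
 */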
TEE_Result dcp_sha_do_init(struct dcp_hash_data *hashdata)
{
        struct dcp_descriptor *desc = NULL;
        TEE_Result ret = TEE_ERROR_GENERIC;

        if (!hashdata) {
                EMSG("Bad parameters, hashdata is NULL");
                return TEE_ERROR_BAD_PARAMETERS;
        }

        desc = &hashdata->dcp_data.desc;

        /* DCP descriptor init */
        desc->status = 0;
        desc->payload = 0;
        desc->dest_buffer = 0;
        desc->ctrl0 = DCP_CONTROL0_ENABLE_HASH | DCP_CONTROL0_INTERRUPT_ENABLE |
                      DCP_CONTROL0_DECR_SEMAPHORE | DCP_CONTROL0_HASH_INIT;
        desc->ctrl1 = hash_alg[hashdata->alg].type;
        desc->buff_size = 0;
        desc->next = 0;
        desc->src_buffer = 0;

        ret = dcp_lock_channel(&hashdata->dcp_data.channel);
        if (ret != TEE_SUCCESS) {
                EMSG("Channel is busy, can't start operation now");
                return ret;
        }

        /* Allocate context data */
        ret = dcp_calloc_align_buf(&hashdata->ctx, DCP_SHA_BLOCK_SIZE);
        if (ret != TEE_SUCCESS)
                return ret;

        hashdata->initialized = true;
        hashdata->ctx_size = 0;

        return ret;
}

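/*
 * Feed data to the hash. Complete 64-byte blocks (combined with any data
 * saved from a previous call) are run through the DCP; the remainder is
 * buffered in the context until the next update or final call.
 *
 * @hashdata Context of the operation
 * @data     Data to hash
 * @len      Size of the data in bytes
 */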
TEE_Result dcp_sha_do_update(struct dcp_hash_data *hashdata,
                             const uint8_t *data, size_t len)
{
        TEE_Result ret = TEE_ERROR_GENERIC;
        struct dcp_descriptor *desc = NULL;
        struct dcp_align_buf input = { };
        uint32_t offset = 0;
        uint32_t nb_blocks = 0;
        size_t size_todo = 0;
        size_t size_left = 0;
        size_t size_total = 0;

        if (!hashdata || !data || !len)
                return TEE_ERROR_BAD_PARAMETERS;

        if (!hashdata->initialized) {
                EMSG("hashdata is uninitialized");
                return TEE_ERROR_BAD_STATE;
        }

        /* Get the number of blocks */
        if (ADD_OVERFLOW(hashdata->ctx_size, len, &size_total))
                return TEE_ERROR_BAD_PARAMETERS;

        nb_blocks = size_total / DCP_SHA_BLOCK_SIZE;
        size_todo = nb_blocks * DCP_SHA_BLOCK_SIZE;
        size_left = len - size_todo + hashdata->ctx_size;
        desc = &hashdata->dcp_data.desc;

        if (size_todo) {
                /* Allocate a buffer as input */
                ret = dcp_calloc_align_buf(&input, size_todo);
                if (ret != TEE_SUCCESS)
                        return ret;

                /* Copy previous data if any */
                offset = size_todo - hashdata->ctx_size;
                memcpy(input.data, hashdata->ctx.data, hashdata->ctx_size);
                memcpy(input.data + hashdata->ctx_size, data, offset);
                hashdata->ctx_size = 0;

                desc->src_buffer = input.paddr;
                desc->buff_size = size_todo;

                cache_operation(TEE_CACHECLEAN, input.data, size_todo);

                ret = dcp_run(&hashdata->dcp_data);
                desc->ctrl0 &= ~DCP_CONTROL0_HASH_INIT;

                dcp_free(&input);
        } else {
                size_left = len;
                offset = 0;
                ret = TEE_SUCCESS;
        }

        /* Save any data left */
        memcpy(hashdata->ctx.data + hashdata->ctx_size, data + offset,
               size_left);
        hashdata->ctx_size += size_left;

        return ret;
}

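/*
 * Finalize the hash: run the data left in the context through the DCP
 * with the HASH_TERM flag set and copy out the digest. A zero-length
 * message is served from the precomputed digests above.
 *
 * @hashdata    Context of the operation
 * @digest      [out] Resulting digest
 * @digest_size Size of the digest buffer in bytes
 */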
TEE_Result dcp_sha_do_final(struct dcp_hash_data *hashdata, uint8_t *digest,
                            size_t digest_size)
{
        TEE_Result ret = TEE_ERROR_GENERIC;
        size_t payload_size = 0;
        struct dcp_descriptor *desc = NULL;
        struct dcp_align_buf payload = { };

        if (!hashdata || !digest)
                return TEE_ERROR_BAD_PARAMETERS;

        if (!hashdata->initialized) {
                EMSG("hashdata is uninitialized");
                return TEE_ERROR_BAD_STATE;
        }

        if (digest_size < hash_alg[hashdata->alg].size) {
                EMSG("Digest buffer is too small, should be %" PRId32 " bytes",
                     hash_alg[hashdata->alg].size);
                return TEE_ERROR_BAD_PARAMETERS;
        }

        desc = &hashdata->dcp_data.desc;
        payload_size = hash_alg[hashdata->alg].size;

        /* Handle the case where the input message is empty */
        if ((desc->ctrl0 & DCP_CONTROL0_HASH_INIT) && hashdata->ctx_size == 0) {
                if (hashdata->alg == DCP_SHA1)
                        memcpy(digest, sha1_null_msg, payload_size);
                if (hashdata->alg == DCP_SHA256)
                        memcpy(digest, sha256_null_msg, payload_size);
                ret = TEE_SUCCESS;
        } else {
                /* Allocate a buffer for the digest */
                ret = dcp_calloc_align_buf(&payload, payload_size);
                if (ret != TEE_SUCCESS)
                        return ret;

                /* Set the work packet for the last iteration */
                desc->ctrl0 |= DCP_CONTROL0_HASH_TERM;
                desc->src_buffer = hashdata->ctx.paddr;
                desc->buff_size = hashdata->ctx_size;
                desc->payload = payload.paddr;

                cache_operation(TEE_CACHECLEAN, hashdata->ctx.data,
                                hashdata->ctx_size);
                cache_operation(TEE_CACHEINVALIDATE, payload.data,
                                payload_size);

                ret = dcp_run(&hashdata->dcp_data);

                /* Copy the result */
                cache_operation(TEE_CACHEINVALIDATE, payload.data,
                                payload_size);
                /* The DCP hash result is in reverse byte order */
                dcp_reverse(payload.data, digest, payload_size);

                dcp_free(&payload);
        }

        dcp_free(&hashdata->ctx);

        /* Reset the hashdata structure */
        hashdata->initialized = false;

        dcp_unlock_channel(hashdata->dcp_data.channel);

        return ret;
}

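/*
 * Disable the use of the DCP unique key by setting the DISABLE_UNIQUE_KEY
 * bit in the capability register.
 */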
void dcp_disable_unique_key(void)
{
        dcp_clk_enable(true);
        io_setbits32(dcp_base + DCP_CAPABILITY0,
                     DCP_CAPABILITY0_DISABLE_UNIQUE_KEY);
        dcp_clk_enable(false);
}

#ifdef CFG_DT
static const char *const dt_ctrl_match_table[] = {
        "fsl,imx28-dcp",
        "fsl,imx6sl-dcp",
};

/*
 * Fetch the DCP base address from the DT
 *
 * @base [out] DCP base address
 */
static TEE_Result dcp_pbase(paddr_t *base)
{
        void *fdt = NULL;
        int node = -1;
        unsigned int i = 0;

        fdt = get_dt();
        if (!fdt) {
                EMSG("DTB not present");
                return TEE_ERROR_ITEM_NOT_FOUND;
        }

        for (i = 0; i < ARRAY_SIZE(dt_ctrl_match_table); i++) {
                node = fdt_node_offset_by_compatible(fdt, 0,
                                                     dt_ctrl_match_table[i]);
                if (node >= 0)
                        break;
        }

        if (node < 0) {
                EMSG("DCP node not found, err = %d", node);
                return TEE_ERROR_ITEM_NOT_FOUND;
        }

        if (fdt_get_status(fdt, node) == DT_STATUS_DISABLED)
                return TEE_ERROR_ITEM_NOT_FOUND;

        /* Force secure-status = "okay" and status = "disabled" */
        if (dt_enable_secure_status(fdt, node)) {
                EMSG("Unable to set the DCP node secure in the DTB");
                return TEE_ERROR_NOT_SUPPORTED;
        }

        *base = fdt_reg_base_address(fdt, node);
        if (*base == DT_INFO_INVALID_REG) {
                EMSG("Unable to get the DCP base address");
                return TEE_ERROR_ITEM_NOT_FOUND;
        }

        return TEE_SUCCESS;
}
#else
/* Without DT support, make dcp_init() fall back to the default base */
static TEE_Result dcp_pbase(paddr_t *base __unused)
{
        return TEE_ERROR_ITEM_NOT_FOUND;
}
#endif /* CFG_DT */

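/*
 * Initialize the DCP: map the registers, allocate the context-switching
 * buffer, reset the block and enable all channels.
 */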
TEE_Result dcp_init(void)
{
        TEE_Result ret = TEE_ERROR_GENERIC;
        paddr_t pbase = 0;

        if (driver_initialized)
                return TEE_SUCCESS;

        dcp_clk_enable(true);

        ret = dcp_pbase(&pbase);
        if (ret != TEE_SUCCESS)
                pbase = DCP_BASE;

        dcp_base = core_mmu_get_va(pbase, MEM_AREA_IO_SEC,
                                   DCP_CONTEXT + sizeof(uint32_t));
        if (!dcp_base) {
                EMSG("Unable to get the DCP virtual address");
                return TEE_ERROR_ITEM_NOT_FOUND;
        }

        /* Context switching buffer memory allocation */
        ret = dcp_calloc_align_buf(&hw_context_buffer, DCP_CONTEXT_BUFFER_SIZE);
        if (ret != TEE_SUCCESS) {
                EMSG("hw_context_buffer allocation failed");
                return ret;
        }

        /*
         * Reset the DCP before initialization. Depending on the SoC lifecycle
         * state, the DCP needs to be reset to reload the OTP master key from
         * the SNVS.
         */
        io_write32(dcp_base + DCP_CTRL_SET, DCP_CTRL_SFTRST | DCP_CTRL_CLKGATE);

        /*
         * Initialize the control register.
         * Enable normal DCP operation (SFTRST and CLKGATE bits cleared)
         */
        io_write32(dcp_base + DCP_CTRL_CLR, DCP_CTRL_SFTRST | DCP_CTRL_CLKGATE);

        io_write32(dcp_base + DCP_CTRL_SET,
                   DCP_CTRL_GATHER_RESIDUAL_WRITES |
                   DCP_CTRL_ENABLE_CONTEXT_SWITCHING);

        /* Enable all DCP channels */
        io_write32(dcp_base + DCP_CHANNELCTRL,
                   DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK);

        /* Clear the DCP_STAT register */
        io_write32(dcp_base + DCP_STAT_CLR, DCP_STAT_CLEAR);

        /* Write the context switching buffer address to DCP_CONTEXT */
        io_write32(dcp_base + DCP_CONTEXT, (uint32_t)hw_context_buffer.paddr);

        driver_initialized = true;

        dcp_clk_enable(false);

        return ret;
}