1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3 * Copyright (C) 2022 Foundries.io Ltd
4 * Jorge Ramirez-Ortiz <jorge@foundries.io>
5 */
6
7 #include <arm.h>
8 #include <drivers/versal_nvm.h>
9 #include <drivers/versal_pmc.h>
10 #include <initcall.h>
11 #include <kernel/panic.h>
12 #include <kernel/tee_misc.h>
13 #include <mm/core_memprot.h>
14 #include <string.h>
15 #include <tee/cache.h>
16
17 #define NVM_WORD_LEN 4
18
19 /* Protocol API with the remote processor */
20 #define NVM_MODULE_SHIFT 8
21 #define NVM_MODULE 11
22 #define NVM_API_ID(_id) (SHIFT_U32(NVM_MODULE, NVM_MODULE_SHIFT) | (_id))
23
24 #define __aligned_efuse __aligned(CACHELINE_LEN)
25
/*
 * Internal: PUF user-fuse descriptor DMA'd to the PLM.
 * Only used by the read/write PUF-as-user-fuse paths in this file.
 */
struct versal_efuse_puf_fuse_addr {
	uint64_t data_addr;		/* physical address of the fuse words */
	uint32_t start_row;
	uint32_t num_rows;
	uint8_t env_monitor_dis;
	uint8_t prgm_puf_fuse;
	uint8_t pad[46];		/* pad to the size the PLM expects */
};
35
36 /*
37 * Max size of the buffer needed for the remote processor to DMA efuse _data_
38 * to/from
39 */
40 #define EFUSE_MAX_LEN (EFUSE_MAX_USER_FUSES * sizeof(uint32_t))
41
/*
 * API identifiers of the remote processor NVM module; combined with
 * NVM_MODULE via NVM_API_ID() to form the first command word.
 */
enum versal_nvm_api_id {
	API_FEATURES = 0,
	BBRAM_WRITE_AES_KEY = 1,
	BBRAM_ZEROIZE = 2,
	BBRAM_WRITE_USER_DATA = 3,
	BBRAM_READ_USER_DATA = 4,
	BBRAM_LOCK_WRITE_USER_DATA = 5,
	EFUSE_WRITE = 6,
	EFUSE_WRITE_PUF = 7,
	EFUSE_PUF_USER_FUSE_WRITE = 8,
	EFUSE_READ_IV = 9,
	EFUSE_READ_REVOCATION_ID = 10,
	EFUSE_READ_OFFCHIP_REVOCATION_ID = 11,
	EFUSE_READ_USER_FUSES = 12,
	EFUSE_READ_MISC_CTRL = 13,
	EFUSE_READ_SEC_CTRL = 14,
	EFUSE_READ_SEC_MISC1 = 15,
	EFUSE_READ_BOOT_ENV_CTRL = 16,
	EFUSE_READ_PUF_SEC_CTRL = 17,
	EFUSE_READ_PPK_HASH = 18,
	EFUSE_READ_DEC_EFUSE_ONLY = 19,
	EFUSE_READ_DNA = 20,
	EFUSE_READ_PUF_USER_FUSES = 21,
	EFUSE_READ_PUF = 22,
	EFUSE_INVALID = 23,
};
68
/*
 * uint64_t are memory addresses: each *_addr field holds the physical
 * address of the matching cfg payload (filled in by the write helpers
 * below, zero when unused). The whole struct is DMA'd to the PLM.
 */
struct versal_efuse_data {
	uint64_t env_mon_dis_flag;	/* set to 1 by every writer here */
	uint64_t aes_key_addr;
	uint64_t ppk_hash_addr;
	uint64_t dec_only_addr;
	uint64_t sec_ctrl_addr;
	uint64_t misc_ctrl_addr;
	uint64_t revoke_id_addr;
	uint64_t iv_addr;
	uint64_t user_fuse_addr;
	uint64_t glitch_cfg_addr;
	uint64_t boot_env_ctrl_addr;
	uint64_t misc1_ctrl_addr;
	uint64_t offchip_id_addr;
	uint8_t pad[24];		/* pad to the size the PLM expects */
};
86
/* Helper read and write requests (not part of the protocol) */
struct versal_nvm_buf {
	size_t len;	/* allocated length of buf */
	void *buf;	/* cache-aligned buffer shared with the PLM */
};
92
/*
 * Read request: efuse_id selects the operation; at most one of the
 * id/type fields is consumed by versal_nvm_read() depending on that id.
 */
struct versal_nvm_read_req {
	enum versal_nvm_api_id efuse_id;
	enum versal_nvm_revocation_id revocation_id;
	enum versal_nvm_offchip_id offchip_id;
	enum versal_nvm_ppk_type ppk_type;
	enum versal_nvm_iv_type iv_type;
	struct versal_nvm_buf ibuf[VERSAL_MAX_IPI_BUF];
};
101
/* BBRAM write parameters, passed as the extra command argument */
struct versal_bbram_data {
	size_t aes_key_len;	/* key length in bytes */
	uint32_t user_data;	/* value for BBRAM_WRITE_USER_DATA */
};
106
/* Write request: data/bbram hold the payload, ibuf the shared buffers */
struct versal_nvm_write_req {
	struct versal_efuse_data data;
	struct versal_bbram_data bbram;
	struct versal_nvm_buf ibuf[VERSAL_MAX_IPI_BUF];
	enum versal_nvm_api_id efuse_id;
};
113
/*
 * Build the IPI command for an NVM request.
 *
 * Command word layout: data[0] = NVM_API_ID(efuse); if @arg is non-NULL it
 * is sent as the next word; if the first buffer exists, the two 32-bit
 * halves of its physical address (split by reg_pair_from_64()) follow.
 * All ibuf entries are recorded in the command so the mailbox layer can
 * manage the shared memory around the transaction.
 */
static TEE_Result
prepare_cmd(struct versal_ipi_cmd *cmd, enum versal_nvm_api_id efuse,
	    struct versal_nvm_buf *ibufs, uint32_t *arg)
{
	uint32_t a = 0;
	uint32_t b = 0;
	size_t i = 0;

	cmd->data[i++] = NVM_API_ID(efuse);
	if (arg)
		cmd->data[i++] = *arg;

	/* Commands without a payload carry only the id (and optional arg) */
	if (!ibufs[0].buf)
		return TEE_SUCCESS;

	reg_pair_from_64(virt_to_phys(ibufs[0].buf), &b, &a);

	cmd->data[i++] = a;
	cmd->data[i++] = b;

	/* 'i' is deliberately reused: it now indexes the shared buffers */
	for (i = 0; i < VERSAL_MAX_IPI_BUF; i++) {
		cmd->ibuf[i].mem.alloc_len = ibufs[i].len;
		cmd->ibuf[i].mem.buf = ibufs[i].buf;
	}

	return TEE_SUCCESS;
}
141
efuse_req(enum versal_nvm_api_id efuse,struct versal_nvm_buf * ibufs,uint32_t * arg)142 static TEE_Result efuse_req(enum versal_nvm_api_id efuse,
143 struct versal_nvm_buf *ibufs, uint32_t *arg)
144 {
145 struct versal_ipi_cmd cmd = { };
146 TEE_Result ret = TEE_SUCCESS;
147
148 ret = prepare_cmd(&cmd, efuse, ibufs, arg);
149 if (ret)
150 return ret;
151
152 ret = versal_pmc_notify(&cmd, NULL, NULL);
153 if (ret)
154 EMSG("Mailbox error");
155
156 return ret;
157 }
158
versal_alloc_read_buffer(struct versal_nvm_read_req * req)159 static TEE_Result versal_alloc_read_buffer(struct versal_nvm_read_req *req)
160 {
161 assert(req);
162 req->ibuf[0].len = 1024;
163 req->ibuf[0].buf = alloc_cache_aligned(req->ibuf[0].len);
164 if (!req->ibuf[0].buf)
165 return TEE_ERROR_OUT_OF_MEMORY;
166
167 return TEE_SUCCESS;
168 }
169
versal_free_read_buffer(struct versal_nvm_read_req * req)170 static void versal_free_read_buffer(struct versal_nvm_read_req *req)
171 {
172 assert(req);
173 free(req->ibuf[0].buf);
174 }
175
versal_get_read_buffer(struct versal_nvm_read_req * req)176 static void *versal_get_read_buffer(struct versal_nvm_read_req *req)
177 {
178 assert(req);
179 return req->ibuf[0].buf;
180 }
181
versal_nvm_read(struct versal_nvm_read_req * req)182 static TEE_Result versal_nvm_read(struct versal_nvm_read_req *req)
183 {
184 uint32_t *arg = NULL;
185 uint32_t val = 0;
186
187 if (!req)
188 return TEE_ERROR_GENERIC;
189
190 switch (req->efuse_id) {
191 case EFUSE_READ_DNA:
192 case EFUSE_READ_DEC_EFUSE_ONLY:
193 case EFUSE_READ_PUF_SEC_CTRL:
194 case EFUSE_READ_BOOT_ENV_CTRL:
195 case EFUSE_READ_SEC_CTRL:
196 case EFUSE_READ_MISC_CTRL:
197 case EFUSE_READ_SEC_MISC1:
198 case EFUSE_READ_USER_FUSES:
199 case EFUSE_READ_PUF_USER_FUSES:
200 case EFUSE_READ_PUF:
201 break;
202 case EFUSE_READ_OFFCHIP_REVOCATION_ID:
203 val = req->offchip_id;
204 arg = &val;
205 break;
206 case EFUSE_READ_REVOCATION_ID:
207 val = req->revocation_id;
208 arg = &val;
209 break;
210 case EFUSE_READ_IV:
211 val = req->iv_type;
212 arg = &val;
213 break;
214 case EFUSE_READ_PPK_HASH:
215 val = req->ppk_type;
216 arg = &val;
217 break;
218 case BBRAM_READ_USER_DATA:
219 break;
220 default:
221 return TEE_ERROR_GENERIC;
222 }
223
224 return efuse_req(req->efuse_id, req->ibuf, arg);
225 }
226
versal_nvm_write(struct versal_nvm_write_req * req)227 static TEE_Result versal_nvm_write(struct versal_nvm_write_req *req)
228 {
229 uint32_t *arg = NULL;
230 uint32_t val = 0;
231
232 switch (req->efuse_id) {
233 case BBRAM_WRITE_AES_KEY:
234 val = req->bbram.aes_key_len;
235 arg = &val;
236 break;
237 case BBRAM_ZEROIZE:
238 break;
239 case BBRAM_WRITE_USER_DATA:
240 val = req->bbram.user_data;
241 arg = &val;
242 break;
243 case EFUSE_PUF_USER_FUSE_WRITE:
244 case EFUSE_WRITE_PUF:
245 case EFUSE_WRITE:
246 break;
247 default:
248 return TEE_ERROR_GENERIC;
249 }
250
251 return efuse_req(req->efuse_id, req->ibuf, arg);
252 }
253
versal_efuse_read_user_data(uint32_t * buf,size_t len,uint32_t first,size_t num)254 TEE_Result versal_efuse_read_user_data(uint32_t *buf, size_t len,
255 uint32_t first, size_t num)
256 {
257 struct versal_efuse_user_data cfg __aligned_efuse = {
258 .start = first,
259 .num = num,
260 };
261 struct versal_nvm_read_req req = {
262 .efuse_id = EFUSE_READ_USER_FUSES,
263 };
264 void *rsp = NULL;
265
266 if (first + num > EFUSE_MAX_USER_FUSES || len < num * sizeof(uint32_t))
267 return TEE_ERROR_BAD_PARAMETERS;
268
269 rsp = alloc_cache_aligned(1024);
270 if (!rsp)
271 return TEE_ERROR_OUT_OF_MEMORY;
272
273 req.ibuf[0].buf = &cfg;
274 req.ibuf[0].len = sizeof(cfg);
275 req.ibuf[1].buf = rsp;
276 req.ibuf[1].len = 1024;
277
278 cfg.addr = virt_to_phys((void *)rsp);
279
280 if (versal_nvm_read(&req)) {
281 free(rsp);
282 return TEE_ERROR_GENERIC;
283 }
284
285 memcpy(buf, rsp, num * sizeof(uint32_t));
286 free(rsp);
287
288 return TEE_SUCCESS;
289 }
290
versal_efuse_read_dna(uint32_t * buf,size_t len)291 TEE_Result versal_efuse_read_dna(uint32_t *buf, size_t len)
292 {
293 struct versal_nvm_read_req req = {
294 .efuse_id = EFUSE_READ_DNA,
295 };
296
297 if (len < EFUSE_DNA_LEN)
298 return TEE_ERROR_BAD_PARAMETERS;
299
300 if (versal_alloc_read_buffer(&req))
301 return TEE_ERROR_OUT_OF_MEMORY;
302
303 if (versal_nvm_read(&req)) {
304 versal_free_read_buffer(&req);
305 return TEE_ERROR_GENERIC;
306 }
307
308 memcpy(buf, versal_get_read_buffer(&req), EFUSE_DNA_LEN);
309 versal_free_read_buffer(&req);
310
311 return TEE_SUCCESS;
312 }
313
versal_efuse_read_iv(uint32_t * buf,size_t len,enum versal_nvm_iv_type type)314 TEE_Result versal_efuse_read_iv(uint32_t *buf, size_t len,
315 enum versal_nvm_iv_type type)
316 {
317 struct versal_nvm_read_req req = {
318 .efuse_id = EFUSE_READ_IV,
319 .iv_type = type,
320 };
321
322 if (len < EFUSE_IV_LEN)
323 return TEE_ERROR_BAD_PARAMETERS;
324
325 if (versal_alloc_read_buffer(&req))
326 return TEE_ERROR_OUT_OF_MEMORY;
327
328 if (versal_nvm_read(&req)) {
329 versal_free_read_buffer(&req);
330 return TEE_ERROR_GENERIC;
331 }
332
333 memcpy(buf, versal_get_read_buffer(&req), EFUSE_IV_LEN);
334 versal_free_read_buffer(&req);
335
336 return TEE_SUCCESS;
337 }
338
versal_efuse_read_ppk(uint32_t * buf,size_t len,enum versal_nvm_ppk_type type)339 TEE_Result versal_efuse_read_ppk(uint32_t *buf, size_t len,
340 enum versal_nvm_ppk_type type)
341 {
342 struct versal_nvm_read_req req = {
343 req.efuse_id = EFUSE_READ_PPK_HASH,
344 .ppk_type = type,
345 };
346
347 if (len < EFUSE_PPK_LEN)
348 return TEE_ERROR_BAD_PARAMETERS;
349
350 if (versal_alloc_read_buffer(&req))
351 return TEE_ERROR_OUT_OF_MEMORY;
352
353 if (versal_nvm_read(&req))
354 return TEE_ERROR_GENERIC;
355
356 memcpy(buf, versal_get_read_buffer(&req), EFUSE_PPK_LEN);
357 versal_free_read_buffer(&req);
358
359 return TEE_SUCCESS;
360 }
361
versal_efuse_write_user_data(uint32_t * buf,size_t len,uint32_t first,size_t num)362 TEE_Result versal_efuse_write_user_data(uint32_t *buf, size_t len,
363 uint32_t first, size_t num)
364 {
365 uint32_t lbuf[EFUSE_MAX_USER_FUSES] __aligned_efuse = { 0 };
366 struct versal_efuse_user_data cfg __aligned_efuse = {
367 .addr = (uintptr_t)lbuf,
368 .start = first,
369 .num = num,
370 };
371 struct versal_nvm_write_req __aligned_efuse req = {
372 .data.user_fuse_addr = virt_to_phys(&cfg),
373 .data.env_mon_dis_flag = 1,
374 .efuse_id = EFUSE_WRITE,
375 };
376 size_t i = 0;
377
378 if (first + num > EFUSE_MAX_USER_FUSES || len < num * sizeof(uint32_t))
379 return TEE_ERROR_BAD_PARAMETERS;
380
381 req.data.user_fuse_addr = virt_to_phys((void *)req.data.user_fuse_addr);
382 cfg.addr = virt_to_phys(lbuf);
383
384 req.ibuf[0].buf = &req.data;
385 req.ibuf[0].len = sizeof(req.data);
386 req.ibuf[1].buf = &cfg;
387 req.ibuf[1].len = sizeof(cfg);
388 req.ibuf[2].buf = lbuf;
389 req.ibuf[2].len = sizeof(lbuf);
390
391 for (i = 0; i < cfg.num; i++)
392 lbuf[i] = buf[i];
393
394 return versal_nvm_write(&req);
395 }
396
versal_efuse_write_aes_keys(struct versal_efuse_aes_keys * keys)397 TEE_Result versal_efuse_write_aes_keys(struct versal_efuse_aes_keys *keys)
398 {
399 struct versal_efuse_aes_keys cfg __aligned_efuse = { };
400 struct versal_nvm_write_req req __aligned_efuse = {
401 .data.aes_key_addr = virt_to_phys(&cfg),
402 .data.env_mon_dis_flag = 1,
403 .efuse_id = EFUSE_WRITE,
404 };
405
406 memcpy(&cfg, keys, sizeof(cfg));
407
408 req.ibuf[0].buf = &req.data;
409 req.ibuf[0].len = sizeof(req.data);
410 req.ibuf[1].buf = &cfg;
411 req.ibuf[1].len = sizeof(cfg);
412
413 return versal_nvm_write(&req);
414 }
415
versal_efuse_write_ppk_hash(struct versal_efuse_ppk_hash * hash)416 TEE_Result versal_efuse_write_ppk_hash(struct versal_efuse_ppk_hash *hash)
417 {
418 struct versal_efuse_ppk_hash cfg __aligned_efuse = { };
419 struct versal_nvm_write_req req __aligned_efuse = {
420 .data.ppk_hash_addr = virt_to_phys(&cfg),
421 .data.env_mon_dis_flag = 1,
422 .efuse_id = EFUSE_WRITE,
423 };
424
425 memcpy(&cfg, hash, sizeof(cfg));
426
427 req.ibuf[0].buf = &req.data;
428 req.ibuf[0].len = sizeof(req.data);
429 req.ibuf[1].buf = &cfg;
430 req.ibuf[1].len = sizeof(cfg);
431
432 return versal_nvm_write(&req);
433 }
434
versal_efuse_write_iv(struct versal_efuse_ivs * p)435 TEE_Result versal_efuse_write_iv(struct versal_efuse_ivs *p)
436 {
437 struct versal_efuse_ivs cfg __aligned_efuse = { };
438 struct versal_nvm_write_req req __aligned_efuse = {
439 .data.iv_addr = virt_to_phys(&cfg),
440 .data.env_mon_dis_flag = 1,
441 .efuse_id = EFUSE_WRITE,
442 };
443
444 memcpy(&cfg, p, sizeof(cfg));
445
446 req.ibuf[0].buf = &req.data;
447 req.ibuf[0].len = sizeof(req.data);
448 req.ibuf[1].buf = &cfg;
449 req.ibuf[1].len = sizeof(cfg);
450
451 return versal_nvm_write(&req);
452 }
453
versal_efuse_write_dec_only(struct versal_efuse_dec_only * p)454 TEE_Result versal_efuse_write_dec_only(struct versal_efuse_dec_only *p)
455 {
456 struct versal_efuse_dec_only cfg __aligned_efuse = { };
457 struct versal_nvm_write_req req __aligned_efuse = {
458 .data.dec_only_addr = virt_to_phys(&cfg),
459 .data.env_mon_dis_flag = 1,
460 .efuse_id = EFUSE_WRITE,
461 };
462
463 memcpy(&cfg, p, sizeof(cfg));
464
465 req.ibuf[0].buf = &req.data;
466 req.ibuf[0].len = sizeof(req.data);
467 req.ibuf[1].buf = &cfg;
468 req.ibuf[1].len = sizeof(cfg);
469
470 return versal_nvm_write(&req);
471 }
472
versal_efuse_write_sec(struct versal_efuse_sec_ctrl_bits * p)473 TEE_Result versal_efuse_write_sec(struct versal_efuse_sec_ctrl_bits *p)
474 {
475 struct versal_efuse_sec_ctrl_bits cfg __aligned_efuse = { };
476 struct versal_nvm_write_req req __aligned_efuse = {
477 .data.sec_ctrl_addr = virt_to_phys(&cfg),
478 .data.env_mon_dis_flag = 1,
479 .efuse_id = EFUSE_WRITE,
480 };
481
482 memcpy(&cfg, p, sizeof(cfg));
483
484 req.ibuf[0].buf = &req.data;
485 req.ibuf[0].len = sizeof(req.data);
486 req.ibuf[1].buf = &cfg;
487 req.ibuf[1].len = sizeof(cfg);
488
489 return versal_nvm_write(&req);
490 }
491
versal_efuse_write_misc(struct versal_efuse_misc_ctrl_bits * p)492 TEE_Result versal_efuse_write_misc(struct versal_efuse_misc_ctrl_bits *p)
493 {
494 struct versal_efuse_misc_ctrl_bits cfg __aligned_efuse = { };
495 struct versal_nvm_write_req req __aligned_efuse = {
496 .data.misc_ctrl_addr = virt_to_phys(&cfg),
497 .data.env_mon_dis_flag = 1,
498 .efuse_id = EFUSE_WRITE,
499 };
500
501 memcpy(&cfg, p, sizeof(cfg));
502
503 req.ibuf[0].buf = &req.data;
504 req.ibuf[0].len = sizeof(req.data);
505 req.ibuf[1].buf = &cfg;
506 req.ibuf[1].len = sizeof(cfg);
507
508 return versal_nvm_write(&req);
509 }
510
versal_efuse_write_glitch_cfg(struct versal_efuse_glitch_cfg_bits * p)511 TEE_Result versal_efuse_write_glitch_cfg(struct versal_efuse_glitch_cfg_bits *p)
512 {
513 struct versal_efuse_glitch_cfg_bits cfg __aligned_efuse = { };
514 struct versal_nvm_write_req req __aligned_efuse = {
515 .data.glitch_cfg_addr = virt_to_phys(&cfg),
516 .data.env_mon_dis_flag = 1,
517 .efuse_id = EFUSE_WRITE,
518 };
519
520 memcpy(&cfg, p, sizeof(cfg));
521
522 req.ibuf[0].buf = &req.data;
523 req.ibuf[0].len = sizeof(req.data);
524 req.ibuf[1].buf = &cfg;
525 req.ibuf[1].len = sizeof(cfg);
526
527 return versal_nvm_write(&req);
528 }
529
versal_efuse_write_boot_env(struct versal_efuse_boot_env_ctrl_bits * p)530 TEE_Result versal_efuse_write_boot_env(struct versal_efuse_boot_env_ctrl_bits
531 *p)
532 {
533 struct versal_efuse_boot_env_ctrl_bits cfg __aligned_efuse = { };
534 struct versal_nvm_write_req req __aligned_efuse = {
535 .data.boot_env_ctrl_addr = virt_to_phys(&cfg),
536 .data.env_mon_dis_flag = 1,
537 .efuse_id = EFUSE_WRITE,
538 };
539
540 memcpy(&cfg, p, sizeof(cfg));
541
542 req.ibuf[0].buf = &req.data;
543 req.ibuf[0].len = sizeof(req.data);
544 req.ibuf[1].buf = &cfg;
545 req.ibuf[1].len = sizeof(cfg);
546
547 return versal_nvm_write(&req);
548 }
549
versal_efuse_write_sec_misc1(struct versal_efuse_sec_misc1_bits * p)550 TEE_Result versal_efuse_write_sec_misc1(struct versal_efuse_sec_misc1_bits *p)
551 {
552 struct versal_efuse_sec_misc1_bits cfg __aligned_efuse = { };
553 struct versal_nvm_write_req req __aligned_efuse = {
554 .data.misc1_ctrl_addr = virt_to_phys(&cfg),
555 .data.env_mon_dis_flag = 1,
556 .efuse_id = EFUSE_WRITE,
557 };
558
559 memcpy(&cfg, p, sizeof(cfg));
560
561 req.ibuf[0].buf = &req.data;
562 req.ibuf[0].len = sizeof(req.data);
563 req.ibuf[1].buf = &cfg;
564 req.ibuf[1].len = sizeof(cfg);
565
566 return versal_nvm_write(&req);
567 }
568
versal_efuse_write_offchip_ids(struct versal_efuse_offchip_ids * p)569 TEE_Result versal_efuse_write_offchip_ids(struct versal_efuse_offchip_ids *p)
570 {
571 struct versal_efuse_offchip_ids cfg __aligned_efuse = { };
572 struct versal_nvm_write_req req __aligned_efuse = {
573 .data.offchip_id_addr = virt_to_phys(&cfg),
574 .data.env_mon_dis_flag = 1,
575 .efuse_id = EFUSE_WRITE,
576 };
577
578 memcpy(&cfg, p, sizeof(cfg));
579
580 req.ibuf[0].buf = &req.data;
581 req.ibuf[0].len = sizeof(req.data);
582 req.ibuf[1].buf = &cfg;
583 req.ibuf[1].len = sizeof(cfg);
584
585 return versal_nvm_write(&req);
586 }
587
versal_efuse_write_revoke_ppk(enum versal_nvm_ppk_type type)588 TEE_Result versal_efuse_write_revoke_ppk(enum versal_nvm_ppk_type type)
589 {
590 struct versal_efuse_misc_ctrl_bits cfg __aligned_efuse = { };
591 struct versal_nvm_write_req req __aligned_efuse = {
592 .data.misc_ctrl_addr = virt_to_phys(&cfg),
593 .data.env_mon_dis_flag = 1,
594 .efuse_id = EFUSE_WRITE,
595 };
596
597 req.data.misc_ctrl_addr = virt_to_phys((void *)req.data.misc_ctrl_addr);
598 if (type == EFUSE_PPK0)
599 cfg.ppk0_invalid = 1;
600 else if (type == EFUSE_PPK1)
601 cfg.ppk1_invalid = 1;
602 else if (type == EFUSE_PPK2)
603 cfg.ppk2_invalid = 1;
604 else
605 return TEE_ERROR_BAD_PARAMETERS;
606
607 req.ibuf[0].buf = &req.data;
608 req.ibuf[0].len = sizeof(req.data);
609 req.ibuf[1].buf = &cfg;
610 req.ibuf[1].len = sizeof(cfg);
611
612 return versal_nvm_write(&req);
613 }
614
versal_efuse_write_revoke_id(uint32_t id)615 TEE_Result versal_efuse_write_revoke_id(uint32_t id)
616 {
617 struct versal_efuse_revoke_ids cfg __aligned_efuse = { };
618 struct versal_nvm_write_req req __aligned_efuse = {
619 .data.revoke_id_addr = virt_to_phys(&cfg),
620 .data.env_mon_dis_flag = 1,
621 .efuse_id = EFUSE_WRITE,
622 };
623 uint32_t row = 0;
624 uint32_t bit = 0;
625
626 row = id >> (NVM_WORD_LEN + 1);
627 bit = id & (NVM_WORD_LEN - 1);
628
629 cfg.revoke_id[row] = BIT(bit);
630 cfg.prgm_revoke_id = 1;
631
632 req.ibuf[0].buf = &req.data;
633 req.ibuf[0].len = sizeof(req.data);
634 req.ibuf[1].buf = &cfg;
635 req.ibuf[1].len = sizeof(cfg);
636
637 return versal_nvm_write(&req);
638 }
639
versal_efuse_read_revoke_id(uint32_t * buf,size_t len,enum versal_nvm_revocation_id id)640 TEE_Result versal_efuse_read_revoke_id(uint32_t *buf, size_t len,
641 enum versal_nvm_revocation_id id)
642 {
643 struct versal_nvm_read_req req = {
644 .efuse_id = EFUSE_READ_REVOCATION_ID,
645 .revocation_id = id,
646 };
647
648 if (len < EFUSE_REVOCATION_ID_LEN)
649 return TEE_ERROR_BAD_PARAMETERS;
650
651 if (versal_alloc_read_buffer(&req))
652 return TEE_ERROR_OUT_OF_MEMORY;
653
654 if (versal_nvm_read(&req)) {
655 versal_free_read_buffer(&req);
656 return TEE_ERROR_GENERIC;
657 }
658
659 memcpy(buf, versal_get_read_buffer(&req), EFUSE_REVOCATION_ID_LEN);
660 versal_free_read_buffer(&req);
661
662 return TEE_SUCCESS;
663 }
664
versal_efuse_read_misc_ctrl(struct versal_efuse_misc_ctrl_bits * buf)665 TEE_Result versal_efuse_read_misc_ctrl(struct versal_efuse_misc_ctrl_bits *buf)
666 {
667 struct versal_nvm_read_req req = {
668 .efuse_id = EFUSE_READ_MISC_CTRL,
669 };
670
671 if (versal_alloc_read_buffer(&req))
672 return TEE_ERROR_OUT_OF_MEMORY;
673
674 if (versal_nvm_read(&req)) {
675 versal_free_read_buffer(&req);
676 return TEE_ERROR_GENERIC;
677 }
678
679 memcpy(buf, versal_get_read_buffer(&req), sizeof(*buf));
680 versal_free_read_buffer(&req);
681
682 return TEE_SUCCESS;
683 }
684
versal_efuse_read_sec_ctrl(struct versal_efuse_sec_ctrl_bits * buf)685 TEE_Result versal_efuse_read_sec_ctrl(struct versal_efuse_sec_ctrl_bits *buf)
686 {
687 struct versal_nvm_read_req req = {
688 .efuse_id = EFUSE_READ_SEC_CTRL,
689 };
690
691 if (versal_alloc_read_buffer(&req))
692 return TEE_ERROR_OUT_OF_MEMORY;
693
694 if (versal_nvm_read(&req)) {
695 versal_free_read_buffer(&req);
696 return TEE_ERROR_GENERIC;
697 }
698
699 memcpy(buf, versal_get_read_buffer(&req), sizeof(*buf));
700 versal_free_read_buffer(&req);
701
702 return TEE_SUCCESS;
703 }
704
versal_efuse_read_sec_misc1(struct versal_efuse_sec_misc1_bits * buf)705 TEE_Result versal_efuse_read_sec_misc1(struct versal_efuse_sec_misc1_bits *buf)
706 {
707 struct versal_nvm_read_req req = {
708 .efuse_id = EFUSE_READ_SEC_MISC1,
709 };
710
711 if (versal_alloc_read_buffer(&req))
712 return TEE_ERROR_OUT_OF_MEMORY;
713
714 if (versal_nvm_read(&req)) {
715 versal_free_read_buffer(&req);
716 return TEE_ERROR_GENERIC;
717 }
718
719 memcpy(buf, versal_get_read_buffer(&req), sizeof(*buf));
720 versal_free_read_buffer(&req);
721
722 return TEE_SUCCESS;
723 }
724
725 TEE_Result
versal_efuse_read_boot_env_ctrl(struct versal_efuse_boot_env_ctrl_bits * buf)726 versal_efuse_read_boot_env_ctrl(struct versal_efuse_boot_env_ctrl_bits *buf)
727 {
728 struct versal_nvm_read_req req = {
729 .efuse_id = EFUSE_READ_BOOT_ENV_CTRL,
730 };
731
732 if (versal_alloc_read_buffer(&req))
733 return TEE_ERROR_OUT_OF_MEMORY;
734
735 if (versal_nvm_read(&req)) {
736 versal_free_read_buffer(&req);
737 return TEE_ERROR_GENERIC;
738 }
739
740 memcpy(buf, versal_get_read_buffer(&req), sizeof(*buf));
741 versal_free_read_buffer(&req);
742
743 return TEE_SUCCESS;
744 }
745
versal_efuse_read_offchip_revoke_id(uint32_t * buf,size_t len,enum versal_nvm_offchip_id id)746 TEE_Result versal_efuse_read_offchip_revoke_id(uint32_t *buf, size_t len,
747 enum versal_nvm_offchip_id id)
748 {
749 struct versal_nvm_read_req req = {
750 .efuse_id = EFUSE_READ_OFFCHIP_REVOCATION_ID,
751 .offchip_id = id,
752 };
753
754 if (len < EFUSE_OFFCHIP_REVOCATION_ID_LEN)
755 return TEE_ERROR_BAD_PARAMETERS;
756
757 if (versal_alloc_read_buffer(&req))
758 return TEE_ERROR_OUT_OF_MEMORY;
759
760 if (versal_nvm_read(&req)) {
761 versal_free_read_buffer(&req);
762 return TEE_ERROR_GENERIC;
763 }
764
765 memcpy(buf, versal_get_read_buffer(&req),
766 EFUSE_OFFCHIP_REVOCATION_ID_LEN);
767 versal_free_read_buffer(&req);
768
769 return TEE_SUCCESS;
770 }
771
versal_efuse_read_dec_only(uint32_t * buf,size_t len)772 TEE_Result versal_efuse_read_dec_only(uint32_t *buf, size_t len)
773 {
774 struct versal_nvm_read_req req = {
775 .efuse_id = EFUSE_READ_DEC_EFUSE_ONLY,
776 };
777
778 if (len < EFUSE_DEC_ONLY_LEN)
779 return TEE_ERROR_BAD_PARAMETERS;
780
781 if (versal_alloc_read_buffer(&req))
782 return TEE_ERROR_OUT_OF_MEMORY;
783
784 if (versal_nvm_read(&req)) {
785 versal_free_read_buffer(&req);
786 return TEE_ERROR_GENERIC;
787 }
788
789 memcpy(buf, versal_get_read_buffer(&req), EFUSE_DEC_ONLY_LEN);
790 versal_free_read_buffer(&req);
791
792 return TEE_SUCCESS;
793 }
794
795 TEE_Result
versal_efuse_read_puf_sec_ctrl(struct versal_efuse_puf_sec_ctrl_bits * buf)796 versal_efuse_read_puf_sec_ctrl(struct versal_efuse_puf_sec_ctrl_bits *buf)
797 {
798 struct versal_nvm_read_req req = {
799 .efuse_id = EFUSE_READ_PUF_SEC_CTRL,
800 };
801
802 if (versal_alloc_read_buffer(&req))
803 return TEE_ERROR_OUT_OF_MEMORY;
804
805 if (versal_nvm_read(&req)) {
806 versal_free_read_buffer(&req);
807 return TEE_ERROR_GENERIC;
808 }
809
810 memcpy(buf, versal_get_read_buffer(&req), sizeof(*buf));
811 versal_free_read_buffer(&req);
812
813 return TEE_SUCCESS;
814 }
815
versal_efuse_read_puf(struct versal_efuse_puf_header * buf)816 TEE_Result versal_efuse_read_puf(struct versal_efuse_puf_header *buf)
817 {
818 struct versal_nvm_read_req req = {
819 .efuse_id = EFUSE_READ_PUF,
820 };
821
822 if (versal_alloc_read_buffer(&req))
823 return TEE_ERROR_OUT_OF_MEMORY;
824
825 memcpy(versal_get_read_buffer(&req), buf, sizeof(*buf));
826
827 if (versal_nvm_read(&req)) {
828 versal_free_read_buffer(&req);
829 return TEE_ERROR_GENERIC;
830 }
831
832 memcpy(buf, versal_get_read_buffer(&req), sizeof(*buf));
833 versal_free_read_buffer(&req);
834
835 return TEE_SUCCESS;
836 }
837
838 /*
839 * This functionality requires building the PLM with XNVM_ACCESS_PUF_USER_DATA
840 * Calls will fail otherwise.
841 * When available, efuse_read_puf becomes unavailable.
842 */
843 TEE_Result
versal_efuse_read_puf_as_user_fuse(struct versal_efuse_puf_user_fuse * p)844 versal_efuse_read_puf_as_user_fuse(struct versal_efuse_puf_user_fuse *p)
845 {
846 uint32_t fuses[PUF_EFUSES_WORDS]__aligned_efuse = { 0 };
847 struct versal_efuse_puf_fuse_addr lbuf __aligned_efuse = {
848 .env_monitor_dis = p->env_monitor_dis,
849 .prgm_puf_fuse = p->prgm_puf_fuse,
850 .start_row = p->start_row,
851 .num_rows = p->num_rows,
852 .data_addr = virt_to_phys(fuses),
853 };
854 struct versal_nvm_read_req req = {
855 .efuse_id = EFUSE_READ_PUF_USER_FUSES,
856 };
857
858 req.ibuf[0].buf = &lbuf;
859 req.ibuf[0].len = sizeof(lbuf);
860 req.ibuf[1].buf = fuses;
861 req.ibuf[1].len = sizeof(fuses);
862
863 if (versal_nvm_read(&req))
864 return TEE_ERROR_GENERIC;
865
866 memcpy(p->data_addr, fuses, sizeof(fuses));
867
868 return TEE_SUCCESS;
869 }
870
871 /*
872 * This functionality requires building the PLM with XNVM_ACCESS_PUF_USER_DATA.
873 * Calls will fail otherwise.
874 * When available, efuse_write_puf becomes unavailable.
875 */
876 TEE_Result
versal_efuse_write_puf_as_user_fuse(struct versal_efuse_puf_user_fuse * p)877 versal_efuse_write_puf_as_user_fuse(struct versal_efuse_puf_user_fuse *p)
878 {
879 uint32_t fuses[PUF_EFUSES_WORDS]__aligned_efuse = { 0 };
880 struct versal_efuse_puf_fuse_addr lbuf __aligned_efuse = {
881 .env_monitor_dis = p->env_monitor_dis,
882 .prgm_puf_fuse = p->prgm_puf_fuse,
883 .start_row = p->start_row,
884 .num_rows = p->num_rows,
885 .data_addr = virt_to_phys(fuses),
886 };
887 struct versal_nvm_write_req req = {
888 .efuse_id = EFUSE_PUF_USER_FUSE_WRITE,
889 };
890
891 memcpy(fuses, p->data_addr, sizeof(p->data_addr));
892
893 req.ibuf[0].buf = &lbuf;
894 req.ibuf[0].len = sizeof(lbuf);
895 req.ibuf[1].buf = fuses;
896 req.ibuf[1].len = sizeof(fuses);
897
898 if (versal_nvm_write(&req))
899 return TEE_ERROR_GENERIC;
900
901 return TEE_SUCCESS;
902 }
903
versal_efuse_write_puf(struct versal_efuse_puf_header * buf)904 TEE_Result versal_efuse_write_puf(struct versal_efuse_puf_header *buf)
905 {
906 struct versal_efuse_puf_header cfg __aligned_efuse = { };
907 struct versal_nvm_write_req req __aligned_efuse = {
908 .efuse_id = EFUSE_WRITE_PUF,
909 };
910
911 memcpy(&cfg, buf, sizeof(*buf));
912
913 req.ibuf[0].buf = &cfg;
914 req.ibuf[0].len = sizeof(cfg);
915
916 if (versal_nvm_write(&req))
917 return TEE_ERROR_GENERIC;
918
919 return TEE_SUCCESS;
920 }
921
versal_bbram_write_aes_key(uint8_t * key,size_t len)922 TEE_Result versal_bbram_write_aes_key(uint8_t *key, size_t len)
923 {
924 struct versal_nvm_write_req req __aligned_efuse = {
925 .efuse_id = BBRAM_WRITE_AES_KEY,
926 .bbram.aes_key_len = len,
927 };
928 void *buf = NULL;
929
930 if (len != 32)
931 return TEE_ERROR_BAD_PARAMETERS;
932
933 buf = alloc_cache_aligned(1024);
934 if (!buf)
935 return TEE_ERROR_OUT_OF_MEMORY;
936
937 memcpy(buf, key, len);
938
939 req.ibuf[0].buf = buf;
940 req.ibuf[0].len = 1024;
941
942 if (versal_nvm_write(&req)) {
943 free(buf);
944 return TEE_ERROR_GENERIC;
945 }
946 free(buf);
947
948 return TEE_SUCCESS;
949 }
950
versal_bbram_zeroize(void)951 TEE_Result versal_bbram_zeroize(void)
952 {
953 struct versal_nvm_write_req req __aligned_efuse = {
954 .efuse_id = BBRAM_ZEROIZE,
955 };
956
957 if (versal_nvm_write(&req))
958 return TEE_ERROR_GENERIC;
959
960 return TEE_SUCCESS;
961 }
962
versal_bbram_write_user_data(uint32_t data)963 TEE_Result versal_bbram_write_user_data(uint32_t data)
964 {
965 struct versal_nvm_write_req req __aligned_efuse = {
966 .efuse_id = BBRAM_WRITE_USER_DATA,
967 .bbram.user_data = data,
968 };
969
970 if (versal_nvm_write(&req))
971 return TEE_ERROR_GENERIC;
972
973 return TEE_SUCCESS;
974 }
975
versal_bbram_read_user_data(uint32_t * data)976 TEE_Result versal_bbram_read_user_data(uint32_t *data)
977 {
978 struct versal_nvm_read_req req = {
979 .efuse_id = BBRAM_READ_USER_DATA,
980 };
981
982 if (versal_alloc_read_buffer(&req))
983 return TEE_ERROR_OUT_OF_MEMORY;
984
985 if (versal_nvm_read(&req)) {
986 versal_free_read_buffer(&req);
987 return TEE_ERROR_GENERIC;
988 }
989
990 memcpy(data, versal_get_read_buffer(&req), sizeof(*data));
991 versal_free_read_buffer(&req);
992
993 return TEE_SUCCESS;
994 }
995
versal_bbram_lock_write_user_data(void)996 TEE_Result versal_bbram_lock_write_user_data(void)
997 {
998 struct versal_nvm_write_req req __aligned_efuse = {
999 .efuse_id = BBRAM_LOCK_WRITE_USER_DATA,
1000 };
1001
1002 if (versal_nvm_write(&req))
1003 return TEE_ERROR_GENERIC;
1004
1005 return TEE_SUCCESS;
1006 }
1007