// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (C) 2022 Foundries.io Ltd
 * Jorge Ramirez-Ortiz <jorge@foundries.io>
 */

#include <arm.h>
#include <assert.h>
#include <drivers/versal_mbox.h>
#include <drivers/versal_nvm.h>
#include <initcall.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <mm/core_memprot.h>
#include <stdlib.h>
#include <string.h>
#include <tee/cache.h>

#define NVM_WORD_LEN 4

/* Protocol API with the remote processor */
#define NVM_MODULE_SHIFT 8
#define NVM_MODULE 11
#define NVM_API_ID(_id) ((NVM_MODULE << NVM_MODULE_SHIFT) | (_id))

#define __aligned_efuse __aligned(CACHELINE_LEN)

/* Internal */
struct versal_efuse_puf_fuse_addr {
	uint64_t data_addr;
	uint32_t start_row;
	uint32_t num_rows;
	uint8_t env_monitor_dis;
	uint8_t prgm_puf_fuse;
	uint8_t pad[46];
};

/*
 * Max size of the buffer needed for the remote processor to DMA efuse _data_
 * to/from
 */
#define EFUSE_MAX_LEN (EFUSE_MAX_USER_FUSES * sizeof(uint32_t))

enum versal_nvm_api_id {
	API_FEATURES = 0,
	BBRAM_WRITE_AES_KEY = 1,
	BBRAM_ZEROIZE = 2,
	BBRAM_WRITE_USER_DATA = 3,
	BBRAM_READ_USER_DATA = 4,
	BBRAM_LOCK_WRITE_USER_DATA = 5,
	EFUSE_WRITE = 6,
	EFUSE_WRITE_PUF = 7,
	EFUSE_PUF_USER_FUSE_WRITE = 8,
	EFUSE_READ_IV = 9,
	EFUSE_READ_REVOCATION_ID = 10,
	EFUSE_READ_OFFCHIP_REVOCATION_ID = 11,
	EFUSE_READ_USER_FUSES = 12,
	EFUSE_READ_MISC_CTRL = 13,
	EFUSE_READ_SEC_CTRL = 14,
	EFUSE_READ_SEC_MISC1 = 15,
	EFUSE_READ_BOOT_ENV_CTRL = 16,
	EFUSE_READ_PUF_SEC_CTRL = 17,
	EFUSE_READ_PPK_HASH = 18,
	EFUSE_READ_DEC_EFUSE_ONLY = 19,
	EFUSE_READ_DNA = 20,
	EFUSE_READ_PUF_USER_FUSES = 21,
	EFUSE_READ_PUF = 22,
	EFUSE_INVALID = 23,
};
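
/*
 * Example of the command encoding (for reference only): the first IPI
 * request word packs the module and API identifiers, e.g.
 * NVM_API_ID(EFUSE_READ_DNA) == (11 << 8) | 20 == 0xb14.
 */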

/* uint64_t are memory addresses */
struct versal_efuse_data {
	uint64_t env_mon_dis_flag;
	uint64_t aes_key_addr;
	uint64_t ppk_hash_addr;
	uint64_t dec_only_addr;
	uint64_t sec_ctrl_addr;
	uint64_t misc_ctrl_addr;
	uint64_t revoke_id_addr;
	uint64_t iv_addr;
	uint64_t user_fuse_addr;
	uint64_t glitch_cfg_addr;
	uint64_t boot_env_ctrl_addr;
	uint64_t misc1_ctrl_addr;
	uint64_t offchip_id_addr;
	uint8_t pad[24];
};

/* Helper read and write requests (not part of the protocol) */
struct versal_nvm_buf {
	size_t len;
	void *buf;
};

struct versal_nvm_read_req {
	enum versal_nvm_api_id efuse_id;
	enum versal_nvm_revocation_id revocation_id;
	enum versal_nvm_offchip_id offchip_id;
	enum versal_nvm_ppk_type ppk_type;
	enum versal_nvm_iv_type iv_type;
	struct versal_nvm_buf ibuf[VERSAL_MAX_IPI_BUF];
};

struct versal_bbram_data {
	size_t aes_key_len;
	uint32_t user_data;
};

struct versal_nvm_write_req {
	struct versal_efuse_data data;
	struct versal_bbram_data bbram;
	struct versal_nvm_buf ibuf[VERSAL_MAX_IPI_BUF];
	enum versal_nvm_api_id efuse_id;
};

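/*
 * Build the IPI command for an NVM request: word 0 carries the module/API
 * identifier, an optional extra argument follows and, when a shared buffer
 * is supplied, its physical address is appended as two 32-bit words. All
 * ibuf entries are recorded in the command so the mailbox layer can manage
 * the shared buffers (e.g. cache maintenance).
 */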
static TEE_Result
prepare_cmd(struct versal_ipi_cmd *cmd, enum versal_nvm_api_id efuse,
	    struct versal_nvm_buf *ibufs, uint32_t *arg)
{
	uint32_t a = 0;
	uint32_t b = 0;
	size_t i = 0;

	cmd->data[i++] = NVM_API_ID(efuse);
	if (arg)
		cmd->data[i++] = *arg;

	if (!ibufs[0].buf)
		return TEE_SUCCESS;

	reg_pair_from_64(virt_to_phys(ibufs[0].buf), &b, &a);

	cmd->data[i++] = a;
	cmd->data[i++] = b;

	for (i = 0; i < VERSAL_MAX_IPI_BUF; i++) {
		cmd->ibuf[i].mem.alloc_len = ibufs[i].len;
		cmd->ibuf[i].mem.buf = ibufs[i].buf;
	}

	return TEE_SUCCESS;
}

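/* Issue a single NVM request to the remote processor over the IPI mailbox */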
static TEE_Result efuse_req(enum versal_nvm_api_id efuse,
			    struct versal_nvm_buf *ibufs, uint32_t *arg)
{
	struct versal_ipi_cmd cmd = { };
	TEE_Result ret = TEE_SUCCESS;

	ret = prepare_cmd(&cmd, efuse, ibufs, arg);
	if (ret)
		return ret;

	ret = versal_mbox_notify(&cmd, NULL, NULL);
	if (ret)
		EMSG("Mailbox error");

	return ret;
}

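/*
 * Small helpers managing the cache aligned buffer that receives the response
 * payload for the simple read requests below.
 */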
static TEE_Result versal_alloc_read_buffer(struct versal_nvm_read_req *req)
{
	assert(req);
	req->ibuf[0].len = 1024;
	req->ibuf[0].buf = alloc_cache_aligned(req->ibuf[0].len);
	if (!req->ibuf[0].buf)
		return TEE_ERROR_OUT_OF_MEMORY;

	return TEE_SUCCESS;
}

static void versal_free_read_buffer(struct versal_nvm_read_req *req)
{
	assert(req);
	free(req->ibuf[0].buf);
}

static void *versal_get_read_buffer(struct versal_nvm_read_req *req)
{
	assert(req);
	return req->ibuf[0].buf;
}

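/*
 * Dispatch a read request: the eFuse/BBRAM identifier selects whether an
 * extra argument (revocation id, offchip id, IV type or PPK type) must be
 * passed along with the shared buffers.
 */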
static TEE_Result versal_nvm_read(struct versal_nvm_read_req *req)
{
	uint32_t *arg = NULL;
	uint32_t val = 0;

	if (!req)
		return TEE_ERROR_GENERIC;

	switch (req->efuse_id) {
	case EFUSE_READ_DNA:
	case EFUSE_READ_DEC_EFUSE_ONLY:
	case EFUSE_READ_PUF_SEC_CTRL:
	case EFUSE_READ_BOOT_ENV_CTRL:
	case EFUSE_READ_SEC_CTRL:
	case EFUSE_READ_MISC_CTRL:
	case EFUSE_READ_SEC_MISC1:
	case EFUSE_READ_USER_FUSES:
	case EFUSE_READ_PUF_USER_FUSES:
	case EFUSE_READ_PUF:
		break;
	case EFUSE_READ_OFFCHIP_REVOCATION_ID:
		val = req->offchip_id;
		arg = &val;
		break;
	case EFUSE_READ_REVOCATION_ID:
		val = req->revocation_id;
		arg = &val;
		break;
	case EFUSE_READ_IV:
		val = req->iv_type;
		arg = &val;
		break;
	case EFUSE_READ_PPK_HASH:
		val = req->ppk_type;
		arg = &val;
		break;
	case BBRAM_READ_USER_DATA:
		break;
	default:
		return TEE_ERROR_GENERIC;
	}

	return efuse_req(req->efuse_id, req->ibuf, arg);
}

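/*
 * Dispatch a write request: BBRAM writes carry the key length or the user
 * data word as an extra argument, eFuse writes only pass the shared buffers.
 */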
static TEE_Result versal_nvm_write(struct versal_nvm_write_req *req)
{
	uint32_t *arg = NULL;
	uint32_t val = 0;

	switch (req->efuse_id) {
	case BBRAM_WRITE_AES_KEY:
		val = req->bbram.aes_key_len;
		arg = &val;
		break;
	case BBRAM_WRITE_USER_DATA:
		val = req->bbram.user_data;
		arg = &val;
		break;
	case EFUSE_PUF_USER_FUSE_WRITE:
	case EFUSE_WRITE_PUF:
	case EFUSE_WRITE:
		break;
	default:
		return TEE_ERROR_GENERIC;
	}

	return efuse_req(req->efuse_id, req->ibuf, arg);
}

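/*
 * Read 'num' user eFuse words starting at fuse 'first' into 'buf'. The
 * configuration structure and the response buffer are shared with the remote
 * processor, which writes the fuse words at cfg.addr.
 */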
TEE_Result versal_efuse_read_user_data(uint32_t *buf, size_t len,
				       uint32_t first, size_t num)
{
	struct versal_efuse_user_data cfg __aligned_efuse = {
		.start = first,
		.num = num,
	};
	struct versal_nvm_read_req req = {
		.efuse_id = EFUSE_READ_USER_FUSES,
	};
	void *rsp = NULL;

	if (first + num > EFUSE_MAX_USER_FUSES || len < num * sizeof(uint32_t))
		return TEE_ERROR_BAD_PARAMETERS;

	rsp = alloc_cache_aligned(1024);
	if (!rsp)
		return TEE_ERROR_OUT_OF_MEMORY;

	req.ibuf[0].buf = &cfg;
	req.ibuf[0].len = sizeof(cfg);
	req.ibuf[1].buf = rsp;
	req.ibuf[1].len = 1024;

	cfg.addr = virt_to_phys(rsp);

	if (versal_nvm_read(&req)) {
		free(rsp);
		return TEE_ERROR_GENERIC;
	}

	memcpy(buf, rsp, num * sizeof(uint32_t));
	free(rsp);

	return TEE_SUCCESS;
}

TEE_Result versal_efuse_read_dna(uint32_t *buf, size_t len)
{
	struct versal_nvm_read_req req = {
		.efuse_id = EFUSE_READ_DNA,
	};

	if (len < EFUSE_DNA_LEN)
		return TEE_ERROR_BAD_PARAMETERS;

	if (versal_alloc_read_buffer(&req))
		return TEE_ERROR_OUT_OF_MEMORY;

	if (versal_nvm_read(&req)) {
		versal_free_read_buffer(&req);
		return TEE_ERROR_GENERIC;
	}

	memcpy(buf, versal_get_read_buffer(&req), EFUSE_DNA_LEN);
	versal_free_read_buffer(&req);

	return TEE_SUCCESS;
}

TEE_Result versal_efuse_read_iv(uint32_t *buf, size_t len,
				enum versal_nvm_iv_type type)
{
	struct versal_nvm_read_req req = {
		.efuse_id = EFUSE_READ_IV,
		.iv_type = type,
	};

	if (len < EFUSE_IV_LEN)
		return TEE_ERROR_BAD_PARAMETERS;

	if (versal_alloc_read_buffer(&req))
		return TEE_ERROR_OUT_OF_MEMORY;

	if (versal_nvm_read(&req)) {
		versal_free_read_buffer(&req);
		return TEE_ERROR_GENERIC;
	}

	memcpy(buf, versal_get_read_buffer(&req), EFUSE_IV_LEN);
	versal_free_read_buffer(&req);

	return TEE_SUCCESS;
}

TEE_Result versal_efuse_read_ppk(uint32_t *buf, size_t len,
				 enum versal_nvm_ppk_type type)
{
	struct versal_nvm_read_req req = {
		.efuse_id = EFUSE_READ_PPK_HASH,
		.ppk_type = type,
	};

	if (len < EFUSE_PPK_LEN)
		return TEE_ERROR_BAD_PARAMETERS;

	if (versal_alloc_read_buffer(&req))
		return TEE_ERROR_OUT_OF_MEMORY;

	if (versal_nvm_read(&req)) {
		versal_free_read_buffer(&req);
		return TEE_ERROR_GENERIC;
	}

	memcpy(buf, versal_get_read_buffer(&req), EFUSE_PPK_LEN);
	versal_free_read_buffer(&req);

	return TEE_SUCCESS;
}

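/*
 * Program 'num' user eFuses starting at fuse 'first' from 'buf'. The fuse
 * words are copied into a cache aligned bounce buffer whose physical address
 * is passed to the remote processor through the configuration structure.
 */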
TEE_Result versal_efuse_write_user_data(uint32_t *buf, size_t len,
					uint32_t first, size_t num)
{
	uint32_t lbuf[EFUSE_MAX_USER_FUSES] __aligned_efuse = { 0 };
	struct versal_efuse_user_data cfg __aligned_efuse = {
		.start = first,
		.num = num,
	};
	struct versal_nvm_write_req req __aligned_efuse = {
		.data.user_fuse_addr = virt_to_phys(&cfg),
		.data.env_mon_dis_flag = 1,
		.efuse_id = EFUSE_WRITE,
	};
	size_t i = 0;

	if (first + num > EFUSE_MAX_USER_FUSES || len < num * sizeof(uint32_t))
		return TEE_ERROR_BAD_PARAMETERS;

	cfg.addr = virt_to_phys(lbuf);

	req.ibuf[0].buf = &req.data;
	req.ibuf[0].len = sizeof(req.data);
	req.ibuf[1].buf = &cfg;
	req.ibuf[1].len = sizeof(cfg);
	req.ibuf[2].buf = lbuf;
	req.ibuf[2].len = sizeof(lbuf);

	for (i = 0; i < cfg.num; i++)
		lbuf[i] = buf[i];

	return versal_nvm_write(&req);
}

TEE_Result versal_efuse_write_aes_keys(struct versal_efuse_aes_keys *keys)
{
	struct versal_efuse_aes_keys cfg __aligned_efuse = { };
	struct versal_nvm_write_req req __aligned_efuse = {
		.data.aes_key_addr = virt_to_phys(&cfg),
		.data.env_mon_dis_flag = 1,
		.efuse_id = EFUSE_WRITE,
	};

	memcpy(&cfg, keys, sizeof(cfg));

	req.ibuf[0].buf = &req.data;
	req.ibuf[0].len = sizeof(req.data);
	req.ibuf[1].buf = &cfg;
	req.ibuf[1].len = sizeof(cfg);

	return versal_nvm_write(&req);
}

TEE_Result versal_efuse_write_ppk_hash(struct versal_efuse_ppk_hash *hash)
{
	struct versal_efuse_ppk_hash cfg __aligned_efuse = { };
	struct versal_nvm_write_req req __aligned_efuse = {
		.data.ppk_hash_addr = virt_to_phys(&cfg),
		.data.env_mon_dis_flag = 1,
		.efuse_id = EFUSE_WRITE,
	};

	memcpy(&cfg, hash, sizeof(cfg));

	req.ibuf[0].buf = &req.data;
	req.ibuf[0].len = sizeof(req.data);
	req.ibuf[1].buf = &cfg;
	req.ibuf[1].len = sizeof(cfg);

	return versal_nvm_write(&req);
}

TEE_Result versal_efuse_write_iv(struct versal_efuse_ivs *p)
{
	struct versal_efuse_ivs cfg __aligned_efuse = { };
	struct versal_nvm_write_req req __aligned_efuse = {
		.data.iv_addr = virt_to_phys(&cfg),
		.data.env_mon_dis_flag = 1,
		.efuse_id = EFUSE_WRITE,
	};

	memcpy(&cfg, p, sizeof(cfg));

	req.ibuf[0].buf = &req.data;
	req.ibuf[0].len = sizeof(req.data);
	req.ibuf[1].buf = &cfg;
	req.ibuf[1].len = sizeof(cfg);

	return versal_nvm_write(&req);
}

TEE_Result versal_efuse_write_dec_only(struct versal_efuse_dec_only *p)
{
	struct versal_efuse_dec_only cfg __aligned_efuse = { };
	struct versal_nvm_write_req req __aligned_efuse = {
		.data.dec_only_addr = virt_to_phys(&cfg),
		.data.env_mon_dis_flag = 1,
		.efuse_id = EFUSE_WRITE,
	};

	memcpy(&cfg, p, sizeof(cfg));

	req.ibuf[0].buf = &req.data;
	req.ibuf[0].len = sizeof(req.data);
	req.ibuf[1].buf = &cfg;
	req.ibuf[1].len = sizeof(cfg);

	return versal_nvm_write(&req);
}

TEE_Result versal_efuse_write_sec(struct versal_efuse_sec_ctrl_bits *p)
{
	struct versal_efuse_sec_ctrl_bits cfg __aligned_efuse = { };
	struct versal_nvm_write_req req __aligned_efuse = {
		.data.sec_ctrl_addr = virt_to_phys(&cfg),
		.data.env_mon_dis_flag = 1,
		.efuse_id = EFUSE_WRITE,
	};

	memcpy(&cfg, p, sizeof(cfg));

	req.ibuf[0].buf = &req.data;
	req.ibuf[0].len = sizeof(req.data);
	req.ibuf[1].buf = &cfg;
	req.ibuf[1].len = sizeof(cfg);

	return versal_nvm_write(&req);
}

TEE_Result versal_efuse_write_misc(struct versal_efuse_misc_ctrl_bits *p)
{
	struct versal_efuse_misc_ctrl_bits cfg __aligned_efuse = { };
	struct versal_nvm_write_req req __aligned_efuse = {
		.data.misc_ctrl_addr = virt_to_phys(&cfg),
		.data.env_mon_dis_flag = 1,
		.efuse_id = EFUSE_WRITE,
	};

	memcpy(&cfg, p, sizeof(cfg));

	req.ibuf[0].buf = &req.data;
	req.ibuf[0].len = sizeof(req.data);
	req.ibuf[1].buf = &cfg;
	req.ibuf[1].len = sizeof(cfg);

	return versal_nvm_write(&req);
}

TEE_Result versal_efuse_write_glitch_cfg(struct versal_efuse_glitch_cfg_bits *p)
{
	struct versal_efuse_glitch_cfg_bits cfg __aligned_efuse = { };
	struct versal_nvm_write_req req __aligned_efuse = {
		.data.glitch_cfg_addr = virt_to_phys(&cfg),
		.data.env_mon_dis_flag = 1,
		.efuse_id = EFUSE_WRITE,
	};

	memcpy(&cfg, p, sizeof(cfg));

	req.ibuf[0].buf = &req.data;
	req.ibuf[0].len = sizeof(req.data);
	req.ibuf[1].buf = &cfg;
	req.ibuf[1].len = sizeof(cfg);

	return versal_nvm_write(&req);
}

TEE_Result versal_efuse_write_boot_env(struct versal_efuse_boot_env_ctrl_bits
				       *p)
{
	struct versal_efuse_boot_env_ctrl_bits cfg __aligned_efuse = { };
	struct versal_nvm_write_req req __aligned_efuse = {
		.data.boot_env_ctrl_addr = virt_to_phys(&cfg),
		.data.env_mon_dis_flag = 1,
		.efuse_id = EFUSE_WRITE,
	};

	memcpy(&cfg, p, sizeof(cfg));

	req.ibuf[0].buf = &req.data;
	req.ibuf[0].len = sizeof(req.data);
	req.ibuf[1].buf = &cfg;
	req.ibuf[1].len = sizeof(cfg);

	return versal_nvm_write(&req);
}

TEE_Result versal_efuse_write_sec_misc1(struct versal_efuse_sec_misc1_bits *p)
{
	struct versal_efuse_sec_misc1_bits cfg __aligned_efuse = { };
	struct versal_nvm_write_req req __aligned_efuse = {
		.data.misc1_ctrl_addr = virt_to_phys(&cfg),
		.data.env_mon_dis_flag = 1,
		.efuse_id = EFUSE_WRITE,
	};

	memcpy(&cfg, p, sizeof(cfg));

	req.ibuf[0].buf = &req.data;
	req.ibuf[0].len = sizeof(req.data);
	req.ibuf[1].buf = &cfg;
	req.ibuf[1].len = sizeof(cfg);

	return versal_nvm_write(&req);
}

TEE_Result versal_efuse_write_offchip_ids(struct versal_efuse_offchip_ids *p)
{
	struct versal_efuse_offchip_ids cfg __aligned_efuse = { };
	struct versal_nvm_write_req req __aligned_efuse = {
		.data.offchip_id_addr = virt_to_phys(&cfg),
		.data.env_mon_dis_flag = 1,
		.efuse_id = EFUSE_WRITE,
	};

	memcpy(&cfg, p, sizeof(cfg));

	req.ibuf[0].buf = &req.data;
	req.ibuf[0].len = sizeof(req.data);
	req.ibuf[1].buf = &cfg;
	req.ibuf[1].len = sizeof(cfg);

	return versal_nvm_write(&req);
}

TEE_Result versal_efuse_write_revoke_ppk(enum versal_nvm_ppk_type type)
{
	struct versal_efuse_misc_ctrl_bits cfg __aligned_efuse = { };
	struct versal_nvm_write_req req __aligned_efuse = {
		.data.misc_ctrl_addr = virt_to_phys(&cfg),
		.data.env_mon_dis_flag = 1,
		.efuse_id = EFUSE_WRITE,
	};

	if (type == EFUSE_PPK0)
		cfg.ppk0_invalid = 1;
	else if (type == EFUSE_PPK1)
		cfg.ppk1_invalid = 1;
	else if (type == EFUSE_PPK2)
		cfg.ppk2_invalid = 1;
	else
		return TEE_ERROR_BAD_PARAMETERS;

	req.ibuf[0].buf = &req.data;
	req.ibuf[0].len = sizeof(req.data);
	req.ibuf[1].buf = &cfg;
	req.ibuf[1].len = sizeof(cfg);

	return versal_nvm_write(&req);
}

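/*
 * Program the revocation eFuse bit matching 'id'. Each revoke_id word covers
 * 32 ids, so e.g. id 40 sets bit 8 of revoke_id[1].
 */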
TEE_Result versal_efuse_write_revoke_id(uint32_t id)
{
	struct versal_efuse_revoke_ids cfg __aligned_efuse = { };
	struct versal_nvm_write_req req __aligned_efuse = {
		.data.revoke_id_addr = virt_to_phys(&cfg),
		.data.env_mon_dis_flag = 1,
		.efuse_id = EFUSE_WRITE,
	};
	uint32_t row = 0;
	uint32_t bit = 0;

	/* 32 revocation ids per 32-bit eFuse row */
	row = id / (NVM_WORD_LEN * 8);
	bit = id % (NVM_WORD_LEN * 8);

	cfg.revoke_id[row] = BIT(bit);
	cfg.prgm_revoke_id = 1;

	req.ibuf[0].buf = &req.data;
	req.ibuf[0].len = sizeof(req.data);
	req.ibuf[1].buf = &cfg;
	req.ibuf[1].len = sizeof(cfg);

	return versal_nvm_write(&req);
}

TEE_Result versal_efuse_read_revoke_id(uint32_t *buf, size_t len,
				       enum versal_nvm_revocation_id id)
{
	struct versal_nvm_read_req req = {
		.efuse_id = EFUSE_READ_REVOCATION_ID,
		.revocation_id = id,
	};

	if (len < EFUSE_REVOCATION_ID_LEN)
		return TEE_ERROR_BAD_PARAMETERS;

	if (versal_alloc_read_buffer(&req))
		return TEE_ERROR_OUT_OF_MEMORY;

	if (versal_nvm_read(&req)) {
		versal_free_read_buffer(&req);
		return TEE_ERROR_GENERIC;
	}

	memcpy(buf, versal_get_read_buffer(&req), EFUSE_REVOCATION_ID_LEN);
	versal_free_read_buffer(&req);

	return TEE_SUCCESS;
}

TEE_Result versal_efuse_read_misc_ctrl(struct versal_efuse_misc_ctrl_bits *buf)
{
	struct versal_nvm_read_req req = {
		.efuse_id = EFUSE_READ_MISC_CTRL,
	};

	if (versal_alloc_read_buffer(&req))
		return TEE_ERROR_OUT_OF_MEMORY;

	if (versal_nvm_read(&req)) {
		versal_free_read_buffer(&req);
		return TEE_ERROR_GENERIC;
	}

	memcpy(buf, versal_get_read_buffer(&req), sizeof(*buf));
	versal_free_read_buffer(&req);

	return TEE_SUCCESS;
}

TEE_Result versal_efuse_read_sec_ctrl(struct versal_efuse_sec_ctrl_bits *buf)
{
	struct versal_nvm_read_req req = {
		.efuse_id = EFUSE_READ_SEC_CTRL,
	};

	if (versal_alloc_read_buffer(&req))
		return TEE_ERROR_OUT_OF_MEMORY;

	if (versal_nvm_read(&req)) {
		versal_free_read_buffer(&req);
		return TEE_ERROR_GENERIC;
	}

	memcpy(buf, versal_get_read_buffer(&req), sizeof(*buf));
	versal_free_read_buffer(&req);

	return TEE_SUCCESS;
}

TEE_Result versal_efuse_read_sec_misc1(struct versal_efuse_sec_misc1_bits *buf)
{
	struct versal_nvm_read_req req = {
		.efuse_id = EFUSE_READ_SEC_MISC1,
	};

	if (versal_alloc_read_buffer(&req))
		return TEE_ERROR_OUT_OF_MEMORY;

	if (versal_nvm_read(&req)) {
		versal_free_read_buffer(&req);
		return TEE_ERROR_GENERIC;
	}

	memcpy(buf, versal_get_read_buffer(&req), sizeof(*buf));
	versal_free_read_buffer(&req);

	return TEE_SUCCESS;
}

TEE_Result
versal_efuse_read_boot_env_ctrl(struct versal_efuse_boot_env_ctrl_bits *buf)
{
	struct versal_nvm_read_req req = {
		.efuse_id = EFUSE_READ_BOOT_ENV_CTRL,
	};

	if (versal_alloc_read_buffer(&req))
		return TEE_ERROR_OUT_OF_MEMORY;

	if (versal_nvm_read(&req)) {
		versal_free_read_buffer(&req);
		return TEE_ERROR_GENERIC;
	}

	memcpy(buf, versal_get_read_buffer(&req), sizeof(*buf));
	versal_free_read_buffer(&req);

	return TEE_SUCCESS;
}

TEE_Result versal_efuse_read_offchip_revoke_id(uint32_t *buf, size_t len,
					       enum versal_nvm_offchip_id id)
{
	struct versal_nvm_read_req req = {
		.efuse_id = EFUSE_READ_OFFCHIP_REVOCATION_ID,
		.offchip_id = id,
	};

	if (len < EFUSE_OFFCHIP_REVOCATION_ID_LEN)
		return TEE_ERROR_BAD_PARAMETERS;

	if (versal_alloc_read_buffer(&req))
		return TEE_ERROR_OUT_OF_MEMORY;

	if (versal_nvm_read(&req)) {
		versal_free_read_buffer(&req);
		return TEE_ERROR_GENERIC;
	}

	memcpy(buf, versal_get_read_buffer(&req),
	       EFUSE_OFFCHIP_REVOCATION_ID_LEN);
	versal_free_read_buffer(&req);

	return TEE_SUCCESS;
}

TEE_Result versal_efuse_read_dec_only(uint32_t *buf, size_t len)
{
	struct versal_nvm_read_req req = {
		.efuse_id = EFUSE_READ_DEC_EFUSE_ONLY,
	};

	if (len < EFUSE_DEC_ONLY_LEN)
		return TEE_ERROR_BAD_PARAMETERS;

	if (versal_alloc_read_buffer(&req))
		return TEE_ERROR_OUT_OF_MEMORY;

	if (versal_nvm_read(&req)) {
		versal_free_read_buffer(&req);
		return TEE_ERROR_GENERIC;
	}

	memcpy(buf, versal_get_read_buffer(&req), EFUSE_DEC_ONLY_LEN);
	versal_free_read_buffer(&req);

	return TEE_SUCCESS;
}

TEE_Result
versal_efuse_read_puf_sec_ctrl(struct versal_efuse_puf_sec_ctrl_bits *buf)
{
	struct versal_nvm_read_req req = {
		.efuse_id = EFUSE_READ_PUF_SEC_CTRL,
	};

	if (versal_alloc_read_buffer(&req))
		return TEE_ERROR_OUT_OF_MEMORY;

	if (versal_nvm_read(&req)) {
		versal_free_read_buffer(&req);
		return TEE_ERROR_GENERIC;
	}

	memcpy(buf, versal_get_read_buffer(&req), sizeof(*buf));
	versal_free_read_buffer(&req);

	return TEE_SUCCESS;
}

TEE_Result versal_efuse_read_puf(struct versal_efuse_puf_header *buf)
{
	struct versal_nvm_read_req req = {
		.efuse_id = EFUSE_READ_PUF,
	};

	if (versal_alloc_read_buffer(&req))
		return TEE_ERROR_OUT_OF_MEMORY;

	memcpy(versal_get_read_buffer(&req), buf, sizeof(*buf));

	if (versal_nvm_read(&req)) {
		versal_free_read_buffer(&req);
		return TEE_ERROR_GENERIC;
	}

	memcpy(buf, versal_get_read_buffer(&req), sizeof(*buf));
	versal_free_read_buffer(&req);

	return TEE_SUCCESS;
}

/*
 * This functionality requires building the PLM with XNVM_ACCESS_PUF_USER_DATA.
 * Calls will fail otherwise.
 * When that option is enabled, versal_efuse_read_puf becomes unavailable.
 */
TEE_Result
versal_efuse_read_puf_as_user_fuse(struct versal_efuse_puf_user_fuse *p)
{
	uint32_t fuses[PUF_EFUSES_WORDS] __aligned_efuse = { 0 };
	struct versal_efuse_puf_fuse_addr lbuf __aligned_efuse = {
		.env_monitor_dis = p->env_monitor_dis,
		.prgm_puf_fuse = p->prgm_puf_fuse,
		.start_row = p->start_row,
		.num_rows = p->num_rows,
		.data_addr = virt_to_phys(fuses),
	};
	struct versal_nvm_read_req req = {
		.efuse_id = EFUSE_READ_PUF_USER_FUSES,
	};

	req.ibuf[0].buf = &lbuf;
	req.ibuf[0].len = sizeof(lbuf);
	req.ibuf[1].buf = fuses;
	req.ibuf[1].len = sizeof(fuses);

	if (versal_nvm_read(&req))
		return TEE_ERROR_GENERIC;

	memcpy(p->data_addr, fuses, sizeof(fuses));

	return TEE_SUCCESS;
}

/*
 * This functionality requires building the PLM with XNVM_ACCESS_PUF_USER_DATA.
 * Calls will fail otherwise.
 * When that option is enabled, versal_efuse_write_puf becomes unavailable.
 */
TEE_Result
versal_efuse_write_puf_as_user_fuse(struct versal_efuse_puf_user_fuse *p)
{
	uint32_t fuses[PUF_EFUSES_WORDS] __aligned_efuse = { 0 };
	struct versal_efuse_puf_fuse_addr lbuf __aligned_efuse = {
		.env_monitor_dis = p->env_monitor_dis,
		.prgm_puf_fuse = p->prgm_puf_fuse,
		.start_row = p->start_row,
		.num_rows = p->num_rows,
		.data_addr = virt_to_phys(fuses),
	};
	struct versal_nvm_write_req req = {
		.efuse_id = EFUSE_PUF_USER_FUSE_WRITE,
	};

	memcpy(fuses, p->data_addr, sizeof(fuses));

	req.ibuf[0].buf = &lbuf;
	req.ibuf[0].len = sizeof(lbuf);
	req.ibuf[1].buf = fuses;
	req.ibuf[1].len = sizeof(fuses);

	if (versal_nvm_write(&req))
		return TEE_ERROR_GENERIC;

	return TEE_SUCCESS;
}

TEE_Result versal_efuse_write_puf(struct versal_efuse_puf_header *buf)
{
	struct versal_efuse_puf_header cfg __aligned_efuse = { };
	struct versal_nvm_write_req req __aligned_efuse = {
		.efuse_id = EFUSE_WRITE_PUF,
	};

	memcpy(&cfg, buf, sizeof(*buf));

	req.ibuf[0].buf = &cfg;
	req.ibuf[0].len = sizeof(cfg);

	if (versal_nvm_write(&req))
		return TEE_ERROR_GENERIC;

	return TEE_SUCCESS;
}

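/*
 * Store a 256-bit AES key in BBRAM. Only 32 byte keys are accepted; the key
 * is staged in a cache aligned bounce buffer shared with the remote
 * processor.
 */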
TEE_Result versal_bbram_write_aes_key(uint8_t *key, size_t len)
{
	struct versal_nvm_write_req req __aligned_efuse = {
		.efuse_id = BBRAM_WRITE_AES_KEY,
		.bbram.aes_key_len = len,
	};
	void *buf = NULL;

	if (len != 32)
		return TEE_ERROR_BAD_PARAMETERS;

	buf = alloc_cache_aligned(1024);
	if (!buf)
		return TEE_ERROR_OUT_OF_MEMORY;

	memcpy(buf, key, len);

	req.ibuf[0].buf = buf;
	req.ibuf[0].len = 1024;

	if (versal_nvm_write(&req)) {
		free(buf);
		return TEE_ERROR_GENERIC;
	}
	free(buf);

	return TEE_SUCCESS;
}

TEE_Result versal_bbram_zeroize(void)
{
	struct versal_nvm_write_req req __aligned_efuse = {
		.efuse_id = BBRAM_ZEROIZE,
	};

	if (versal_nvm_write(&req))
		return TEE_ERROR_GENERIC;

	return TEE_SUCCESS;
}

TEE_Result versal_bbram_write_user_data(uint32_t data)
{
	struct versal_nvm_write_req req __aligned_efuse = {
		.efuse_id = BBRAM_WRITE_USER_DATA,
		.bbram.user_data = data,
	};

	if (versal_nvm_write(&req))
		return TEE_ERROR_GENERIC;

	return TEE_SUCCESS;
}

TEE_Result versal_bbram_read_user_data(uint32_t *data)
{
	struct versal_nvm_read_req req = {
		.efuse_id = BBRAM_READ_USER_DATA,
	};

	if (versal_alloc_read_buffer(&req))
		return TEE_ERROR_OUT_OF_MEMORY;

	if (versal_nvm_read(&req)) {
		versal_free_read_buffer(&req);
		return TEE_ERROR_GENERIC;
	}

	memcpy(data, versal_get_read_buffer(&req), sizeof(*data));
	versal_free_read_buffer(&req);

	return TEE_SUCCESS;
}

TEE_Result versal_bbram_lock_write_user_data(void)
{
	struct versal_nvm_write_req req __aligned_efuse = {
		.efuse_id = BBRAM_LOCK_WRITE_USER_DATA,
	};

	if (versal_nvm_write(&req))
		return TEE_ERROR_GENERIC;

	return TEE_SUCCESS;
}
