// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2023 Linaro Limited
 */

#include <dm.h>
#include <dm/of_access.h>
#include <tpm_api.h>
#include <tpm-common.h>
#include <tpm-v2.h>
#include <tpm_tcg2.h>
#include <u-boot/sha1.h>
#include <u-boot/sha256.h>
#include <u-boot/sha512.h>
#include <version_string.h>
#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/unaligned/be_byteshift.h>
#include <linux/unaligned/generic.h>
#include <linux/unaligned/le_byteshift.h>
#include "tpm-utils.h"
#include <bloblist.h>

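/**
 * tcg2_get_pcr_info() - get the supported and active PCR banks and the number
 *			 of banks reported by the TPM
 *
 * @dev:		TPM device
 * @supported_bank:	bitmask of reported banks whose algorithm U-Boot also
 *			supports
 * @active_bank:	bitmask of currently active PCR banks
 * @bank_num:		number of PCR banks reported by the TPM
 *
 * Return: 0 on success, non-zero on failure
 */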
int tcg2_get_pcr_info(struct udevice *dev, u32 *supported_bank, u32 *active_bank,
		      u32 *bank_num)
{
	struct tpml_pcr_selection pcrs;
	size_t i;
	u32 ret;

	*supported_bank = 0;
	*active_bank = 0;
	*bank_num = 0;

	ret = tpm2_get_pcr_info(dev, &pcrs);
	if (ret)
		return ret;

	for (i = 0; i < pcrs.count; i++) {
		struct tpms_pcr_selection *sel = &pcrs.selection[i];
		u32 hash_mask = tcg2_algorithm_to_mask(sel->hash);

		if (tpm2_algorithm_supported(sel->hash))
			*supported_bank |= hash_mask;
		else
			log_warning("%s: unknown algorithm %x\n", __func__,
				    sel->hash);

		if (tpm2_is_active_bank(sel))
			*active_bank |= hash_mask;
	}

	*bank_num = pcrs.count;

	return 0;
}

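/**
 * tcg2_get_active_pcr_banks() - get the bitmask of active PCR banks
 *
 * @dev:		TPM device
 * @active_pcr_banks:	bitmask of currently active PCR banks
 *
 * Return: 0 on success, non-zero on failure
 */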
int tcg2_get_active_pcr_banks(struct udevice *dev, u32 *active_pcr_banks)
{
	u32 supported = 0;
	u32 pcr_banks = 0;
	u32 active = 0;
	int rc;

	rc = tcg2_get_pcr_info(dev, &supported, &active, &pcr_banks);
	if (rc)
		return rc;

	*active_pcr_banks = active;

	return 0;
}

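/**
 * tcg2_event_get_size() - compute the size of a crypto agile log entry that
 *			   carries @digest_list, excluding the event data
 *			   itself but including the trailing event size field
 *
 * @digest_list:	list of digests the entry will contain
 *
 * Return: size of the log entry header in bytes
 */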
u32 tcg2_event_get_size(struct tpml_digest_values *digest_list)
{
	u32 len;
	size_t i;

	len = offsetof(struct tcg_pcr_event2, digests);
	len += offsetof(struct tpml_digest_values, digests);
	for (i = 0; i < digest_list->count; ++i) {
		u16 l = tpm2_algorithm_to_len(digest_list->digests[i].hash_alg);

		if (!l)
			continue;

		len += l + offsetof(struct tpmt_ha, digest);
	}
	len += sizeof(u32);

	return len;
}

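/**
 * tcg2_create_digest() - hash the input data with every active PCR bank
 *			  algorithm and fill in a digest list
 *
 * @dev:		TPM device
 * @input:		data to hash
 * @length:		length of the data in bytes
 * @digest_list:	digest list to fill in; one entry is added per active
 *			bank whose algorithm has compiled-in support
 *
 * Return: 0 on success
 */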
int tcg2_create_digest(struct udevice *dev, const u8 *input, u32 length,
		       struct tpml_digest_values *digest_list)
{
	struct tpm_chip_priv *priv = dev_get_uclass_priv(dev);
	u8 final[sizeof(union tpmu_ha)];
#if IS_ENABLED(CONFIG_SHA256)
	sha256_context ctx_256;
#endif
#if IS_ENABLED(CONFIG_SHA512)
	sha512_context ctx_512;
#endif
#if IS_ENABLED(CONFIG_SHA1)
	sha1_context ctx;
#endif
	size_t i;
	u32 len;

	digest_list->count = 0;
	for (i = 0; i < priv->active_bank_count; i++) {
		switch (priv->active_banks[i]) {
#if IS_ENABLED(CONFIG_SHA1)
		case TPM2_ALG_SHA1:
			sha1_starts(&ctx);
			sha1_update(&ctx, input, length);
			sha1_finish(&ctx, final);
			len = TPM2_SHA1_DIGEST_SIZE;
			break;
#endif
#if IS_ENABLED(CONFIG_SHA256)
		case TPM2_ALG_SHA256:
			sha256_starts(&ctx_256);
			sha256_update(&ctx_256, input, length);
			sha256_finish(&ctx_256, final);
			len = TPM2_SHA256_DIGEST_SIZE;
			break;
#endif
#if IS_ENABLED(CONFIG_SHA384)
		case TPM2_ALG_SHA384:
			sha384_starts(&ctx_512);
			sha384_update(&ctx_512, input, length);
			sha384_finish(&ctx_512, final);
			len = TPM2_SHA384_DIGEST_SIZE;
			break;
#endif
#if IS_ENABLED(CONFIG_SHA512)
		case TPM2_ALG_SHA512:
			sha512_starts(&ctx_512);
			sha512_update(&ctx_512, input, length);
			sha512_finish(&ctx_512, final);
			len = TPM2_SHA512_DIGEST_SIZE;
			break;
#endif
		default:
			printf("%s: unsupported algorithm %x\n", __func__,
			       priv->active_banks[i]);
			continue;
		}

		digest_list->digests[digest_list->count].hash_alg =
			priv->active_banks[i];
		memcpy(&digest_list->digests[digest_list->count].digest, final,
		       len);
		digest_list->count++;
	}

	return 0;
}

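/**
 * tcg2_log_append() - serialize one crypto agile event into @log
 *
 * The entry is laid out as a tcg_pcr_event2: pcr_index, event_type, the
 * digest list (count followed by algorithm id/digest pairs), the event size
 * and finally the event data, all little-endian.
 *
 * @pcr_index:		PCR the event was extended into
 * @event_type:		type of the event
 * @digest_list:	list of digests to record
 * @size:		size of the event data in bytes
 * @event:		event data
 * @log:		buffer to write the entry to; must be at least
 *			tcg2_event_get_size() + @size bytes
 */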
void tcg2_log_append(u32 pcr_index, u32 event_type,
		     struct tpml_digest_values *digest_list, u32 size,
		     const u8 *event, u8 *log)
{
	size_t len;
	size_t pos;
	u32 i;

	pos = offsetof(struct tcg_pcr_event2, pcr_index);
	put_unaligned_le32(pcr_index, log);
	pos = offsetof(struct tcg_pcr_event2, event_type);
	put_unaligned_le32(event_type, log + pos);
	pos = offsetof(struct tcg_pcr_event2, digests) +
		offsetof(struct tpml_digest_values, count);
	put_unaligned_le32(digest_list->count, log + pos);

	pos = offsetof(struct tcg_pcr_event2, digests) +
		offsetof(struct tpml_digest_values, digests);
	for (i = 0; i < digest_list->count; ++i) {
		u16 hash_alg = digest_list->digests[i].hash_alg;

		len = tpm2_algorithm_to_len(hash_alg);
		if (!len)
			continue;

		pos += offsetof(struct tpmt_ha, hash_alg);
		put_unaligned_le16(hash_alg, log + pos);
		pos += offsetof(struct tpmt_ha, digest);
		memcpy(log + pos, (u8 *)&digest_list->digests[i].digest, len);
		pos += len;
	}

	put_unaligned_le32(size, log + pos);
	pos += sizeof(u32);
	memcpy(log + pos, event, size);
}

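/*
 * Append an event to the log after checking that it fits in the remaining
 * buffer space. Returns -ENOBUFS if the event would overflow the log.
 */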
static int tcg2_log_append_check(struct tcg2_event_log *elog, u32 pcr_index,
				 u32 event_type,
				 struct tpml_digest_values *digest_list,
				 u32 size, const u8 *event)
{
	u32 event_size;
	u8 *log;

	event_size = size + tcg2_event_get_size(digest_list);
	if (elog->log_position + event_size > elog->log_size) {
		printf("%s: log too large: %u + %u > %u\n", __func__,
		       elog->log_position, event_size, elog->log_size);
		return -ENOBUFS;
	}

	log = elog->log + elog->log_position;
	elog->log_position += event_size;

	tcg2_log_append(pcr_index, event_type, digest_list, size, event, log);

	return 0;
}

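/*
 * Write the Spec ID (EV_NO_ACTION) header event that starts a TCG2 event log.
 * The event advertises the active PCR banks and their digest sizes; the final
 * byte written is a zero vendorInfoSize.
 */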
static int tcg2_log_init(struct udevice *dev, struct tcg2_event_log *elog)
{
	struct tpm_chip_priv *priv = dev_get_uclass_priv(dev);
	struct tcg_efi_spec_id_event *ev;
	struct tcg_pcr_event *log;
	u32 event_size;
	u32 count = 0;
	u32 log_size;
	size_t i;
	u16 len;

	count = priv->active_bank_count;
	event_size = offsetof(struct tcg_efi_spec_id_event, digest_sizes);
	event_size += 1 +
		(sizeof(struct tcg_efi_spec_id_event_algorithm_size) * count);
	log_size = offsetof(struct tcg_pcr_event, event) + event_size;

	if (log_size > elog->log_size) {
		printf("%s: log too large: %u > %u\n", __func__, log_size,
		       elog->log_size);
		return -ENOBUFS;
	}

	log = (struct tcg_pcr_event *)elog->log;
	put_unaligned_le32(0, &log->pcr_index);
	put_unaligned_le32(EV_NO_ACTION, &log->event_type);
	memset(&log->digest, 0, sizeof(log->digest));
	put_unaligned_le32(event_size, &log->event_size);

	ev = (struct tcg_efi_spec_id_event *)log->event;
	strlcpy((char *)ev->signature, TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03,
		sizeof(ev->signature));
	put_unaligned_le32(0, &ev->platform_class);
	ev->spec_version_minor = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MINOR_TPM2;
	ev->spec_version_major = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MAJOR_TPM2;
	ev->spec_errata = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_ERRATA_TPM2;
	ev->uintn_size = sizeof(size_t) / sizeof(u32);
	put_unaligned_le32(count, &ev->number_of_algorithms);

	for (i = 0; i < count; ++i) {
		len = tpm2_algorithm_to_len(priv->active_banks[i]);
		put_unaligned_le16(priv->active_banks[i],
				   &ev->digest_sizes[i].algorithm_id);
		put_unaligned_le16(len, &ev->digest_sizes[i].digest_size);
	}

	*((u8 *)ev + (event_size - 1)) = 0;
	elog->log_position = log_size;

	return 0;
}

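/*
 * Walk the entries of a pre-existing event log starting at @log_position.
 * When @digest_list has a non-zero count (i.e. the PCRs were not extended by
 * an earlier boot stage), every recorded digest is replayed into the TPM via
 * tcg2_pcr_extend(). Parsing stops at the first malformed or zero-typed
 * entry.
 */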
static int tcg2_replay_eventlog(struct tcg2_event_log *elog,
				struct udevice *dev,
				struct tpml_digest_values *digest_list,
				u32 log_position)
{
	const u32 offset = offsetof(struct tcg_pcr_event2, digests) +
		offsetof(struct tpml_digest_values, digests);
	u32 event_size;
	u32 count;
	u16 algo;
	u32 pcr;
	u32 pos;
	u16 len;
	u8 *log;
	int rc;
	u32 i;

	while (log_position + offset < elog->log_size) {
		log = elog->log + log_position;

		pos = offsetof(struct tcg_pcr_event2, pcr_index);
		pcr = get_unaligned_le32(log + pos);
		pos = offsetof(struct tcg_pcr_event2, event_type);
		if (!get_unaligned_le32(log + pos))
			return 0;

		pos = offsetof(struct tcg_pcr_event2, digests) +
			offsetof(struct tpml_digest_values, count);
		count = get_unaligned_le32(log + pos);
		if (count > ARRAY_SIZE(hash_algo_list) ||
		    (digest_list->count && digest_list->count != count))
			return 0;

		pos = offsetof(struct tcg_pcr_event2, digests) +
			offsetof(struct tpml_digest_values, digests);
		for (i = 0; i < count; ++i) {
			pos += offsetof(struct tpmt_ha, hash_alg);
			if (log_position + pos + sizeof(u16) >= elog->log_size)
				return 0;

			algo = get_unaligned_le16(log + pos);
			pos += offsetof(struct tpmt_ha, digest);
			switch (algo) {
			case TPM2_ALG_SHA1:
			case TPM2_ALG_SHA256:
			case TPM2_ALG_SHA384:
			case TPM2_ALG_SHA512:
				len = tpm2_algorithm_to_len(algo);
				break;
			default:
				return 0;
			}

			if (digest_list->count) {
				if (algo != digest_list->digests[i].hash_alg ||
				    log_position + pos + len >= elog->log_size)
					return 0;

				memcpy(digest_list->digests[i].digest.sha512,
				       log + pos, len);
			}

			pos += len;
		}

		if (log_position + pos + sizeof(u32) >= elog->log_size)
			return 0;

		event_size = get_unaligned_le32(log + pos);
		pos += event_size + sizeof(u32);
		if (log_position + pos > elog->log_size)
			return 0;

		if (digest_list->count) {
			rc = tcg2_pcr_extend(dev, pcr, digest_list);
			if (rc)
				return rc;
		}

		log_position += pos;
	}

	elog->log_position = log_position;
	elog->found = true;
	return 0;
}

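/*
 * Validate the Spec ID header of a discovered event log and, if it is sane,
 * replay it. @log_active is set to the bitmask of algorithms the log was
 * created with; -ERESTARTSYS is returned when that bitmask does not match the
 * TPM's currently active banks, so the caller can reallocate the PCR banks.
 */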
static int tcg2_log_parse(struct udevice *dev, struct tcg2_event_log *elog,
			  u32 *log_active)
{
	struct tpml_digest_values digest_list;
	struct tcg_efi_spec_id_event *event;
	struct tcg_pcr_event *log;
	u32 calc_size;
	u32 active;
	u32 count;
	u32 evsz;
	u32 mask;
	u16 algo;
	u16 len;
	int rc;
	u32 i;

	*log_active = 0;

	if (elog->log_size <= offsetof(struct tcg_pcr_event, event))
		return 0;

	log = (struct tcg_pcr_event *)elog->log;
	if (get_unaligned_le32(&log->pcr_index) != 0 ||
	    get_unaligned_le32(&log->event_type) != EV_NO_ACTION)
		return 0;

	for (i = 0; i < sizeof(log->digest); i++) {
		if (log->digest[i])
			return 0;
	}

	evsz = get_unaligned_le32(&log->event_size);
	if (evsz < offsetof(struct tcg_efi_spec_id_event, digest_sizes) ||
	    evsz + offsetof(struct tcg_pcr_event, event) > elog->log_size)
		return 0;

	event = (struct tcg_efi_spec_id_event *)log->event;
	if (memcmp(event->signature, TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03,
		   sizeof(TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03)))
		return 0;

	if (event->spec_version_minor != TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MINOR_TPM2 ||
	    event->spec_version_major != TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MAJOR_TPM2)
		return 0;

	count = get_unaligned_le32(&event->number_of_algorithms);
	if (count > ARRAY_SIZE(hash_algo_list))
		return 0;

	calc_size = offsetof(struct tcg_efi_spec_id_event, digest_sizes) +
		(sizeof(struct tcg_efi_spec_id_event_algorithm_size) * count) +
		1;
	if (evsz != calc_size)
		return 0;

	/*
	 * Go through the algorithms the EventLog contains. If the EventLog
	 * algorithms don't match the active TPM ones, exit and report the
	 * erroneous banks.
	 * We've already checked that U-Boot supports all the enabled TPM
	 * algorithms, so just check the EventLog against the TPM active ones.
	 */
	digest_list.count = 0;
	for (i = 0; i < count; ++i) {
		algo = get_unaligned_le16(&event->digest_sizes[i].algorithm_id);
		mask = tcg2_algorithm_to_mask(algo);

		switch (algo) {
		case TPM2_ALG_SHA1:
		case TPM2_ALG_SHA256:
		case TPM2_ALG_SHA384:
		case TPM2_ALG_SHA512:
			len = get_unaligned_le16(&event->digest_sizes[i].digest_size);
			if (tpm2_algorithm_to_len(algo) != len) {
				log_err("EventLog invalid algorithm length\n");
				return -1;
			}
			digest_list.digests[digest_list.count++].hash_alg = algo;
			break;
		default:
			/*
			 * We could ignore this if the TPM PCRs were not
			 * extended by the previous bootloader, but for now
			 * just exit.
			 */
			log_err("EventLog has unsupported algorithm 0x%x\n",
				algo);
			return -1;
		}
		*log_active |= mask;
	}

	rc = tcg2_get_active_pcr_banks(dev, &active);
	if (rc)
		return rc;
	/* If the EventLog and active algorithms don't match, exit */
	if (*log_active != active)
		return -ERESTARTSYS;

	/* Read PCR0 to check if previous firmware extended the PCRs or not. */
	rc = tcg2_pcr_read(dev, 0, &digest_list);
	if (rc)
		return rc;

	for (i = 0; i < digest_list.count; ++i) {
		u8 hash_buf[TPM2_SHA512_DIGEST_SIZE] = { 0 };
		u16 hash_alg = digest_list.digests[i].hash_alg;

		if (memcmp((u8 *)&digest_list.digests[i].digest, hash_buf,
			   tpm2_algorithm_to_len(hash_alg)))
			digest_list.count = 0;
	}

	return tcg2_replay_eventlog(elog, dev, &digest_list,
				    offsetof(struct tcg_pcr_event, event) +
				    evsz);
}

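/**
 * tcg2_pcr_extend() - extend a PCR with every digest in @digest_list
 *
 * @dev:		TPM device
 * @pcr_index:		index of the PCR to extend
 * @digest_list:	list of digests to extend the PCR with
 *
 * Return: 0 on success, the TPM return code otherwise
 */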
int tcg2_pcr_extend(struct udevice *dev, u32 pcr_index,
		    struct tpml_digest_values *digest_list)
{
	u32 rc;
	u32 i;

	for (i = 0; i < digest_list->count; i++) {
		u32 alg = digest_list->digests[i].hash_alg;

		rc = tpm2_pcr_extend(dev, pcr_index, alg,
				     (u8 *)&digest_list->digests[i].digest,
				     tpm2_algorithm_to_len(alg));
		if (rc) {
			printf("%s: error pcr:%u alg:%08x\n", __func__,
			       pcr_index, alg);
			return rc;
		}
	}

	return 0;
}

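/**
 * tcg2_pcr_read() - read a PCR once per algorithm listed in @digest_list
 *
 * @dev:		TPM device
 * @pcr_index:		index of the PCR to read
 * @digest_list:	on entry the hash_alg fields select which banks to
 *			read; on return the digests hold the PCR values
 *
 * Return: 0 on success, non-zero on failure
 */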
int tcg2_pcr_read(struct udevice *dev, u32 pcr_index,
		  struct tpml_digest_values *digest_list)
{
	struct tpm_chip_priv *priv;
	u32 rc;
	u32 i;

	priv = dev_get_uclass_priv(dev);
	if (!priv)
		return -ENODEV;

	for (i = 0; i < digest_list->count; i++) {
		u32 alg = digest_list->digests[i].hash_alg;
		u8 *digest = (u8 *)&digest_list->digests[i].digest;

		rc = tpm2_pcr_read(dev, pcr_index, priv->pcr_select_min, alg,
				   digest, tpm2_algorithm_to_len(alg), NULL);
		if (rc) {
			printf("%s: error pcr:%u alg:%08x\n", __func__,
			       pcr_index, alg);
			return rc;
		}
	}

	return 0;
}

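/**
 * tcg2_measure_data() - hash data (or the event itself when @data is NULL),
 *			 extend the digests into a PCR and append a matching
 *			 entry to the event log
 *
 * @dev:	TPM device
 * @elog:	event log
 * @pcr_index:	PCR to extend
 * @size:	size of @data in bytes
 * @data:	data to measure, or NULL to measure @event instead
 * @event_type:	type of the logged event
 * @event_size:	size of @event in bytes
 * @event:	event data to record in the log
 *
 * Return: 0 on success, non-zero on failure
 */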
int tcg2_measure_data(struct udevice *dev, struct tcg2_event_log *elog,
		      u32 pcr_index, u32 size, const u8 *data, u32 event_type,
		      u32 event_size, const u8 *event)
{
	struct tpml_digest_values digest_list;
	int rc;

	if (data)
		rc = tcg2_create_digest(dev, data, size, &digest_list);
	else
		rc = tcg2_create_digest(dev, event, event_size, &digest_list);
	if (rc)
		return rc;

	rc = tcg2_pcr_extend(dev, pcr_index, &digest_list);
	if (rc)
		return rc;

	return tcg2_log_append_check(elog, pcr_index, event_type, &digest_list,
				     event_size, event);
}

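/**
 * tcg2_log_prepare_buffer() - discover and prepare the event log buffer
 *
 * Look up the platform event log. If a valid log already exists (and
 * @ignore_existing_log is false) it is parsed and replayed; when the log and
 * the TPM banks disagree, the banks recorded in the log are activated. If no
 * log was found and a buffer is available, a fresh Spec ID header is written.
 *
 * @dev:			TPM device
 * @elog:			event log to prepare; if log_size is zero the
 *				discovered platform buffer is used directly
 * @ignore_existing_log:	skip parsing of any pre-existing log
 *
 * Return: 0 on success, negative on failure
 */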
int tcg2_log_prepare_buffer(struct udevice *dev, struct tcg2_event_log *elog,
			    bool ignore_existing_log)
{
	struct tcg2_event_log log;
	int rc;
	u32 log_active = 0;

	elog->log_position = 0;
	elog->found = false;

	rc = tcg2_platform_get_log(dev, (void **)&log.log, &log.log_size);
	if (!rc) {
		log.log_position = 0;
		log.found = false;

		if (!ignore_existing_log) {
			rc = tcg2_log_parse(dev, &log, &log_active);
			if (rc == -ERESTARTSYS && log_active)
				goto pcr_allocate;
			if (rc)
				return rc;
		}

		if (elog->log_size) {
			if (log.found) {
				if (elog->log_size < log.log_position)
					return -ENOBUFS;

				/*
				 * Copy the discovered log into the user buffer
				 * if there's enough space.
				 */
				memcpy(elog->log, log.log, log.log_position);
			}

			unmap_physmem(log.log, MAP_NOCACHE);
		} else {
			elog->log = log.log;
			elog->log_size = log.log_size;
		}

		elog->log_position = log.log_position;
		elog->found = log.found;
	}

pcr_allocate:
	rc = tpm2_activate_banks(dev, log_active);
	if (rc)
		return rc;

	/*
	 * Initialize the log buffer if no log was discovered and the buffer is
	 * valid. Users can pass in their own buffer as a fallback if no
	 * memory region is found.
	 */
	if (!elog->found && elog->log_size)
		rc = tcg2_log_init(dev, elog);

	return rc;
}

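/**
 * tcg2_measurement_init() - find and start a TPMv2, prepare the event log and
 *			     measure the U-Boot version string into PCR0 as an
 *			     EV_S_CRTM_VERSION event
 *
 * @dev:			returns the TPM device found
 * @elog:			event log to prepare
 * @ignore_existing_log:	skip parsing of any pre-existing log
 *
 * Return: 0 on success, non-zero on failure
 */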
int tcg2_measurement_init(struct udevice **dev, struct tcg2_event_log *elog,
			  bool ignore_existing_log)
{
	int rc;

	rc = tcg2_platform_get_tpm2(dev);
	if (rc)
		return rc;

	rc = tpm_auto_start(*dev);
	if (rc)
		return rc;

	rc = tcg2_log_prepare_buffer(*dev, elog, ignore_existing_log);
	if (rc) {
		tcg2_measurement_term(*dev, elog, true);
		return rc;
	}

	rc = tcg2_measure_event(*dev, elog, 0, EV_S_CRTM_VERSION,
				strlen(version_string) + 1,
				(u8 *)version_string);
	if (rc) {
		tcg2_measurement_term(*dev, elog, true);
		return rc;
	}

	return 0;
}

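/**
 * tcg2_measurement_term() - measure EV_SEPARATOR events into PCRs 0-7 and
 *			     release the event log buffer
 *
 * @dev:	TPM device
 * @elog:	event log
 * @error:	true to record the error separator value (0x1) instead of the
 *		normal 0xffffffff
 */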
void tcg2_measurement_term(struct udevice *dev, struct tcg2_event_log *elog,
			   bool error)
{
	u32 event = error ? 0x1 : 0xffffffff;
	int i;

	for (i = 0; i < 8; ++i)
		tcg2_measure_event(dev, elog, i, EV_SEPARATOR, sizeof(event),
				   (const u8 *)&event);

	if (elog->log)
		unmap_physmem(elog->log, MAP_NOCACHE);
}

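/**
 * tcg2_platform_get_log() - discover the platform event log buffer
 *
 * The buffer is taken from the bloblist when one is available; otherwise it
 * is looked up via the TPM node's "tpm_event_log_addr"/"tpm_event_log_size"
 * properties (when bloblist support is disabled), the
 * "linux,sml-base"/"linux,sml-size" properties, or a "memory-region" phandle.
 * Weak default; boards may override it.
 *
 * @dev:	TPM device
 * @addr:	returns the mapped address of the buffer
 * @size:	returns the size of the buffer in bytes
 *
 * Return: 0 on success, -ENODEV or -ENOMEM when no buffer was found
 */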
__weak int tcg2_platform_get_log(struct udevice *dev, void **addr, u32 *size)
{
	const __be32 *addr_prop = NULL;
	const __be32 *size_prop = NULL;
	int asize;
	int ssize;
	struct ofnode_phandle_args args;
	phys_addr_t a;
	fdt_size_t s;

	*addr = NULL;
	*size = 0;

	*addr = bloblist_get_blob(BLOBLISTT_TPM_EVLOG, size);
	if (*addr && *size) {
		*addr = map_physmem((uintptr_t)(*addr), *size, MAP_NOCACHE);
		return 0;
	}

	/*
	 * TODO:
	 * Replace BLOBLIST with a new Kconfig option that makes handing off
	 * all components (FDT, TPM event log, etc.) from the previous boot
	 * stage via bloblist mandatory, following the Firmware Handoff spec.
	 */
	if (!CONFIG_IS_ENABLED(BLOBLIST)) {
		addr_prop = dev_read_prop(dev, "tpm_event_log_addr", &asize);
		size_prop = dev_read_prop(dev, "tpm_event_log_size", &ssize);
	}

	/*
	 * If no event log was observed, an SML buffer is required for the
	 * kernel to discover the event log.
	 */
	if (!addr_prop || !size_prop) {
		addr_prop = dev_read_prop(dev, "linux,sml-base", &asize);
		size_prop = dev_read_prop(dev, "linux,sml-size", &ssize);
	}

	if (addr_prop && size_prop) {
		u64 a = of_read_number(addr_prop, asize / sizeof(__be32));
		u64 s = of_read_number(size_prop, ssize / sizeof(__be32));

		*addr = map_physmem(a, s, MAP_NOCACHE);
		*size = (u32)s;

		return 0;
	}

	if (dev_read_phandle_with_args(dev, "memory-region", NULL, 0, 0, &args))
		return -ENODEV;

	a = ofnode_get_addr_size(args.node, "reg", &s);
	if (a == FDT_ADDR_T_NONE)
		return -ENOMEM;

	*addr = map_physmem(a, s, MAP_NOCACHE);
	*size = (u32)s;

	return 0;
}

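/**
 * tcg2_platform_get_tpm2() - find the first TPMv2 device
 *
 * Weak default; boards may override it to select a specific TPM.
 *
 * @dev:	returns the TPM device found
 *
 * Return: 0 on success, -ENODEV if no TPMv2 device exists
 */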
__weak int tcg2_platform_get_tpm2(struct udevice **dev)
{
	for_each_tpm_device(*dev) {
		if (tpm_get_version(*dev) == TPM_V2)
			return 0;
	}

	return -ENODEV;
}

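/**
 * tcg2_algorithm_to_mask() - translate a TPM2 algorithm id into its hash
 *			      algorithm bitmask
 *
 * @algo:	TPM2 algorithm id
 *
 * Return: the corresponding bitmask, or 0 for an unknown algorithm
 */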
u32 tcg2_algorithm_to_mask(enum tpm2_algorithms algo)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(hash_algo_list); i++) {
		if (hash_algo_list[i].hash_alg == algo)
			return hash_algo_list[i].hash_mask;
	}

	return 0;
}

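/**
 * tcg2_platform_startup_error() - hook called when TPM startup failed
 *
 * Weak no-op default; boards may override it to handle the error.
 *
 * @dev:	TPM device
 * @rc:		error code returned by the startup attempt
 */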
__weak void tcg2_platform_startup_error(struct udevice *dev, int rc) {}