// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2020 NXP
 */

#include <config.h>
#include <dcp_utils.h>
#include <drivers/imx/dcp.h>
#include <imx-regs.h>
#include <io.h>
#include <kernel/boot.h>
#include <kernel/dt.h>
#include <kernel/mutex.h>
#include <kernel/spinlock.h>
#include <libfdt.h>
#include <local.h>
#include <mm/core_memprot.h>
#include <tee/cache.h>
#include <utee_defines.h>
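
/*
 * Known digests of the empty message, returned directly by
 * dcp_sha_do_final() when no input data was ever processed.
 */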
static const uint8_t sha1_null_msg[] = {
	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, 0x55,
	0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, 0x07, 0x09,
};

static const uint8_t sha256_null_msg[] = {
	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4,
	0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b,
	0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55,
};

static vaddr_t dcp_base;
static bool driver_initialized;
static unsigned int clk_refcount;
static unsigned int key_store_spinlock = SPINLOCK_UNLOCK;
static unsigned int clock_spinlock = SPINLOCK_UNLOCK;
static struct dcp_align_buf hw_context_buffer;

static struct mutex lock_channel[DCP_NB_CHANNELS] = {
	[DCP_CHANN0] = MUTEX_INITIALIZER,
	[DCP_CHANN1] = MUTEX_INITIALIZER,
	[DCP_CHANN2] = MUTEX_INITIALIZER,
	[DCP_CHANN3] = MUTEX_INITIALIZER,
};

static const struct dcp_hashalg hash_alg[2] = {
	[DCP_SHA1] = {
		.type = DCP_CONTROL1_HASH_SELECT_SHA1,
		.size = TEE_SHA1_HASH_SIZE,
	},
	[DCP_SHA256] = {
		.type = DCP_CONTROL1_HASH_SELECT_SHA256,
		.size = TEE_SHA256_HASH_SIZE,
	},
};
/*
 * Enable/disable the DCP clock.
 *
 * @enable   Enable the clock if true, disable if false.
 */
static void dcp_clk_enable(bool enable)
{
	vaddr_t ccm_base = core_mmu_get_va(CCM_BASE, MEM_AREA_IO_SEC,
					   CCM_CCGR0 + sizeof(uint32_t));
	uint32_t clock_except = cpu_spin_lock_xsave(&clock_spinlock);

	if (enable) {
		/* Ungate the clock for the first user only */
		if (clk_refcount == 0)
			io_setbits32(ccm_base + CCM_CCGR0, DCP_CLK_ENABLE_MASK);
		clk_refcount++;
	} else {
		assert(clk_refcount != 0);

		/* Gate the clock once the last user is done */
		clk_refcount--;
		if (clk_refcount == 0)
			io_clrbits32(ccm_base + CCM_CCGR0, DCP_CLK_ENABLE_MASK);
	}

	cpu_spin_unlock_xrestore(&clock_spinlock, clock_except);
}

/*
 * Lock the given channel with a mutex.
 *
 * @chan   DCP channel to lock
 */
static TEE_Result dcp_lock_known_channel(enum dcp_channel chan)
{
	if (mutex_trylock(&lock_channel[chan]))
		return TEE_SUCCESS;

	return TEE_ERROR_BUSY;
}

/*
 * Lock the first available DCP channel.
 *
 * @channel    [out] Channel that was locked
 */
static TEE_Result dcp_lock_channel(enum dcp_channel *channel)
{
	TEE_Result ret = TEE_ERROR_BUSY;
	enum dcp_channel chan = DCP_CHANN0;

	for (chan = DCP_CHANN0; chan < DCP_NB_CHANNELS; chan++) {
		ret = dcp_lock_known_channel(chan);
		if (ret == TEE_SUCCESS) {
			*channel = chan;
			return ret;
		}
	}

	EMSG("All channels are busy");

	return ret;
}

/*
 * Unlock the given channel.
 *
 * @chan   DCP channel to unlock
 */
static void dcp_unlock_channel(enum dcp_channel chan)
{
	mutex_unlock(&lock_channel[chan]);
}

/*
 * Start the DCP operation and wait for its completion.
 *
 * @dcp_data   Structure containing the DCP descriptor configuration and the
 *	       channel to use.
 */
static TEE_Result dcp_run(struct dcp_data *dcp_data)
{
	TEE_Result ret = TEE_SUCCESS;
	unsigned int timeout = 0;
	uint32_t val = 0;

	dcp_data->desc.next = 0;
	cache_operation(TEE_CACHEFLUSH, &dcp_data->desc,
			sizeof(dcp_data->desc));

	/* Enable the DCP clock if not already enabled */
	dcp_clk_enable(true);

	/* Clear DCP_STAT IRQ field for the channel used by the operation */
	io_clrbits32(dcp_base + DCP_STAT, BIT32(dcp_data->channel));

	/* Clear CH_N_STAT to clear IRQ and error codes */
	io_write32(dcp_base + DCP_CH_N_STAT(dcp_data->channel), 0x0);

	/* Update descriptor structure to be processed for the channel */
	io_write32(dcp_base + DCP_CH_N_CMDPTR(dcp_data->channel),
		   virt_to_phys(&dcp_data->desc));

	/* Increment the semaphore to start the transfer */
	io_write32(dcp_base + DCP_CH_N_SEMA(dcp_data->channel), 0x1);

	/* Poll DCP_STAT until the channel signals completion */
	for (timeout = 0; timeout < DCP_MAX_TIMEOUT; timeout++) {
		dcp_udelay(10);
		val = io_read32(dcp_base + DCP_STAT);
		if (val & BIT32(dcp_data->channel))
			break;
	}

	if (timeout == DCP_MAX_TIMEOUT) {
		EMSG("Timeout elapsed before the operation completed");
		ret = TEE_ERROR_GENERIC;
		goto out;
	}

	val = io_read32(dcp_base + DCP_CH_N_STAT(dcp_data->channel));
	if (val & DCP_CH_STAT_ERROR_MASK) {
		EMSG("Channel error, status 0x%" PRIx32, val);
		ret = TEE_ERROR_GENERIC;
	}

out:
	dcp_clk_enable(false);

	return ret;
}
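
/*
 * Generate the two CMAC subkeys K1 and K2 as specified by NIST SP 800-38B:
 * encrypt a zero block to get L, then derive each subkey by a one-bit left
 * shift, XORing with the constant Rb (0x87) when the shifted-out bit is set.
 *
 * @init   Cipher operation configuration (key and key mode)
 * @k1     [out] First subkey, used when the last block is complete
 * @k2     [out] Second subkey, used when the last block is padded
 */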
static TEE_Result dcp_cmac_subkey_generation(struct dcp_cipher_init *init,
					     uint8_t *k1, uint8_t *k2)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	struct dcp_cipher_data data = { };
	uint8_t l[16] = { };
	uint8_t tmp[16] = { };
	uint8_t const_zero[16] = { };
	uint8_t const_rb[16] = { [15] = 0x87 };

	ret = dcp_cipher_do_init(&data, init);
	if (ret != TEE_SUCCESS)
		return ret;

	ret = dcp_cipher_do_update(&data, const_zero, l, sizeof(l));
	if (ret != TEE_SUCCESS)
		goto out;

	if ((l[0] & BIT(7)) == 0) {
		dcp_left_shift_buffer(l, k1, 16);
	} else {
		dcp_left_shift_buffer(l, tmp, 16);
		dcp_xor(tmp, const_rb, k1, 16);
	}

	if ((k1[0] & BIT(7)) == 0) {
		dcp_left_shift_buffer(k1, k2, 16);
	} else {
		dcp_left_shift_buffer(k1, tmp, 16);
		dcp_xor(tmp, const_rb, k2, 16);
	}

	ret = TEE_SUCCESS;
out:
	dcp_cipher_do_final(&data);

	return ret;
}
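
/*
 * Store a 128-bit key in the DCP internal key storage (SRAM), one 32-bit
 * subword at a time.
 *
 * @key     Key to store, as four 32-bit words
 * @index   Key slot index, must be less than DCP_SRAM_KEY_NB_SUBWORD
 */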
TEE_Result dcp_store_key(uint32_t *key, unsigned int index)
{
	uint32_t val = 0;
	unsigned int i = 0;
	uint32_t key_store_except = 0;

	if (!key)
		return TEE_ERROR_BAD_PARAMETERS;

	if (index > DCP_SRAM_KEY_NB_SUBWORD - 1) {
		EMSG("Bad parameters, index must be < %u",
		     DCP_SRAM_KEY_NB_SUBWORD);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	key_store_except = cpu_spin_lock_xsave(&key_store_spinlock);

	dcp_clk_enable(true);

	val = DCP_SRAM_KEY_INDEX(index);
	io_write32(dcp_base + DCP_KEY, val);

	/*
	 * Key is stored as four uint32 values, starting with subword0
	 * (least-significant word)
	 */
	for (i = 0; i < DCP_SRAM_KEY_NB_SUBWORD; i++) {
		val = TEE_U32_TO_BIG_ENDIAN(key[i]);
		io_write32(dcp_base + DCP_KEYDATA, val);
	}

	dcp_clk_enable(false);

	cpu_spin_unlock_xrestore(&key_store_spinlock, key_store_except);

	return TEE_SUCCESS;
}
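
/*
 * Compute the AES-128-CMAC of the input buffer.
 *
 * @init         Cipher operation configuration (key and key mode)
 * @input        Message to authenticate, may be NULL if @input_size is zero
 * @input_size   Message size in bytes
 * @output       [out] 16-byte CMAC tag
 */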
TEE_Result dcp_cmac(struct dcp_cipher_init *init, uint8_t *input,
		    size_t input_size, uint8_t *output)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	uint8_t key1[DCP_AES128_KEY_SIZE] = { };
	uint8_t key2[DCP_AES128_KEY_SIZE] = { };
	unsigned int nb_blocks = 0;
	bool block_complete = false;
	struct dcp_cipher_data data = { };
	uint8_t y[DCP_AES128_BLOCK_SIZE] = { };
	uint8_t x[DCP_AES128_BLOCK_SIZE] = { };
	uint8_t last[DCP_AES128_BLOCK_SIZE] = { };
	unsigned int i = 0;
	size_t offset = 0;

	if (!output || !init)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!input && input_size)
		return TEE_ERROR_BAD_PARAMETERS;

	ret = dcp_cipher_do_init(&data, init);
	if (ret != TEE_SUCCESS)
		return TEE_ERROR_OUT_OF_MEMORY;

	/* Generate CMAC subkeys */
	ret = dcp_cmac_subkey_generation(init, key1, key2);
	if (ret != TEE_SUCCESS)
		goto out;

	/* Get the number of blocks */
	nb_blocks = ROUNDUP(input_size, DCP_AES128_BLOCK_SIZE) /
		    DCP_AES128_BLOCK_SIZE;

	block_complete = nb_blocks && !(input_size % DCP_AES128_BLOCK_SIZE);
	if (nb_blocks == 0)
		nb_blocks = 1;

	for (i = 0; i < nb_blocks - 1; i++) {
		dcp_xor(x, input + offset, y, DCP_AES128_BLOCK_SIZE);
		ret = dcp_cipher_do_update(&data, y, x,
					   DCP_AES128_BLOCK_SIZE);
		if (ret != TEE_SUCCESS)
			goto out;
		offset += DCP_AES128_BLOCK_SIZE;
	}

	/* Process the last block */
	memcpy(last, input + offset, input_size - offset);

	if (block_complete) {
		dcp_xor(last, key1, last, DCP_AES128_BLOCK_SIZE);
	} else {
		dcp_cmac_padding(last, input_size % DCP_AES128_BLOCK_SIZE);
		dcp_xor(last, key2, last, DCP_AES128_BLOCK_SIZE);
	}

	dcp_xor(x, last, y, DCP_AES128_BLOCK_SIZE);
	ret = dcp_cipher_do_update(&data, y, x, DCP_AES128_BLOCK_SIZE);
	if (ret != TEE_SUCCESS)
		goto out;

	memcpy(output, x, DCP_AES128_BLOCK_SIZE);

out:
	dcp_cipher_do_final(&data);

	return ret;
}
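
/*
 * Initialize an AES-128 cipher operation: lock a DCP channel and build the
 * operation descriptor from the supplied configuration.
 *
 * @data   [out] Cipher operation context
 * @init   Operation configuration: direction, mode, key mode, key and IV
 */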
TEE_Result dcp_cipher_do_init(struct dcp_cipher_data *data,
			      struct dcp_cipher_init *init)
{
	struct dcp_descriptor *desc = NULL;
	TEE_Result ret = TEE_ERROR_GENERIC;

	if (!init || !data)
		return TEE_ERROR_BAD_PARAMETERS;

	ret = dcp_lock_channel(&data->dcp_data.channel);
	if (ret != TEE_SUCCESS)
		return ret;

	desc = &data->dcp_data.desc;

	desc->ctrl0 = DCP_CONTROL0_DECR_SEMAPHORE | DCP_CONTROL0_ENABLE_CIPHER |
		      DCP_CONTROL0_INTERRUPT_ENABLE;
	desc->ctrl1 = DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (init->op == DCP_ENCRYPT)
		desc->ctrl0 |= DCP_CONTROL0_CIPHER_ENCRYPT;

	if (init->key_mode == DCP_OTP) {
		desc->ctrl0 &= ~DCP_CONTROL0_OTP_KEY;
		desc->ctrl1 |= DCP_CONTROL1_KEY_SELECT_OTP_CRYPTO;
	} else if (init->key_mode == DCP_PAYLOAD) {
		desc->ctrl0 |= DCP_CONTROL0_PAYLOAD_KEY;
		if (!init->key)
			return TEE_ERROR_BAD_PARAMETERS;
		memcpy(data->key, init->key, DCP_AES128_KEY_SIZE);
	} else {
		desc->ctrl1 |= SHIFT_U32(init->key_mode, 8);
	}

	if (init->mode == DCP_CBC) {
		desc->ctrl0 |= DCP_CONTROL0_CIPHER_INIT;
		desc->ctrl1 |= DCP_CONTROL1_CIPHER_MODE_CBC;
		if (!init->iv)
			return TEE_ERROR_BAD_PARAMETERS;
		memcpy(data->iv, init->iv, DCP_AES128_IV_SIZE);
	}

	/* Allocate aligned buffer for the DCP key and IV */
	ret = dcp_calloc_align_buf(&data->payload,
				   DCP_AES128_IV_SIZE + DCP_AES128_KEY_SIZE);
	if (ret != TEE_SUCCESS)
		return ret;

	desc->src_buffer = 0;
	desc->dest_buffer = 0;
	desc->status = 0;
	desc->buff_size = 0;
	desc->next = virt_to_phys(desc);

	data->initialized = true;

	return ret;
}
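
/*
 * Run the cipher operation on a multiple of AES-128 blocks.
 *
 * @data   Cipher operation context
 * @src    Source data to encrypt or decrypt
 * @dst    [out] Result of the operation
 * @size   Size of the data in bytes, multiple of DCP_AES128_BLOCK_SIZE
 */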
TEE_Result dcp_cipher_do_update(struct dcp_cipher_data *data,
				const uint8_t *src, uint8_t *dst, size_t size)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	struct dcp_align_buf output = { };
	struct dcp_align_buf input = { };
	struct dcp_descriptor *desc = NULL;

	if (!data || !src || !dst)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!data->initialized) {
		EMSG("Error, please call dcp_cipher_do_init() first");
		return TEE_ERROR_BAD_STATE;
	}

	if (size % DCP_AES128_BLOCK_SIZE) {
		EMSG("Input size has to be a multiple of %zu bytes",
		     DCP_AES128_BLOCK_SIZE);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	ret = dcp_calloc_align_buf(&output, size);
	if (ret != TEE_SUCCESS)
		goto out;

	ret = dcp_calloc_align_buf(&input, size);
	if (ret != TEE_SUCCESS)
		goto out;

	desc = &data->dcp_data.desc;

	/* Copy input data */
	memcpy(input.data, src, size);

	/* Copy key and IV */
	memcpy(data->payload.data, data->key, DCP_AES128_KEY_SIZE);
	data->payload_size = DCP_AES128_KEY_SIZE;
	if (desc->ctrl0 & DCP_CONTROL0_CIPHER_INIT) {
		memcpy(data->payload.data + DCP_AES128_KEY_SIZE, data->iv,
		       DCP_AES128_IV_SIZE);
		data->payload_size += DCP_AES128_IV_SIZE;
	}

	desc->src_buffer = input.paddr;
	desc->dest_buffer = output.paddr;
	desc->payload = data->payload.paddr;
	desc->buff_size = size;

	/* Clean input buffers to memory, discard stale output cache lines */
	cache_operation(TEE_CACHECLEAN, data->payload.data,
			data->payload_size);
	cache_operation(TEE_CACHECLEAN, input.data, size);
	cache_operation(TEE_CACHEINVALIDATE, output.data, size);

	ret = dcp_run(&data->dcp_data);
	if (ret != TEE_SUCCESS)
		goto out;

	cache_operation(TEE_CACHEINVALIDATE, output.data, size);

	/* The IV is only loaded on the first update of the operation */
	desc->ctrl0 &= ~DCP_CONTROL0_CIPHER_INIT;

	memcpy(dst, output.data, size);
out:
	dcp_free(&output);
	dcp_free(&input);

	return ret;
}
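
/*
 * Terminate the cipher operation: release the aligned payload buffer and
 * unlock the DCP channel.
 *
 * @data   Cipher operation context
 */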
void dcp_cipher_do_final(struct dcp_cipher_data *data)
{
	if (!data)
		return;

	data->initialized = false;
	dcp_free(&data->payload);
	dcp_unlock_channel(data->dcp_data.channel);
}
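
/*
 * Typical cipher call sequence (illustrative sketch only, error handling
 * elided; key, iv, src, dst and size are caller-provided):
 *
 *	struct dcp_cipher_data data = { };
 *	struct dcp_cipher_init init = {
 *		.op = DCP_ENCRYPT,
 *		.mode = DCP_CBC,
 *		.key_mode = DCP_PAYLOAD,
 *		.key = key,
 *		.iv = iv,
 *	};
 *
 *	dcp_cipher_do_init(&data, &init);
 *	dcp_cipher_do_update(&data, src, dst, size);
 *	dcp_cipher_do_final(&data);
 */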
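
/*
 * Initialize a hash operation: build the DCP descriptor, lock a channel and
 * allocate the context buffer that carries partial-block data between calls.
 *
 * @hashdata   Hash operation context, with the algorithm (alg) already set
 */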
TEE_Result dcp_sha_do_init(struct dcp_hash_data *hashdata)
{
	struct dcp_descriptor *desc = NULL;
	TEE_Result ret = TEE_ERROR_GENERIC;

	if (!hashdata) {
		EMSG("Bad parameters, hashdata is NULL");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	desc = &hashdata->dcp_data.desc;

	/* DCP descriptor init */
	desc->status = 0;
	desc->payload = 0;
	desc->dest_buffer = 0;
	desc->ctrl0 = DCP_CONTROL0_ENABLE_HASH | DCP_CONTROL0_INTERRUPT_ENABLE |
		      DCP_CONTROL0_DECR_SEMAPHORE | DCP_CONTROL0_HASH_INIT;
	desc->ctrl1 = hash_alg[hashdata->alg].type;
	desc->buff_size = 0;
	desc->next = 0;
	desc->src_buffer = 0;

	ret = dcp_lock_channel(&hashdata->dcp_data.channel);
	if (ret != TEE_SUCCESS) {
		EMSG("Channel is busy, can't start operation now");
		return ret;
	}

	/* Allocate context data */
	ret = dcp_calloc_align_buf(&hashdata->ctx, DCP_SHA_BLOCK_SIZE);
	if (ret != TEE_SUCCESS) {
		dcp_unlock_channel(hashdata->dcp_data.channel);
		return ret;
	}

	hashdata->initialized = true;
	hashdata->ctx_size = 0;

	return ret;
}
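
/*
 * Feed data into the hash operation. Full DCP_SHA_BLOCK_SIZE blocks are
 * processed by the DCP; any remainder is buffered in the context until the
 * next update or the final call.
 *
 * @hashdata   Hash operation context
 * @data       Input data to hash
 * @len        Input size in bytes
 */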
TEE_Result dcp_sha_do_update(struct dcp_hash_data *hashdata,
			     const uint8_t *data, size_t len)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	struct dcp_descriptor *desc = NULL;
	struct dcp_align_buf input = { };
	uint32_t offset = 0;
	uint32_t nb_blocks = 0;
	size_t size_todo = 0;
	size_t size_left = 0;
	size_t size_total = 0;

	if (!hashdata || !data || !len)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!hashdata->initialized) {
		EMSG("hashdata is uninitialized");
		return TEE_ERROR_BAD_STATE;
	}

	/* Get number of blocks */
	if (ADD_OVERFLOW(hashdata->ctx_size, len, &size_total))
		return TEE_ERROR_BAD_PARAMETERS;

	nb_blocks = size_total / DCP_SHA_BLOCK_SIZE;
	size_todo = nb_blocks * DCP_SHA_BLOCK_SIZE;
	size_left = len - size_todo + hashdata->ctx_size;
	desc = &hashdata->dcp_data.desc;

	if (size_todo) {
		/* Allocate buffer as input */
		ret = dcp_calloc_align_buf(&input, size_todo);
		if (ret != TEE_SUCCESS)
			return ret;

		/* Copy previous data if any */
		offset = size_todo - hashdata->ctx_size;
		memcpy(input.data, hashdata->ctx.data, hashdata->ctx_size);
		memcpy(input.data + hashdata->ctx_size, data, offset);
		hashdata->ctx_size = 0;

		desc->src_buffer = input.paddr;
		desc->buff_size = size_todo;

		cache_operation(TEE_CACHECLEAN, input.data, size_todo);

		ret = dcp_run(&hashdata->dcp_data);
		desc->ctrl0 &= ~DCP_CONTROL0_HASH_INIT;

		dcp_free(&input);
	} else {
		size_left = len;
		offset = 0;
		ret = TEE_SUCCESS;
	}

	/* Save any data left */
	memcpy(hashdata->ctx.data + hashdata->ctx_size, data + offset,
	       size_left);
	hashdata->ctx_size += size_left;

	return ret;
}
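
/*
 * Finalize the hash operation and output the digest. For an empty message,
 * return the precomputed null-message digest without running the DCP.
 *
 * @hashdata      Hash operation context
 * @digest        [out] Resulting digest
 * @digest_size   Size of the digest buffer in bytes
 */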
TEE_Result dcp_sha_do_final(struct dcp_hash_data *hashdata, uint8_t *digest,
			    size_t digest_size)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	size_t payload_size = 0;
	struct dcp_descriptor *desc = NULL;
	struct dcp_align_buf payload = { };

	if (!hashdata || !digest)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!hashdata->initialized) {
		EMSG("hashdata is uninitialized");
		return TEE_ERROR_BAD_STATE;
	}

	if (digest_size < hash_alg[hashdata->alg].size) {
		EMSG("Digest buffer size is too small, should be %" PRId32,
		     hash_alg[hashdata->alg].size);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	desc = &hashdata->dcp_data.desc;
	payload_size = hash_alg[hashdata->alg].size;

	/* Handle the case where the input message is NULL */
	if ((desc->ctrl0 & DCP_CONTROL0_HASH_INIT) && hashdata->ctx_size == 0) {
		if (hashdata->alg == DCP_SHA1)
			memcpy(digest, sha1_null_msg, payload_size);
		if (hashdata->alg == DCP_SHA256)
			memcpy(digest, sha256_null_msg, payload_size);
		ret = TEE_SUCCESS;
	} else {
		/* Allocate buffer for the digest */
		ret = dcp_calloc_align_buf(&payload, payload_size);
		if (ret != TEE_SUCCESS)
			return ret;

		/* Set work packet for last iteration */
		desc->ctrl0 |= DCP_CONTROL0_HASH_TERM;
		desc->src_buffer = hashdata->ctx.paddr;
		desc->buff_size = hashdata->ctx_size;
		desc->payload = payload.paddr;

		cache_operation(TEE_CACHECLEAN, hashdata->ctx.data,
				hashdata->ctx_size);
		cache_operation(TEE_CACHEINVALIDATE, payload.data,
				payload_size);

		ret = dcp_run(&hashdata->dcp_data);

		/* Copy the result */
		cache_operation(TEE_CACHEINVALIDATE, payload.data,
				payload_size);
		/* The DCP writes the digest back in reverse byte order */
		dcp_reverse(payload.data, digest, payload_size);

		dcp_free(&payload);
	}

	dcp_free(&hashdata->ctx);

	/* Reset hashdata structure */
	hashdata->initialized = false;

	dcp_unlock_channel(hashdata->dcp_data.channel);

	return ret;
}
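
/*
 * Typical hash call sequence (illustrative sketch only, error handling
 * elided; msg and msg_len are caller-provided):
 *
 *	struct dcp_hash_data hashdata = { .alg = DCP_SHA256 };
 *	uint8_t digest[TEE_SHA256_HASH_SIZE] = { };
 *
 *	dcp_sha_do_init(&hashdata);
 *	dcp_sha_do_update(&hashdata, msg, msg_len);
 *	dcp_sha_do_final(&hashdata, digest, sizeof(digest));
 */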
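
/*
 * Disable the use of the DCP unique (OTP-derived) key by setting the
 * corresponding bit in the DCP capability register.
 */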
void dcp_disable_unique_key(void)
{
	dcp_clk_enable(true);
	io_setbits32(dcp_base + DCP_CAPABILITY0,
		     DCP_CAPABILITY0_DISABLE_UNIQUE_KEY);
	dcp_clk_enable(false);
}

#ifdef CFG_DT
static const char *const dt_ctrl_match_table[] = {
	"fsl,imx28-dcp",
	"fsl,imx6sl-dcp",
};

/*
 * Fetch the DCP base address from the DT
 *
 * @base   [out] DCP base address
 */
static TEE_Result dcp_pbase(paddr_t *base)
{
	void *fdt = NULL;
	int node = -1;
	unsigned int i = 0;

	fdt = get_dt();
	if (!fdt) {
		EMSG("DTB not present");
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	for (i = 0; i < ARRAY_SIZE(dt_ctrl_match_table); i++) {
		node = fdt_node_offset_by_compatible(fdt, 0,
						     dt_ctrl_match_table[i]);
		if (node >= 0)
			break;
	}

	if (node < 0) {
		EMSG("DCP node not found, err = %d", node);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (_fdt_get_status(fdt, node) == DT_STATUS_DISABLED)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/* Force secure-status = "okay" and status = "disabled" */
	if (dt_enable_secure_status(fdt, node)) {
		EMSG("Unable to set the DCP Control DTB entry secure");
		return TEE_ERROR_NOT_SUPPORTED;
	}

	*base = _fdt_reg_base_address(fdt, node);
	if (*base == DT_INFO_INVALID_REG) {
		EMSG("Unable to get the DCP base address");
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	return TEE_SUCCESS;
}
#else
/* Without DT support, dcp_init() falls back to the static DCP_BASE */
static TEE_Result dcp_pbase(paddr_t *base __unused)
{
	return TEE_ERROR_ITEM_NOT_FOUND;
}
#endif /* CFG_DT */
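
/*
 * Initialize the DCP driver: map the registers, allocate the context
 * switching buffer, reset the DCP and enable all channels.
 */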
TEE_Result dcp_init(void)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	paddr_t pbase = 0;

	if (driver_initialized)
		return TEE_SUCCESS;

	dcp_clk_enable(true);

	/* Fall back to the static base address if the DT lookup fails */
	ret = dcp_pbase(&pbase);
	if (ret != TEE_SUCCESS)
		pbase = DCP_BASE;

	dcp_base = core_mmu_get_va(pbase, MEM_AREA_IO_SEC,
				   DCP_CONTEXT + sizeof(uint32_t));
	if (!dcp_base) {
		EMSG("Unable to get the DCP base virtual address");
		dcp_clk_enable(false);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	/* Context switching buffer memory allocation */
	ret = dcp_calloc_align_buf(&hw_context_buffer, DCP_CONTEXT_BUFFER_SIZE);
	if (ret != TEE_SUCCESS) {
		EMSG("hw_context_buffer allocation failed");
		dcp_clk_enable(false);
		return ret;
	}

	/*
	 * Reset the DCP before initialization. Depending on the SoC lifecycle
	 * state, the DCP needs to be reset to reload the OTP master key from
	 * the SNVS.
	 */
	io_write32(dcp_base + DCP_CTRL_SET, DCP_CTRL_SFTRST | DCP_CTRL_CLKGATE);

	/*
	 * Initialize the control register.
	 * Enable normal DCP operation (SFTRST and CLKGATE bits cleared)
	 */
	io_write32(dcp_base + DCP_CTRL_CLR, DCP_CTRL_SFTRST | DCP_CTRL_CLKGATE);

	io_write32(dcp_base + DCP_CTRL_SET,
		   DCP_CTRL_GATHER_RESIDUAL_WRITES |
		   DCP_CTRL_ENABLE_CONTEXT_SWITCHING);

	/* Enable all DCP channels */
	io_write32(dcp_base + DCP_CHANNELCTRL,
		   DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK);

	/* Clear the DCP_STAT register */
	io_write32(dcp_base + DCP_STAT_CLR, DCP_STAT_CLEAR);

	/* Copy the context switching buffer address into DCP_CONTEXT */
	io_write32(dcp_base + DCP_CONTEXT, (uint32_t)hw_context_buffer.paddr);

	driver_initialized = true;

	dcp_clk_enable(false);

	return ret;
}