1 // SPDX-License-Identifier: BSD-2-Clause
2 /* Copyright (c) 2018, Linaro Limited */
3
4 /*
5 * This is an implementation of the Fortuna cryptographic PRNG as defined in
6 * https://www.schneier.com/academic/paperfiles/fortuna.pdf
7 * There's one small exception, see comment in restart_pool() below.
8 */
9
10 #include <assert.h>
11 #include <crypto/crypto.h>
12 #include <kernel/mutex.h>
13 #include <kernel/refcount.h>
14 #include <kernel/spinlock.h>
15 #include <kernel/tee_time.h>
16 #include <string.h>
17 #include <types_ext.h>
18 #include <utee_defines.h>
19 #include <util.h>
20
/* Number of entropy accumulation pools, P0..P31, as in the Fortuna paper */
#define NUM_POOLS 32
/* AES block size in bytes; the generator counter is exactly one block */
#define BLOCK_SIZE 16
/* Generator key size in bytes (AES-256 key = SHA-256 digest size) */
#define KEY_SIZE 32
#define CIPHER_ALGO TEE_ALG_AES_ECB_NOPAD
#define HASH_ALGO TEE_ALG_SHA256
/* Minimum number of bytes accumulated in pool 0 before a reseed may occur */
#define MIN_POOL_SIZE 64
/* Per-event data is capped at this many bytes when hashed into a pool */
#define MAX_EVENT_DATA_LEN 32U
/* Bytes of event data stored per ring buffer element (quick sources) */
#define RING_BUF_DATA_SIZE 4U
29
30 /*
31 * struct fortuna_state - state of the Fortuna PRNG
32 * @ctx: Cipher context used to produce the random numbers
33 * @counter: Counter which is encrypted to produce the random numbers
34 * @pool0_length: Amount of data added to pool0
35 * @pool_ctx: One hash context for each pool
36 * @reseed_ctx: Hash context used while reseeding
37 * @reseed_count: Number of time we've reseeded the PRNG, used to tell
38 * which pools should be used in the reseed process
39 * @next_reseed_time: If we have a secure time, the earliest next time we
40 * may reseed
41 *
42 * To minimize the delay in crypto_rng_add_event() there's @pool_spin_lock
43 * which protects everything needed by this function.
44 *
45 * @next_reseed_time is used as a rate limiter for reseeding.
46 */
47 static struct fortuna_state {
48 void *ctx;
49 uint64_t counter[2];
50 unsigned int pool0_length;
51 void *pool_ctx[NUM_POOLS];
52 void *reseed_ctx;
53 uint32_t reseed_count;
54 #ifndef CFG_SECURE_TIME_SOURCE_REE
55 TEE_Time next_reseed_time;
56 #endif
57 } state;
58
/* Serializes all access to struct fortuna_state above */
static struct mutex state_mu = MUTEX_INITIALIZER;

/*
 * Staging buffer for events from "quick" sources which must not sleep on
 * @state_mu. Writers serialize on @ring_buffer_spin_lock; the single
 * consumer (drain_ring_buffer(), with @state_mu held) synchronizes with
 * writers via the atomic accesses of @begin/@end.
 */
static struct {
	struct {
		uint8_t snum;	/* source id */
		uint8_t pnum;	/* destination pool number */
		uint8_t dlen;	/* valid bytes in @data */
		uint8_t data[RING_BUF_DATA_SIZE];
	} elem[8];
	unsigned int begin;	/* last slot published by a writer */
	unsigned int end;	/* next slot to be consumed */
} ring_buffer;

/* NOTE(review): not static — presumably file-local only, confirm no external users */
unsigned int ring_buffer_spin_lock;
73
/* Advance the 128-bit generator counter by one (counter[0] is the low word) */
static void inc_counter(uint64_t counter[2])
{
	if (!++counter[0])
		counter[1]++;
}
80
/* (Re)start the hash computation in @ctx (pool or reseed context) */
static TEE_Result hash_init(void *ctx)
{
	return crypto_hash_init(ctx);
}
85
/* Feed @dlen bytes at @data into the running hash in @ctx */
static TEE_Result hash_update(void *ctx, const void *data, size_t dlen)
{
	return crypto_hash_update(ctx, data, dlen);
}
90
/* Finish the hash in @ctx, writing exactly KEY_SIZE digest bytes */
static TEE_Result hash_final(void *ctx, uint8_t digest[KEY_SIZE])
{
	return crypto_hash_final(ctx, digest, KEY_SIZE);
}
95
key_from_data(void * ctx,const void * data,size_t dlen,uint8_t key[KEY_SIZE])96 static TEE_Result key_from_data(void *ctx, const void *data, size_t dlen,
97 uint8_t key[KEY_SIZE])
98 {
99 TEE_Result res;
100
101 res = hash_init(ctx);
102 if (res)
103 return res;
104 res = hash_update(ctx, data, dlen);
105 if (res)
106 return res;
107 return hash_final(ctx, key);
108 }
109
/* (Re)key the generator cipher in @ctx for encryption with @key */
static TEE_Result cipher_init(void *ctx, uint8_t key[KEY_SIZE])
{
	return crypto_cipher_init(ctx, TEE_MODE_ENCRYPT,
				  key, KEY_SIZE, NULL, 0, NULL, 0);
}
115
fortuna_done(void)116 static void fortuna_done(void)
117 {
118 size_t n;
119
120 for (n = 0; n < NUM_POOLS; n++) {
121 crypto_hash_free_ctx(state.pool_ctx[n]);
122 state.pool_ctx[n] = NULL;
123 }
124 crypto_hash_free_ctx(state.reseed_ctx);
125 state.reseed_ctx = NULL;
126 crypto_cipher_free_ctx(state.ctx);
127 state.ctx = NULL;
128 }
129
crypto_rng_init(const void * data,size_t dlen)130 TEE_Result crypto_rng_init(const void *data, size_t dlen)
131 {
132 TEE_Result res;
133 uint8_t key[KEY_SIZE];
134 void *ctx;
135 size_t n;
136
137 COMPILE_TIME_ASSERT(sizeof(state.counter) == BLOCK_SIZE);
138
139 if (state.ctx)
140 return TEE_ERROR_BAD_STATE;
141
142 memset(&state, 0, sizeof(state));
143
144 for (n = 0; n < NUM_POOLS; n++) {
145 res = crypto_hash_alloc_ctx(&state.pool_ctx[n], HASH_ALGO);
146 if (res)
147 goto err;
148 res = crypto_hash_init(state.pool_ctx[n]);
149 if (res)
150 goto err;
151 }
152
153 res = crypto_hash_alloc_ctx(&state.reseed_ctx, HASH_ALGO);
154 if (res)
155 goto err;
156
157 res = key_from_data(state.reseed_ctx, data, dlen, key);
158 if (res)
159 return res;
160
161 res = crypto_cipher_alloc_ctx(&ctx, CIPHER_ALGO);
162 if (res)
163 return res;
164 res = cipher_init(ctx, key);
165 if (res)
166 return res;
167 inc_counter(state.counter);
168 state.ctx = ctx;
169 return TEE_SUCCESS;
170 err:
171 fortuna_done();
172 return res;
173 }
174
/*
 * Stage one entropy event for later insertion into the pools.
 *
 * Used for "quick" sources which must not sleep on the state mutex:
 * only a spinlock is taken here, and drain_ring_buffer() later feeds
 * the staged events into the pools with the mutex held.
 *
 * @data is truncated to RING_BUF_DATA_SIZE bytes. If the buffer is
 * full the event is silently dropped — only the publish of @begin is
 * skipped, so consumers never see a partially written element.
 */
static void push_ring_buffer(uint8_t snum, uint8_t pnum, const void *data,
			     size_t dlen)
{
	uint8_t dl = MIN(RING_BUF_DATA_SIZE, dlen);
	unsigned int next_begin;
	uint32_t old_itr_status;

	/* Spinlock to serialize writers */
	old_itr_status = cpu_spin_lock_xsave(&ring_buffer_spin_lock);

	next_begin = (ring_buffer.begin + 1) % ARRAY_SIZE(ring_buffer.elem);
	if (next_begin == atomic_load_uint(&ring_buffer.end))
		goto out; /* buffer is full */

	ring_buffer.elem[next_begin].snum = snum;
	ring_buffer.elem[next_begin].pnum = pnum;
	ring_buffer.elem[next_begin].dlen = dl;
	memcpy(ring_buffer.elem[next_begin].data, data, dl);

	/* Publish the element only after all of its fields are written */
	atomic_store_uint(&ring_buffer.begin, next_begin);

out:
	cpu_spin_unlock_xrestore(&ring_buffer_spin_lock, old_itr_status);
}
199
/*
 * Remove the oldest staged event from the ring buffer.
 *
 * Single consumer: called from paths holding the state mutex, so only
 * the producers need the spinlock; synchronization with them is via the
 * atomic loads/stores of @begin/@end.
 *
 * Returns the number of bytes copied into @data, or 0 if the buffer
 * was empty.
 */
static size_t pop_ring_buffer(uint8_t *snum, uint8_t *pnum,
			      uint8_t data[RING_BUF_DATA_SIZE])
{
	unsigned int next_end;
	size_t dlen;

	if (atomic_load_uint(&ring_buffer.begin) == ring_buffer.end)
		return 0;

	next_end = (ring_buffer.end + 1) % ARRAY_SIZE(ring_buffer.elem);

	*snum = ring_buffer.elem[ring_buffer.end].snum;
	*pnum = ring_buffer.elem[ring_buffer.end].pnum;
	/* dlen was clamped by the producer; the assert documents that */
	dlen = MIN(ring_buffer.elem[ring_buffer.end].dlen, RING_BUF_DATA_SIZE);
	assert(ring_buffer.elem[ring_buffer.end].dlen == dlen);
	memcpy(data, ring_buffer.elem[ring_buffer.end].data, dlen);

	/* Release the slot to producers only after it's fully read */
	atomic_store_uint(&ring_buffer.end, next_end);

	return dlen;
}
221
add_event(uint8_t snum,uint8_t pnum,const void * data,size_t dlen)222 static TEE_Result add_event(uint8_t snum, uint8_t pnum,
223 const void *data, size_t dlen)
224 {
225 TEE_Result res;
226 size_t dl = MIN(MAX_EVENT_DATA_LEN, dlen);
227 uint8_t v[] = { snum, dl };
228
229 if (pnum >= NUM_POOLS)
230 return TEE_ERROR_BAD_PARAMETERS;
231
232 res = hash_update(state.pool_ctx[pnum], v, sizeof(v));
233 if (res)
234 return res;
235 res = hash_update(state.pool_ctx[pnum], data, dl);
236 if (res)
237 return res;
238 if (!pnum) {
239 unsigned int l;
240
241 if (!ADD_OVERFLOW(state.pool0_length, dl, &l))
242 state.pool0_length = l;
243 }
244
245 return TEE_SUCCESS;
246 }
247
drain_ring_buffer(void)248 static TEE_Result drain_ring_buffer(void)
249 {
250 while (true) {
251 TEE_Result res;
252 uint8_t snum;
253 uint8_t pnum;
254 uint8_t data[RING_BUF_DATA_SIZE];
255 size_t dlen;
256
257 dlen = pop_ring_buffer(&snum, &pnum, data);
258 if (!dlen)
259 return TEE_SUCCESS;
260
261 res = add_event(snum, pnum, data, dlen);
262 if (res)
263 return res;
264 }
265 }
266
/*
 * Atomically advance the per-source pool cursor @pnum and return the
 * pool number this event should go to. Lock-free so that concurrent
 * callers of crypto_rng_add_event() each get a distinct pool.
 */
static unsigned int get_next_pnum(unsigned int *pnum)
{
	unsigned int nval;
	unsigned int oval = atomic_load_uint(pnum);

	while (true) {
		nval = (oval + 1) % NUM_POOLS;

		if (atomic_cas_uint(pnum, &oval, nval)) {
			/*
			 * *pnum is normally initialized to 0 and we'd like
			 * to start feeding pool number 0 as that's the
			 * most important one.
			 *
			 * If we were to take just *pnum and increase it
			 * later multiple updaters could end up with the
			 * same number.
			 *
			 * By increasing first we get the number unique for
			 * next update and by subtracting one (using
			 * modulus) we get the number for this update.
			 */
			return (nval + NUM_POOLS - 1) % NUM_POOLS;
		}
		/*
		 * At this point atomic_cas_uint() has updated oval to the
		 * current *pnum, so retry the CAS from the fresh value.
		 */
	}
}
297
/*
 * Feed an entropy event from source @sid into the PRNG. @pnum is the
 * caller's per-source pool cursor, advanced round-robin. Quick sources
 * are staged in the ring buffer (spinlock only); all others take the
 * state mutex, hash the event into its pool and drain the ring buffer.
 */
void crypto_rng_add_event(enum crypto_rng_src sid, unsigned int *pnum,
			  const void *data, size_t dlen)
{
	unsigned int pn = get_next_pnum(pnum);
	uint8_t snum = sid >> 1;

	if (!CRYPTO_RNG_SRC_IS_QUICK(sid)) {
		/* Slow path: may sleep on the mutex */
		mutex_lock(&state_mu);
		add_event(snum, pn, data, dlen);
		drain_ring_buffer();
		mutex_unlock(&state_mu);
	} else {
		/* Quick path: must not sleep, stage under spinlock only */
		push_ring_buffer(snum, pn, data, dlen);
	}
}
313
314 /* GenerateBlocks */
generate_blocks(void * block,size_t nblocks)315 static TEE_Result generate_blocks(void *block, size_t nblocks)
316 {
317 uint8_t *b = block;
318 size_t n;
319
320 for (n = 0; n < nblocks; n++) {
321 TEE_Result res = crypto_cipher_update(state.ctx,
322 TEE_MODE_ENCRYPT, false,
323 (void *)state.counter,
324 BLOCK_SIZE,
325 b + n * BLOCK_SIZE);
326
327 /*
328 * Make sure to increase the counter before returning an
329 * eventual errors, we must never re-use the counter with
330 * the same key.
331 */
332 inc_counter(state.counter);
333 if (res)
334 return res;
335 }
336
337 return TEE_SUCCESS;
338 }
339
340 /* GenerateRandomData */
generate_random_data(void * buf,size_t blen)341 static TEE_Result generate_random_data(void *buf, size_t blen)
342 {
343 TEE_Result res;
344
345 res = generate_blocks(buf, blen / BLOCK_SIZE);
346 if (res)
347 return res;
348 if (blen % BLOCK_SIZE) {
349 uint8_t block[BLOCK_SIZE];
350 uint8_t *b = (uint8_t *)buf + ROUNDDOWN(blen, BLOCK_SIZE);
351
352 res = generate_blocks(block, 1);
353 if (res)
354 return res;
355 memcpy(b, block, blen % BLOCK_SIZE);
356 }
357
358 return TEE_SUCCESS;
359 }
360
361 #ifdef CFG_SECURE_TIME_SOURCE_REE
reseed_rate_limiting(void)362 static bool reseed_rate_limiting(void)
363 {
364 /*
365 * There's no point in checking REE time for reseed rate limiting,
366 * and also it makes it less complicated if we can avoid doing RPC
367 * here.
368 */
369 return false;
370 }
371 #else
reseed_rate_limiting(void)372 static bool reseed_rate_limiting(void)
373 {
374 TEE_Result res;
375 TEE_Time time;
376 const TEE_Time time_100ms = { 0, 100 };
377
378 res = tee_time_get_sys_time(&time);
379 /*
380 * Failure to read time must result in allowing reseed or we could
381 * block reseeding forever.
382 */
383 if (res)
384 return false;
385
386 if (TEE_TIME_LT(time, state.next_reseed_time))
387 return true;
388
389 /* Time to reseed, calculate next time reseed is OK */
390 TEE_TIME_ADD(time, time_100ms, state.next_reseed_time);
391 return false;
392 }
393 #endif
394
restart_pool(void * pool_ctx,uint8_t pool_digest[KEY_SIZE])395 static TEE_Result restart_pool(void *pool_ctx, uint8_t pool_digest[KEY_SIZE])
396 {
397 TEE_Result res = hash_final(pool_ctx, pool_digest);
398
399 if (res)
400 return res;
401
402 res = hash_init(pool_ctx);
403 if (res)
404 return res;
405
406 /*
407 * Restart the pool with the digest of the old pool. This is an
408 * extension to Fortuna. In the original Fortuna all pools was
409 * restarted from scratch. This extension is one more defense
410 * against spamming of the pools with known data which could lead
411 * to the spammer knowing the state of the pools.
412 *
413 * This extra precaution could be useful since OP-TEE sometimes
414 * have very few sources of good entropy and at the same time has
415 * sources that could quite easily be predicted by an attacker.
416 */
417 return hash_update(pool_ctx, pool_digest, KEY_SIZE);
418 }
419
/*
 * Decide whether pool @pool_num takes part in reseed number
 * @reseed_count. The specification says: use the pool if 2^pool_num
 * divides reseed_count.
 *
 * Testing only bit (pool_num - 1) is enough — and cheaper than a
 * modulus — because the caller probes pools in ascending order and
 * stops at the first refusal, so all lower bits are already known to
 * be zero when a given pool is checked.
 */
static bool reseed_from_pool(uint32_t reseed_count, size_t pool_num)
{
	if (!pool_num)
		return true;

	return ((reseed_count >> (pool_num - 1)) & 1) == 0;
}
431
/*
 * Reseed the generator from the pools when enough entropy has been
 * gathered (pool 0 holds at least MIN_POOL_SIZE bytes) and the rate
 * limiter permits it. Must be called with the state mutex held.
 *
 * The new generator key is the hash of the digests of pools
 * 0..k, where pool n participates in reseed r when 2^n divides r
 * (see reseed_from_pool()). Returns TEE_SUCCESS also when no reseed
 * was performed.
 */
static TEE_Result maybe_reseed(void)
{
	TEE_Result res;
	size_t n;
	uint8_t pool_digest[KEY_SIZE];

	if (state.pool0_length < MIN_POOL_SIZE)
		return TEE_SUCCESS;

	if (reseed_rate_limiting())
		return TEE_SUCCESS;

	/* Bump first: reseed_count selects which pools participate */
	state.reseed_count++;

	res = hash_init(state.reseed_ctx);
	if (res)
		return res;

	for (n = 0;
	     n < NUM_POOLS && reseed_from_pool(state.reseed_count, n); n++) {
		res = restart_pool(state.pool_ctx[n], pool_digest);
		if (res)
			return res;
		/* Pool 0 was emptied; start counting its input again */
		if (!n)
			state.pool0_length = 0;

		res = hash_update(state.reseed_ctx, pool_digest, KEY_SIZE);
		if (res)
			return res;
	}
	res = hash_final(state.reseed_ctx, pool_digest);
	if (res)
		return res;

	/* Rekey the generator with the combined pool digest */
	crypto_cipher_final(state.ctx);
	res = crypto_cipher_init(state.ctx, TEE_MODE_ENCRYPT,
				 pool_digest, KEY_SIZE, NULL, 0, NULL, 0);
	if (res)
		return res;
	inc_counter(state.counter);

	return TEE_SUCCESS;
}
475
/*
 * Produce @blen bytes of random data into @buf (PseudoRandomData in
 * the Fortuna paper), reseeding first when possible.
 *
 * After generating the output the generator is rekeyed with fresh
 * cipher output so earlier outputs cannot be reconstructed from a
 * later state capture. Any failure tears the whole PRNG down via
 * fortuna_done() — we can't trust the state after a partial
 * operation.
 */
static TEE_Result fortuna_read(void *buf, size_t blen)
{
	TEE_Result res;

	/* Unlocked check: non-NULL ctx only set once at successful init */
	if (!state.ctx)
		return TEE_ERROR_BAD_STATE;

	mutex_lock(&state_mu);

	res = maybe_reseed();
	if (res)
		goto out;

	if (blen) {
		uint8_t new_key[KEY_SIZE];

		res = generate_random_data(buf, blen);
		if (res)
			goto out;

		/* Rekey from generator output, never reusing key+counter */
		res = generate_blocks(new_key, KEY_SIZE / BLOCK_SIZE);
		if (res)
			goto out;
		crypto_cipher_final(state.ctx);
		res = cipher_init(state.ctx, new_key);
		if (res)
			goto out;
	}

	res = drain_ring_buffer();
out:
	if (res)
		fortuna_done();
	mutex_unlock(&state_mu);

	return res;
}
513
crypto_rng_read(void * buf,size_t blen)514 TEE_Result crypto_rng_read(void *buf, size_t blen)
515 {
516 size_t offs = 0;
517
518 while (true) {
519 TEE_Result res;
520 size_t n;
521
522 /* Draw at most 1 MiB of random on a single key */
523 n = MIN(blen - offs, SIZE_1M);
524 if (!n)
525 return TEE_SUCCESS;
526 res = fortuna_read((uint8_t *)buf + offs, n);
527 if (res)
528 return res;
529 offs += n;
530 }
531 }
532