// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2018-2019 NXP
 *
 * Brief   CAAM Job Rings manager.
 *         Implementation of functions to enqueue/dequeue CAAM Job Descriptors.
 */
#include <caam_common.h>
#include <caam_desc_helper.h>
#include <caam_hal_jr.h>
#include <caam_io.h>
#include <caam_jr.h>
#include <caam_rng.h>
#include <caam_utils_delay.h>
#include <caam_utils_mem.h>
#include <kernel/interrupt.h>
#include <kernel/panic.h>
#include <kernel/pm.h>
#include <kernel/spinlock.h>
#include <mm/core_memprot.h>
#include <tee/cache.h>

/*
 * Job Free define
 */
#define JR_JOB_FREE	0

/*
 * Caller information context object
 */
struct caller_info {
	struct caam_jobctx *jobctx; /* Caller job context object */
	uint32_t job_id;            /* Current Job ID */
	paddr_t pdesc;              /* Physical address of the descriptor */
};
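
/*
 * Note: a caller's job_id is a one-hot bit mask (1 << n for slot n of the
 * callers array, see do_jr_enqueue()). This is what allows caam_jr_dequeue()
 * to wait on several outstanding jobs at once by OR-ing their IDs into a
 * single mask.
 */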

/*
 * Job Ring module private data
 */
struct jr_privdata {
	vaddr_t baseaddr;        /* Job Ring base address */

	vaddr_t ctrladdr;        /* CAAM virtual base address */
	paddr_t jroffset;        /* Job Ring address offset */
	uint64_t paddr_inrings;  /* CAAM physical addr of input queue */
	uint64_t paddr_outrings; /* CAAM physical addr of output queue */

	uint8_t nb_jobs;         /* Number of Job Ring entries managed */

	/* Input Job Ring variables */
	struct caam_inring_entry *inrings; /* Input JR HW queue */
	unsigned int inlock;               /* Input JR spin lock */
	uint16_t inwrite_index;            /* SW index - next free JR entry */

	/* Output Job Ring variables */
	struct caam_outring_entry *outrings; /* Output JR HW queue */
	unsigned int outlock;                /* Output JR spin lock */
	uint16_t outread_index;              /* SW index - next JR output done */

	/* Caller information variables */
	struct caller_info *callers; /* Job Ring caller information */
	unsigned int callers_lock;   /* Job Ring caller spin lock */

	struct itr_handler it_handler; /* Interrupt handler */
};
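
/*
 * inwrite_index/outread_index are software shadows of the HW ring pointers:
 * both rings are circular buffers of nb_jobs entries, and the shadows are
 * resynchronized from the HW registers in caam_jr_resume().
 */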

/*
 * Job Ring module private data reference
 */
static struct jr_privdata *jr_privdata;

/*
 * Free module resources
 *
 * @jr_priv  Reference to the module private data
 */
static void do_jr_free(struct jr_privdata *jr_priv)
{
	if (jr_priv) {
		caam_free(jr_priv->inrings);
		caam_free(jr_priv->outrings);
		caam_free(jr_priv->callers);
		caam_free(jr_priv);
	}
}

/*
 * Allocate module resources
 *
 * @privdata  [out] Allocated Job Ring private data
 * @nb_jobs   Number of jobs to manage in the queue
 */
static enum caam_status do_jr_alloc(struct jr_privdata **privdata,
				    uint8_t nb_jobs)
{
	enum caam_status retstatus = CAAM_OUT_MEMORY;
	struct jr_privdata *jr_priv = NULL;

	/* Allocate the Job Ring private data */
	jr_priv = caam_calloc(sizeof(*jr_priv));

	if (!jr_priv) {
		JR_TRACE("Private Data allocation error");
		goto end_alloc;
	}

	/* Setup the number of jobs */
	jr_priv->nb_jobs = nb_jobs;

	/* Allocate the input and output job ring queues */
	jr_priv->inrings =
		caam_calloc_align(nb_jobs * sizeof(struct caam_inring_entry));
	jr_priv->outrings =
		caam_calloc_align(nb_jobs * sizeof(struct caam_outring_entry));

	/* Allocate the callers information */
	jr_priv->callers = caam_calloc(nb_jobs * sizeof(struct caller_info));

	if (!jr_priv->inrings || !jr_priv->outrings || !jr_priv->callers) {
		JR_TRACE("JR resources allocation error");
		goto end_alloc;
	}

	/* Initialize the spin locks */
	jr_priv->inlock = SPINLOCK_UNLOCK;
	jr_priv->outlock = SPINLOCK_UNLOCK;
	jr_priv->callers_lock = SPINLOCK_UNLOCK;

	/* Initialize the queue indexes */
	jr_priv->inwrite_index = 0;
	jr_priv->outread_index = 0;

	/*
	 * Ensure that the allocated queues' initialization is pushed to
	 * physical memory
	 */
	cache_operation(TEE_CACHEFLUSH, jr_priv->inrings,
			nb_jobs * sizeof(struct caam_inring_entry));
	cache_operation(TEE_CACHEFLUSH, jr_priv->outrings,
			nb_jobs * sizeof(struct caam_outring_entry));

	retstatus = CAAM_NO_ERROR;
end_alloc:
	if (retstatus != CAAM_NO_ERROR)
		do_jr_free(jr_priv);
	else
		*privdata = jr_priv;

	return retstatus;
}

/*
 * Job Ring interrupt handler
 *
 * @handler  Interrupt handler structure
 */
static enum itr_return caam_jr_irqhandler(struct itr_handler *handler)
{
	JR_TRACE("Disable the interrupt");
	itr_disable(handler->it);

	/* Send a signal to exit the WFE loop */
	sev();

	return ITRR_HANDLED;
}
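
/*
 * Note: the handler only disables the interrupt and issues a SEV event;
 * the wake-up target is the WFE wait loop in do_jr_enqueue(), which is
 * polling for a free input ring slot.
 */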

/*
 * Dequeue all completed jobs, calling each job context's callback
 * function. Returns the bit mask of the completed jobs that were
 * expected (@wait_job_ids parameter).
 *
 * @wait_job_ids  Expected jobs to be completed
 */
static uint32_t do_jr_dequeue(uint32_t wait_job_ids)
{
	uint32_t ret_job_id = 0;
	struct caller_info *caller = NULL;
	struct caam_outring_entry *jr_out = NULL;
	struct caam_jobctx *jobctx = NULL;
	uint32_t exceptions = 0;
	bool found = false;
	uint16_t idx_jr = 0;
	uint32_t nb_jobs_done = 0;
	size_t nb_jobs_inv = 0;

	exceptions = cpu_spin_lock_xsave(&jr_privdata->outlock);

	nb_jobs_done = caam_hal_jr_get_nbjob_done(jr_privdata->baseaddr);

	if (nb_jobs_done == 0) {
		cpu_spin_unlock_xrestore(&jr_privdata->outlock, exceptions);
		return ret_job_id;
	}

	/* Ensure that output ring descriptor entries are not in cache */
	if ((jr_privdata->outread_index + nb_jobs_done) >
	    jr_privdata->nb_jobs) {
		/*
		 * Invalidate the whole circular job buffer because some
		 * completed jobs wrapped around to the beginning of the
		 * buffer
		 */
		jr_out = jr_privdata->outrings;
		nb_jobs_inv = jr_privdata->nb_jobs;
	} else {
		/* Invalidate only the completed jobs */
		jr_out = &jr_privdata->outrings[jr_privdata->outread_index];
		nb_jobs_inv = nb_jobs_done;
	}

	cache_operation(TEE_CACHEINVALIDATE, jr_out,
			sizeof(struct caam_outring_entry) * nb_jobs_inv);

	for (; nb_jobs_done; nb_jobs_done--) {
		jr_out = &jr_privdata->outrings[jr_privdata->outread_index];

		/*
		 * Lock the caller information array because enqueue is
		 * also touching it
		 */
		cpu_spin_lock(&jr_privdata->callers_lock);
		for (idx_jr = 0, found = false; idx_jr < jr_privdata->nb_jobs;
		     idx_jr++) {
			/*
			 * Search for the caller information corresponding to
			 * the completed job.
			 * Don't use outread_index or inwrite_index because
			 * completion can be out of order compared to the
			 * input buffer
			 */
			caller = &jr_privdata->callers[idx_jr];
			if (caam_desc_pop(jr_out) == caller->pdesc) {
				jobctx = caller->jobctx;
				jobctx->status = caam_read_jobstatus(jr_out);

				/* Update the return job IDs mask */
				if (caller->job_id & wait_job_ids)
					ret_job_id |= caller->job_id;

				JR_TRACE("JR id=%" PRId32
					 ", context @0x%08" PRIxVA,
					 caller->job_id, (vaddr_t)jobctx);
				/* Clear the entry descriptor DMA address */
				caller->pdesc = 0;
				caller->jobctx = NULL;
				caller->job_id = JR_JOB_FREE;
				found = true;
				JR_TRACE("Free space #%" PRId16
					 " in the callers array",
					 idx_jr);
				break;
			}
		}
		cpu_spin_unlock(&jr_privdata->callers_lock);

		/*
		 * Remove the job from the output list even if no
		 * JR caller was found
		 */
		caam_hal_jr_del_job(jr_privdata->baseaddr);

		/*
		 * Increment the index to the next JR output entry, taking
		 * care that it is a circular buffer of nb_jobs size.
		 */
		jr_privdata->outread_index++;
		jr_privdata->outread_index %= jr_privdata->nb_jobs;

		if (found && jobctx->callback) {
			/* Finally, execute the caller's callback */
			jobctx->callback(jobctx);
		}
	}

	cpu_spin_unlock_xrestore(&jr_privdata->outlock, exceptions);

	return ret_job_id;
}

/*
 * Enqueue a new job in the Job Ring input queue and keep the caller's
 * job context in the private callers array.
 *
 * @jobctx  Caller's job context
 * @job_id  [out] Job ID enqueued
 */
static enum caam_status do_jr_enqueue(struct caam_jobctx *jobctx,
				      uint32_t *job_id)
{
	enum caam_status retstatus = CAAM_BUSY;
	struct caam_inring_entry *cur_inrings = NULL;
	struct caller_info *caller = NULL;
	uint32_t exceptions = 0;
	uint32_t job_mask = 0;
	uint8_t idx_jr = 0;
	bool found = false;

	exceptions = cpu_spin_lock_xsave(&jr_privdata->inlock);

	/*
	 * Stay locked until a slot is available.
	 * Check if there is an available JR slot in the HW
	 */
	while (caam_hal_jr_read_nbslot_available(jr_privdata->baseaddr) == 0) {
		/*
		 * WFE will return thanks to a SEV generated by the
		 * interrupt handler or by a spin_unlock
		 */
		wfe();
	}

	/*
	 * There is a free slot in the input ring, but that does not mean
	 * previously pushed jobs are completed; completion is out of order.
	 * Look for a free entry in the callers array to store the caller
	 * data and derive a job ID used to track completion.
	 *
	 * Lock the caller information array because dequeue is
	 * also touching it
	 */
	cpu_spin_lock(&jr_privdata->callers_lock);
	for (idx_jr = 0; idx_jr < jr_privdata->nb_jobs; idx_jr++) {
		if (jr_privdata->callers[idx_jr].job_id == JR_JOB_FREE) {
			JR_TRACE("Found free space #%" PRId8
				 " in the callers array",
				 idx_jr);
			job_mask = 1 << idx_jr;

			/* Store the caller information for the JR completion */
			caller = &jr_privdata->callers[idx_jr];
			caller->job_id = job_mask;
			caller->jobctx = jobctx;
			caller->pdesc = virt_to_phys((void *)jobctx->desc);

			found = true;
			break;
		}
	}
	cpu_spin_unlock(&jr_privdata->callers_lock);

	if (!found) {
		JR_TRACE("Error: didn't find a free space in the callers array");
		goto end_enqueue;
	}

	JR_TRACE("Push id=%" PRId16 ", job (0x%08" PRIx32
		 ") context @0x%08" PRIxVA,
		 jr_privdata->inwrite_index, job_mask, (vaddr_t)jobctx);

	cur_inrings = &jr_privdata->inrings[jr_privdata->inwrite_index];

	/* Push the descriptor into the JR HW list */
	caam_desc_push(cur_inrings, caller->pdesc);

	/* Ensure that physical memory is up to date */
	cache_operation(TEE_CACHECLEAN, cur_inrings,
			sizeof(struct caam_inring_entry));

	/*
	 * Increment the index to the next JR input entry, taking care that
	 * it is a circular buffer of nb_jobs size.
	 */
	jr_privdata->inwrite_index++;
	jr_privdata->inwrite_index %= jr_privdata->nb_jobs;

	/* Ensure that the input descriptor is pushed to physical memory */
	cache_operation(TEE_CACHECLEAN, jobctx->desc,
			DESC_SZBYTES(caam_desc_get_len(jobctx->desc)));

	/* Inform the HW that a new job is available */
	caam_hal_jr_add_newjob(jr_privdata->baseaddr);

	*job_id = job_mask;
	retstatus = CAAM_NO_ERROR;

end_enqueue:
	cpu_spin_unlock_xrestore(&jr_privdata->inlock, exceptions);

	return retstatus;
}

/*
 * Synchronous job completion callback
 *
 * @jobctx  Job context
 */
static void job_done(struct caam_jobctx *jobctx)
{
	jobctx->completion = true;
}

void caam_jr_cancel(uint32_t job_id)
{
	unsigned int idx = 0;

	cpu_spin_lock(&jr_privdata->callers_lock);

	JR_TRACE("Job cancel 0x%" PRIx32, job_id);
	for (idx = 0; idx < jr_privdata->nb_jobs; idx++) {
		/*
		 * Search for the caller information corresponding to
		 * the job_id mask.
		 */
		if (jr_privdata->callers[idx].job_id == job_id) {
			/* Clear the entry descriptor */
			jr_privdata->callers[idx].pdesc = 0;
			jr_privdata->callers[idx].jobctx = NULL;
			jr_privdata->callers[idx].job_id = JR_JOB_FREE;
			/* Break (not return) so the spin lock is released */
			break;
		}
	}

	cpu_spin_unlock(&jr_privdata->callers_lock);
}

enum caam_status caam_jr_dequeue(uint32_t job_ids, unsigned int timeout_ms)
{
	uint32_t job_complete = 0;
	uint32_t nb_loop = 0;
	bool infinite = false;
	bool it_active = false;

	if (timeout_ms == UINT_MAX)
		infinite = true;
	else
		nb_loop = timeout_ms * 100; /* 100 x 10 usec delays per msec */

	do {
		/* Dequeue the completed jobs */
		job_complete = do_jr_dequeue(job_ids);

		/* Check if a new job has completed and acknowledge the IT */
		it_active = caam_hal_jr_check_ack_itr(jr_privdata->baseaddr);

		if (job_complete & job_ids)
			return CAAM_NO_ERROR;

		/* If no JR interrupt was pending, wait a bit */
		if (!it_active)
			caam_udelay(10);
	} while (infinite || (nb_loop--));

	return CAAM_TIMEOUT;
}

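/*
 * Usage sketch (illustrative only, not part of this driver): a caller
 * that has already built a CAAM descriptor can run it synchronously or
 * asynchronously. The names "desc" and "my_cb" are hypothetical.
 *
 *	struct caam_jobctx jobctx = { };
 *	uint32_t job_id = 0;
 *
 *	jobctx.desc = desc;
 *
 *	// Synchronous: blocks until the job completes
 *	if (caam_jr_enqueue(&jobctx, NULL) != CAAM_NO_ERROR)
 *		return CAAM_FAILURE;
 *
 *	// Asynchronous: returns CAAM_PENDING, my_cb() runs on completion
 *	jobctx.callback = my_cb;
 *	jobctx.context = &jobctx;
 *	if (caam_jr_enqueue(&jobctx, &job_id) == CAAM_PENDING)
 *		caam_jr_dequeue(job_id, 100); // poll for up to ~100 ms
 */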
enum caam_status caam_jr_enqueue(struct caam_jobctx *jobctx, uint32_t *job_id)
{
	enum caam_status retstatus = CAAM_FAILURE;
	__maybe_unused int timeout = 10; /* Nb loops to poll job completion */

	if (!jobctx)
		return CAAM_BAD_PARAM;

	JR_DUMPDESC(jobctx->desc);

	if (!jobctx->callback && job_id) {
		JR_TRACE("Job callback not defined whereas asynchronous");
		return CAAM_BAD_PARAM;
	}

	if (jobctx->callback && !job_id) {
		JR_TRACE("Job ID not defined whereas asynchronous");
		return CAAM_BAD_PARAM;
	}

	jobctx->completion = false;
	jobctx->status = 0;

	/*
	 * If the job_id parameter is NULL, the job is synchronous, hence use
	 * the local job_done callback function
	 */
	if (!jobctx->callback && !job_id) {
		jobctx->callback = job_done;
		jobctx->context = jobctx;
	}

	retstatus = do_jr_enqueue(jobctx, &jobctx->id);

	if (retstatus != CAAM_NO_ERROR) {
		JR_TRACE("enqueue job error 0x%08x", retstatus);
		return retstatus;
	}

	/*
	 * If the job_id parameter is defined, the job is asynchronous:
	 * return the job ID and report the job as pending
	 */
	if (job_id) {
		*job_id = jobctx->id;
		return CAAM_PENDING;
	}

#ifdef TIMEOUT_COMPLETION
	/*
	 * The job is synchronous: wait until job completion or timeout
	 */
	while (!jobctx->completion && timeout--)
		caam_jr_dequeue(jobctx->id, 100);

	if (timeout <= 0) {
		/* Job timed out, cancel it and return in error */
		caam_jr_cancel(jobctx->id);
		retstatus = CAAM_TIMEOUT;
	} else {
		if (JRSTA_SRC_GET(jobctx->status) != JRSTA_SRC(NONE))
			retstatus = CAAM_JOB_STATUS;
		else
			retstatus = CAAM_NO_ERROR;
	}
#else
	/*
	 * The job is synchronous: wait until the job completes.
	 * Don't use a timeout because there is no HW timer, so any
	 * timeout would not be precise
	 */
	while (!jobctx->completion)
		caam_jr_dequeue(jobctx->id, 100);

	if (JRSTA_SRC_GET(jobctx->status) != JRSTA_SRC(NONE))
		retstatus = CAAM_JOB_STATUS;
	else
		retstatus = CAAM_NO_ERROR;
#endif

	/* Erase the local callback function */
	jobctx->callback = NULL;

	return retstatus;
}

enum caam_status caam_jr_init(struct caam_jrcfg *jrcfg)
{
	enum caam_status retstatus = CAAM_FAILURE;

	JR_TRACE("Initialization");

	/* Allocate the Job Ring resources */
	retstatus = do_jr_alloc(&jr_privdata, jrcfg->nb_jobs);
	if (retstatus != CAAM_NO_ERROR)
		goto end_init;

	jr_privdata->ctrladdr = jrcfg->base;
	jr_privdata->jroffset = jrcfg->offset;

	retstatus =
		caam_hal_jr_setowner(jrcfg->base, jrcfg->offset, JROWN_ARM_S);
	JR_TRACE("JR setowner returned 0x%x", retstatus);

	if (retstatus != CAAM_NO_ERROR)
		goto end_init;

	jr_privdata->baseaddr = jrcfg->base + jrcfg->offset;
	retstatus = caam_hal_jr_reset(jr_privdata->baseaddr);
	if (retstatus != CAAM_NO_ERROR)
		goto end_init;

	/*
	 * Get the physical addresses of the input/output queues.
	 * The HW configuration registers are 64 bits wide regardless of
	 * the CAAM or CPU addressing mode.
	 */
	jr_privdata->paddr_inrings = virt_to_phys(jr_privdata->inrings);
	jr_privdata->paddr_outrings = virt_to_phys(jr_privdata->outrings);
	if (!jr_privdata->paddr_inrings || !jr_privdata->paddr_outrings) {
		JR_TRACE("JR bad queue pointers");
		retstatus = CAAM_FAILURE;
		goto end_init;
	}

	caam_hal_jr_config(jr_privdata->baseaddr, jr_privdata->nb_jobs,
			   jr_privdata->paddr_inrings,
			   jr_privdata->paddr_outrings);

	/*
	 * Prepare the interrupt handler to secure the interrupt even
	 * if the interrupt is not used
	 */
	jr_privdata->it_handler.it = jrcfg->it_num;
	jr_privdata->it_handler.flags = ITRF_TRIGGER_LEVEL;
	jr_privdata->it_handler.handler = caam_jr_irqhandler;
	jr_privdata->it_handler.data = jr_privdata;

#if defined(CFG_NXP_CAAM_RUNTIME_JR) && defined(CFG_CAAM_ITR)
	itr_add(&jr_privdata->it_handler);
#endif
	caam_hal_jr_enable_itr(jr_privdata->baseaddr);

	retstatus = CAAM_NO_ERROR;

end_init:
	if (retstatus != CAAM_NO_ERROR)
		do_jr_free(jr_privdata);

	return retstatus;
}

enum caam_status caam_jr_halt(void)
{
	enum caam_status retstatus = CAAM_FAILURE;
	__maybe_unused uint32_t job_complete = 0;

	retstatus = caam_hal_jr_halt(jr_privdata->baseaddr);

	/*
	 * All jobs in the input queue have been done, call the
	 * dequeue function to complete them.
	 */
	job_complete = do_jr_dequeue(UINT32_MAX);
	JR_TRACE("Completion of jobs mask 0x%" PRIx32, job_complete);

	return retstatus;
}

enum caam_status caam_jr_flush(void)
{
	enum caam_status retstatus = CAAM_FAILURE;
	__maybe_unused uint32_t job_complete = 0;

	retstatus = caam_hal_jr_flush(jr_privdata->baseaddr);

	/*
	 * All jobs in the input queue have been done, call the
	 * dequeue function to complete them.
	 */
	job_complete = do_jr_dequeue(UINT32_MAX);
	JR_TRACE("Completion of jobs mask 0x%" PRIx32, job_complete);

	return retstatus;
}

void caam_jr_resume(uint32_t pm_hint)
{
	if (pm_hint == PM_HINT_CONTEXT_STATE) {
#ifndef CFG_NXP_CAAM_RUNTIME_JR
		/*
		 * When the CAAM is not used at runtime, the JR used to
		 * instantiate the RNG has been released to the Non-Secure
		 * world; hence the Secure JR must be reconfigured here and
		 * released again after the RNG instantiation.
		 */
		caam_hal_jr_setowner(jr_privdata->ctrladdr,
				     jr_privdata->jroffset, JROWN_ARM_S);

		caam_hal_jr_config(jr_privdata->baseaddr, jr_privdata->nb_jobs,
				   jr_privdata->paddr_inrings,
				   jr_privdata->paddr_outrings);
#endif /* CFG_NXP_CAAM_RUNTIME_JR */

		/* Read the current input ring index */
		jr_privdata->inwrite_index =
			caam_hal_jr_input_index(jr_privdata->baseaddr);
		/* Read the current output ring index */
		jr_privdata->outread_index =
			caam_hal_jr_output_index(jr_privdata->baseaddr);

		if (caam_rng_instantiation() != CAAM_NO_ERROR)
			panic();

#ifndef CFG_NXP_CAAM_RUNTIME_JR
		caam_hal_jr_setowner(jr_privdata->ctrladdr,
				     jr_privdata->jroffset, JROWN_ARM_NS);
#endif /* CFG_NXP_CAAM_RUNTIME_JR */
	} else {
		caam_hal_jr_resume(jr_privdata->baseaddr);
	}
}

enum caam_status caam_jr_complete(void)
{
	enum caam_status ret = CAAM_BUSY;

	ret = caam_hal_jr_flush(jr_privdata->baseaddr);
	if (ret == CAAM_NO_ERROR)
		caam_hal_jr_resume(jr_privdata->baseaddr);

	return ret;
}