/*
 * Copyright (c) 2018-2019 Nordic Semiconductor ASA
 * Copyright 2019 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include <stdbool.h>
#include <errno.h>

#include <zephyr/toolchain.h>

#include <soc.h>
#include <zephyr/device.h>

#include <zephyr/drivers/entropy.h>
#include <zephyr/irq.h>

#include "hal/swi.h"
#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"

#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"

#include "ticker/ticker.h"

#include "lll.h"
#include "lll_vendor.h"
#include "lll_internal.h"

#include "hal/debug.h"

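/* Single-instance LLL runtime state: the callbacks and parameter of the event
 * that currently owns the radio, plus (with CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
 * counters used to keep LLL-generated done events in sync with their ULL-side
 * processing.
 */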
static struct {
	struct {
		void *param;
		lll_is_abort_cb_t is_abort_cb;
		lll_abort_cb_t abort_cb;
	} curr;

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
	struct {
		uint8_t volatile lll_count;
		uint8_t ull_count;
	} done;
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
} event;

/* Entropy device */
static const struct device *const dev_entropy = DEVICE_DT_GET(DT_CHOSEN(zephyr_entropy));

static int init_reset(void);
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
static inline void done_inc(void);
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
static struct lll_event *resume_enqueue(lll_prepare_cb_t resume_cb);

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
static void ticker_stop_op_cb(uint32_t status, void *param);
static void ticker_start_op_cb(uint32_t status, void *param);
static void ticker_start_next_op_cb(uint32_t status, void *param);
static uint32_t preempt_ticker_start(struct lll_event *event,
				     ticker_op_func op_cb);
static void preempt_ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			      uint32_t remainder, uint16_t lazy, uint8_t force,
			      void *param);
static void preempt(void *param);
#else /* CONFIG_BT_CTLR_LOW_LAT */
#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
static void mfy_ticker_job_idle_get(void *param);
static void ticker_op_job_disable(uint32_t status, void *op_context);
#endif
#endif /* CONFIG_BT_CTLR_LOW_LAT */

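/* The ISRs below drain the mayfly queues of the respective users: the LPTMR
 * (RTC) ISR runs TICKER_USER_ID_ULL_HIGH (and ULL_LOW when both share a
 * priority), the radio software IRQ runs TICKER_USER_ID_LLL, and the job
 * software IRQ runs TICKER_USER_ID_ULL_LOW.
 */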
static void rtc0_rv32m1_isr(const void *arg)
{
	DEBUG_TICKER_ISR(1);

	/* On compare0 run ticker worker instance0 */
	if (LPTMR1->CSR & LPTMR_CSR_TCF(1)) {
		LPTMR1->CSR |= LPTMR_CSR_TCF(1);

		ticker_trigger(0);
	}

	mayfly_run(TICKER_USER_ID_ULL_HIGH);

#if !defined(CONFIG_BT_CTLR_LOW_LAT) && \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	mayfly_run(TICKER_USER_ID_ULL_LOW);
#endif

	DEBUG_TICKER_ISR(0);
}

static void swi_lll_rv32m1_isr(const void *arg)
{
	DEBUG_RADIO_ISR(1);

	mayfly_run(TICKER_USER_ID_LLL);

	DEBUG_RADIO_ISR(0);
}

#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
static void swi_ull_low_rv32m1_isr(const void *arg)
{
	DEBUG_TICKER_JOB(1);

	mayfly_run(TICKER_USER_ID_ULL_LOW);

	DEBUG_TICKER_JOB(0);
}
#endif

int lll_init(void)
{
	int err;

	/* Check if entropy device is ready */
	if (!device_is_ready(dev_entropy)) {
		return -ENODEV;
	}

	/* Initialise LLL internals */
	event.curr.abort_cb = NULL;

	err = init_reset();
	if (err) {
		return err;
	}

	/* Initialize SW IRQ structure */
	hal_swi_init();

	/* Connect ISRs */
	IRQ_CONNECT(LL_RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO, isr_radio, NULL, 0);
	IRQ_CONNECT(LL_RTC0_IRQn, CONFIG_BT_CTLR_ULL_HIGH_PRIO,
		    rtc0_rv32m1_isr, NULL, 0);
	IRQ_CONNECT(HAL_SWI_RADIO_IRQ, CONFIG_BT_CTLR_LLL_PRIO,
		    swi_lll_rv32m1_isr, NULL, 0);
#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
	IRQ_CONNECT(HAL_SWI_JOB_IRQ, CONFIG_BT_CTLR_ULL_LOW_PRIO,
		    swi_ull_low_rv32m1_isr, NULL, 0);
#endif

	/* Enable IRQs */
	irq_enable(LL_RADIO_IRQn);
	irq_enable(LL_RTC0_IRQn);
	irq_enable(HAL_SWI_RADIO_IRQ);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) ||
	    (CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
		irq_enable(HAL_SWI_JOB_IRQ);
	}

	/* Call it after IRQ enable to be able to measure ISR latency */
	radio_setup();

	return 0;
}

int lll_deinit(void)
{
	/* Disable IRQs */
	irq_disable(LL_RADIO_IRQn);
	irq_disable(LL_RTC0_IRQn);
	irq_disable(HAL_SWI_RADIO_IRQ);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) ||
	    (CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
		irq_disable(HAL_SWI_JOB_IRQ);
	}

	return 0;
}

int lll_csrand_get(void *buf, size_t len)
{
	return entropy_get_entropy(dev_entropy, buf, len);
}

int lll_csrand_isr_get(void *buf, size_t len)
{
	return entropy_get_entropy_isr(dev_entropy, buf, len, 0);
}

int lll_rand_get(void *buf, size_t len)
{
	return 0;
}

int lll_rand_isr_get(void *buf, size_t len)
{
	return 0;
}

int lll_reset(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

void lll_disable(void *param)
{
	/* LLL disable of current event, done is generated */
	if (!param || (param == event.curr.param)) {
		if (event.curr.abort_cb && event.curr.param) {
			event.curr.abort_cb(NULL, event.curr.param);
		} else {
			LL_ASSERT(!param);
		}
	}
	{
		struct lll_event *next;
		uint8_t idx;

		idx = UINT8_MAX;
		next = ull_prepare_dequeue_iter(&idx);
		while (next) {
			if (!next->is_aborted &&
			    (!param || (param == next->prepare_param.param))) {
				next->is_aborted = 1;
				next->abort_cb(&next->prepare_param,
					       next->prepare_param.param);

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
				/* NOTE: abort_cb called lll_done which modifies
				 *       the prepare pipeline hence re-iterate
				 *       through the prepare pipeline.
				 */
				idx = UINT8_MAX;
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
			}

			next = ull_prepare_dequeue_iter(&idx);
		}
	}
}

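/* With CONFIG_BT_CTLR_LOW_LAT and LLL sharing the ULL_LOW priority, defer a
 * ticker job silence request to the ULL_LOW context (see
 * mfy_ticker_job_idle_get() below); in all other configurations there is
 * nothing to do when a prepare completes.
 */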
int lll_prepare_done(void *param)
{
#if defined(CONFIG_BT_CTLR_LOW_LAT) && \
	(CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, mfy_ticker_job_idle_get};
	uint32_t ret;

	ret = mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_LOW,
			     1, &mfy);
	if (ret) {
		return -EFAULT;
	}

	return 0;
#else
	return 0;
#endif /* CONFIG_BT_CTLR_LOW_LAT */
}

int lll_done(void *param)
{
	struct lll_event *next;
	struct ull_hdr *ull;
	void *evdone;

	/* Assert if param supplied without a pending prepare to cancel. */
	next = ull_prepare_dequeue_get();
	LL_ASSERT(!param || next);

	/* check if current LLL event is done */
	ull = NULL;
	if (!param) {
		/* Reset current event instance */
		LL_ASSERT(event.curr.abort_cb);
		event.curr.abort_cb = NULL;

		param = event.curr.param;
		event.curr.param = NULL;

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
		done_inc();
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

		if (param) {
			ull = HDR_LLL2ULL(param);
		}

		if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) &&
		    (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
			mayfly_enable(TICKER_USER_ID_LLL,
				      TICKER_USER_ID_ULL_LOW,
				      1);
		}

		DEBUG_RADIO_CLOSE(0);
	} else {
		ull = HDR_LLL2ULL(param);
	}

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
	ull_prepare_dequeue(TICKER_USER_ID_LLL);
#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	lll_done_score(param, 0, 0); /* TODO */
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */

	/* Let ULL know about LLL event done */
	evdone = ull_event_done(ull);
	LL_ASSERT(evdone);

	return 0;
}

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
void lll_done_ull_inc(void)
{
	LL_ASSERT(event.done.ull_count != event.done.lll_count);
	event.done.ull_count++;
}
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

bool lll_is_done(void *param)
{
	/* FIXME: use param to check */
	return !event.curr.abort_cb;
}

int lll_is_abort_cb(void *next, void *curr, lll_prepare_cb_t *resume_cb)
{
	return -ECANCELED;
}

int lll_clk_on(void)
{
	int err;

	/* turn on radio clock in non-blocking mode. */
	err = radio_wake();
	if (!err) {
		DEBUG_RADIO_XTAL(1);
	}

	return err;
}

int lll_clk_on_wait(void)
{
	int err;

	/* turn on radio clock in blocking mode. */
	err = radio_wake();

	while (radio_is_off()) {
		k_cpu_idle();
	}

	DEBUG_RADIO_XTAL(1);

	return err;
}

int lll_clk_off(void)
{
	int err;

	/* turn off radio clock in non-blocking mode. */
	err = radio_sleep();
	if (!err) {
		DEBUG_RADIO_XTAL(0);
	}

	return err;
}

uint32_t lll_event_offset_get(struct ull_hdr *ull)
{
	return HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
}

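/* Return 1 if the event anchored at ticks_at_event can no longer start in
 * time, i.e. the ticks elapsed since the anchor plus the minimum compare
 * offset exceed EVENT_OVERHEAD_START_US; return 0 if the anchor is still in
 * the future or within the allowed overhead.
 */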
uint32_t lll_preempt_calc(struct ull_hdr *ull, uint8_t ticker_id,
			  uint32_t ticks_at_event)
{
	uint32_t ticks_now;
	uint32_t diff;

	ticks_now = ticker_ticks_now_get();
	diff = ticks_now - ticks_at_event;
	if (diff & BIT(HAL_TICKER_CNTR_MSBIT)) {
		return 0;
	}

	diff += HAL_TICKER_CNTR_CMP_OFFSET_MIN;
	if (diff > HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US)) {
		/* TODO: for Low Latency Feature with Advanced XTAL feature.
		 * 1. Release retained HF clock.
		 * 2. Advance the radio event to accommodate normal prepare
		 *    duration.
		 * 3. Increase the preempt to start ticks for future events.
		 */
		return 1;
	}

	return 0;
}

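/* Map a Bluetooth LE channel index (0..39) to the radio's frequency offset
 * from 2400 MHz: advertising channels 37, 38 and 39 sit at 2402, 2426 and
 * 2480 MHz, data channels 0..10 at 2404..2424 MHz and 11..36 at
 * 2428..2478 MHz. The channel index also seeds the whitening IV.
 */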
void lll_chan_set(uint32_t chan)
{
	switch (chan) {
	case 37:
		radio_freq_chan_set(2);
		break;

	case 38:
		radio_freq_chan_set(26);
		break;

	case 39:
		radio_freq_chan_set(80);
		break;

	default:
		if (chan < 11) {
			radio_freq_chan_set(4 + (chan * 2U));
		} else if (chan < 40) {
			radio_freq_chan_set(28 + ((chan - 11) * 2U));
		} else {
			LL_ASSERT(0);
		}
		break;
	}

	radio_whiten_iv_set(chan);
}

uint32_t lll_radio_is_idle(void)
{
	return radio_is_idle();
}

uint32_t lll_radio_tx_ready_delay_get(uint8_t phy, uint8_t flags)
{
	return radio_tx_ready_delay_get(phy, flags);
}

uint32_t lll_radio_rx_ready_delay_get(uint8_t phy, uint8_t flags)
{
	return radio_rx_ready_delay_get(phy, flags);
}

void lll_isr_status_reset(void)
{
	radio_status_reset();
	radio_tmr_status_reset();
	radio_filter_status_reset();
	if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY)) {
		radio_ar_status_reset();
	}
	radio_rssi_status_reset();
}

static int init_reset(void)
{
	return 0;
}

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
static inline void done_inc(void)
{
	event.done.lll_count++;
	LL_ASSERT(event.done.lll_count != event.done.ull_count);
}
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

static inline bool is_done_sync(void)
{
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
	return event.done.lll_count == event.done.ull_count;
#else /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
	return true;
#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
}

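/* Resolve a prepare request against the prepare pipeline: the request is
 * queued for a deferred call when a current event is still active, when
 * earlier done events have not yet been processed by ULL, or when a resume
 * request finds another prepare already pending. Without
 * CONFIG_BT_CTLR_LOW_LAT, a preempt ticker is armed for a queued non-resume
 * prepare. Otherwise the prepare callback is invoked immediately and the
 * preempt ticker is re-targeted at the next pending prepare, if any.
 */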
int lll_prepare_resolve(lll_is_abort_cb_t is_abort_cb, lll_abort_cb_t abort_cb,
			lll_prepare_cb_t prepare_cb,
			struct lll_prepare_param *prepare_param,
			uint8_t is_resume, uint8_t is_dequeue)
{
	struct lll_event *p;
	uint8_t idx;
	int err;

	/* Find the ready prepare in the pipeline */
	idx = UINT8_MAX;
	p = ull_prepare_dequeue_iter(&idx);
	while (p && (p->is_aborted || p->is_resume)) {
		p = ull_prepare_dequeue_iter(&idx);
	}

	/* Current event active or another prepare is ready in the pipeline */
	if ((!is_dequeue && !is_done_sync()) ||
	    event.curr.abort_cb ||
	    (p && is_resume)) {
#if defined(CONFIG_BT_CTLR_LOW_LAT)
		lll_prepare_cb_t resume_cb;
#endif /* CONFIG_BT_CTLR_LOW_LAT */
		struct lll_event *next;

		if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) && event.curr.param) {
			/* early abort */
			event.curr.abort_cb(NULL, event.curr.param);
		}

		/* Store the next prepare for deferred call */
		next = ull_prepare_enqueue(is_abort_cb, abort_cb, prepare_param,
					   prepare_cb, is_resume);
		LL_ASSERT(next);

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
		if (is_resume) {
			return -EINPROGRESS;
		}

		/* Start the preempt timeout */
		uint32_t ret;

		ret = preempt_ticker_start(next, ticker_start_op_cb);
		LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
			  (ret == TICKER_STATUS_BUSY));

#else /* CONFIG_BT_CTLR_LOW_LAT */
		next = NULL;
		while (p) {
			if (!p->is_aborted) {
				if (event.curr.param ==
				    p->prepare_param.param) {
					p->is_aborted = 1;
					p->abort_cb(&p->prepare_param,
						    p->prepare_param.param);
				} else {
					next = p;
				}
			}

			p = ull_prepare_dequeue_iter(&idx);
		}

		if (next) {
			/* check if resume requested by curr */
			err = event.curr.is_abort_cb(NULL, event.curr.param,
						     &resume_cb);
			LL_ASSERT(err);

			if (err == -EAGAIN) {
				next = resume_enqueue(resume_cb);
				LL_ASSERT(next);
			} else {
				LL_ASSERT(err == -ECANCELED);
			}
		}
#endif /* CONFIG_BT_CTLR_LOW_LAT */

		return -EINPROGRESS;
	}

	LL_ASSERT(!p || &p->prepare_param == prepare_param);

	event.curr.param = prepare_param->param;
	event.curr.is_abort_cb = is_abort_cb;
	event.curr.abort_cb = abort_cb;

	err = prepare_cb(prepare_param);

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
	uint32_t ret;

	/* Stop any scheduled preempt ticker */
	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
			  TICKER_USER_ID_LLL,
			  TICKER_ID_LLL_PREEMPT,
			  ticker_stop_op_cb, NULL);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_FAILURE) ||
		  (ret == TICKER_STATUS_BUSY));

	/* Find next prepare needing preempt timeout to be setup */
	do {
		p = ull_prepare_dequeue_iter(&idx);
		if (!p) {
			return err;
		}
	} while (p->is_aborted || p->is_resume);

	/* Start the preempt timeout */
	ret = preempt_ticker_start(p, ticker_start_next_op_cb);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
#endif /* !CONFIG_BT_CTLR_LOW_LAT */

	return err;
}

static struct lll_event *resume_enqueue(lll_prepare_cb_t resume_cb)
{
	struct lll_prepare_param prepare_param = {0};

	prepare_param.param = event.curr.param;
	event.curr.param = NULL;

	return ull_prepare_enqueue(event.curr.is_abort_cb, event.curr.abort_cb,
				   &prepare_param, resume_cb, 1);
}

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
static void ticker_stop_op_cb(uint32_t status, void *param)
{
	/* NOTE: this callback is present only for addition of debug messages
	 * when needed, else can be dispensed with.
	 */
	ARG_UNUSED(param);

	LL_ASSERT((status == TICKER_STATUS_SUCCESS) ||
		  (status == TICKER_STATUS_FAILURE));
}

static void ticker_start_op_cb(uint32_t status, void *param)
{
	/* NOTE: this callback is present only for addition of debug messages
	 * when needed, else can be dispensed with.
	 */
	ARG_UNUSED(param);

	LL_ASSERT((status == TICKER_STATUS_SUCCESS) ||
		  (status == TICKER_STATUS_FAILURE));
}

static void ticker_start_next_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
}

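/* Arm a one-shot preempt ticker anchored at the pending prepare's
 * ticks_at_expire, expiring (EVENT_OVERHEAD_XTAL_US -
 * EVENT_OVERHEAD_PREEMPT_MIN_US) later; on expiry preempt_ticker_cb() defers
 * preempt() to the LLL context via a mayfly.
 */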
static uint32_t preempt_ticker_start(struct lll_event *evt,
				     ticker_op_func op_cb)
{
	struct lll_prepare_param *p;
	uint32_t preempt_anchor;
	struct ull_hdr *ull;
	uint32_t preempt_to;
	uint32_t ret;

	/* Calc the preempt timeout */
	p = &evt->prepare_param;
	ull = HDR_LLL2ULL(p->param);
	preempt_anchor = p->ticks_at_expire;
	preempt_to = HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US) -
		     HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);

	/* Set up the preempt timeout */
	ret = ticker_start(TICKER_INSTANCE_ID_CTLR,
			   TICKER_USER_ID_LLL,
			   TICKER_ID_LLL_PREEMPT,
			   preempt_anchor,
			   preempt_to,
			   TICKER_NULL_PERIOD,
			   TICKER_NULL_REMAINDER,
			   TICKER_NULL_LAZY,
			   TICKER_NULL_SLOT,
			   preempt_ticker_cb, evt,
			   op_cb, evt);

	return ret;
}

static void preempt_ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			      uint32_t remainder, uint16_t lazy, uint8_t force,
			      void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, preempt};
	uint32_t ret;

	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);
	LL_ASSERT(!ret);
}

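/* Preempt timeout expiry, run in the LLL mayfly context: if the prepare that
 * armed this timeout is still the first ready (non-resume) prepare in the
 * pipeline, ask the current event via its is_abort_cb whether it yields;
 * either the pending prepare is aborted, or the current event is aborted and,
 * on -EAGAIN, re-enqueued as a resume event after aborting any duplicates of
 * it in the pipeline.
 */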
static void preempt(void *param)
{
	lll_prepare_cb_t resume_cb;
	struct lll_event *next;
	uint8_t idx;
	int err;

	/* No event to abort */
	if (!event.curr.abort_cb || !event.curr.param) {
		return;
	}

	/* Check if any prepare in pipeline */
	idx = UINT8_MAX;
	next = ull_prepare_dequeue_iter(&idx);
	if (!next) {
		return;
	}

	/* Find a prepare that is ready and not a resume */
	while (next && (next->is_aborted || next->is_resume)) {
		next = ull_prepare_dequeue_iter(&idx);
	}

	/* No ready prepare */
	if (!next) {
		return;
	}

	/* Preemptor not in pipeline */
	if (next != param) {
		uint32_t ret;

		/* Start the preempt timeout */
		ret = preempt_ticker_start(next, ticker_start_next_op_cb);
		LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
			  (ret == TICKER_STATUS_BUSY));

		return;
	}

	/* Check if the current event wants to continue */
	err = event.curr.is_abort_cb(next->prepare_param.param,
				     event.curr.param,
				     &resume_cb);
	if (!err) {
		/* Let preemptor LLL know about the cancelled prepare */
		next->is_aborted = 1;
		next->abort_cb(&next->prepare_param, next->prepare_param.param);

		return;
	}

	/* Abort the current event */
	event.curr.abort_cb(NULL, event.curr.param);

	/* Check if resume requested */
	if (err == -EAGAIN) {
		struct lll_event *iter;
		uint8_t iter_idx;

		/* Abort any duplicates so that they get dequeued */
		iter_idx = UINT8_MAX;
		iter = ull_prepare_dequeue_iter(&iter_idx);
		while (iter) {
			if (!iter->is_aborted &&
			    event.curr.param == iter->prepare_param.param) {
				iter->is_aborted = 1;
				iter->abort_cb(&iter->prepare_param,
					       iter->prepare_param.param);

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
				/* NOTE: abort_cb called lll_done which modifies
				 *       the prepare pipeline hence re-iterate
				 *       through the prepare pipeline.
				 */
				iter_idx = UINT8_MAX;
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
			}

			iter = ull_prepare_dequeue_iter(&iter_idx);
		}

		/* Enqueue as resume event */
		iter = resume_enqueue(resume_cb);
		LL_ASSERT(iter);
	} else {
		LL_ASSERT(err == -ECANCELED);
	}
}
#else /* CONFIG_BT_CTLR_LOW_LAT */

#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
static void mfy_ticker_job_idle_get(void *param)
{
	uint32_t ret;

	/* Ticker Job Silence */
	ret = ticker_job_idle_get(TICKER_INSTANCE_ID_CTLR,
				  TICKER_USER_ID_ULL_LOW,
				  ticker_op_job_disable, NULL);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}

static void ticker_op_job_disable(uint32_t status, void *op_context)
{
	ARG_UNUSED(status);
	ARG_UNUSED(op_context);

	/* FIXME: */
	if (1 /* _radio.state != STATE_NONE */) {
		mayfly_enable(TICKER_USER_ID_ULL_LOW,
			      TICKER_USER_ID_ULL_LOW, 0);
	}
}
#endif

#endif /* CONFIG_BT_CTLR_LOW_LAT */