/*
 * Copyright (c) 2018-2019 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <soc.h>
#include <zephyr/bluetooth/hci_types.h>
#include <zephyr/sys/byteorder.h>

#include "util/util.h"
#include "util/memq.h"
#include "util/mem.h"
#include "util/mayfly.h"
#include "util/dbuf.h"

#include "hal/cpu.h"
#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"

#include "ticker/ticker.h"

#include "pdu_df.h"
#include "lll/pdu_vendor.h"
#include "pdu.h"

#include "lll.h"
#include "lll_clock.h"
#include "lll/lll_vendor.h"
#include "lll/lll_adv_types.h"
#include "lll_adv.h"
#include "lll/lll_adv_pdu.h"
#include "lll_chan.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_peripheral.h"
#include "lll_filter.h"
#include "lll_conn_iso.h"

#include "ll_sw/ull_tx_queue.h"

#include "ull_adv_types.h"
#include "ull_conn_types.h"
#include "ull_filter.h"

#include "ull_internal.h"
#include "ull_adv_internal.h"
#include "ull_conn_internal.h"
#include "ull_peripheral_internal.h"

#include "ll.h"

#include "ll_sw/isoal.h"
#include "ll_sw/ull_iso_types.h"
#include "ll_sw/ull_conn_iso_types.h"

#include "ll_sw/ull_llcp.h"

#include "hal/debug.h"

static void invalid_release(struct ull_hdr *hdr, struct lll_conn *lll,
                            memq_link_t *link, struct node_rx_pdu *rx);
static void ticker_op_stop_adv_cb(uint32_t status, void *param);
static void ticker_op_cb(uint32_t status, void *param);
static void ticker_update_latency_cancel_op_cb(uint32_t ticker_status,
                                               void *param);
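
/* Setup a peripheral connection from a received connect request: validate the
 * CONNECT_IND parameters, populate the connection context and the connection
 * complete (and optional CSA#2 and advertising terminate) events, stop the
 * advertising ticker(s) and start the connection event ticker.
 */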
void ull_periph_setup(struct node_rx_pdu *rx, struct node_rx_ftr *ftr,
                      struct lll_conn *lll)
{
        uint32_t conn_offset_us, conn_interval_us;
        uint8_t ticker_id_adv, ticker_id_conn;
        uint8_t peer_id_addr[BDADDR_SIZE];
        uint8_t peer_addr[BDADDR_SIZE];
        uint32_t ticks_slot_overhead;
        uint32_t ticks_slot_offset;
        uint32_t ready_delay_us;
        struct pdu_adv *pdu_adv;
        struct ll_adv_set *adv;
        uint32_t ticker_status;
        uint8_t peer_addr_type;
        uint32_t ticks_at_stop;
        uint16_t win_delay_us;
        struct node_rx_cc *cc;
        struct ll_conn *conn;
        uint16_t max_tx_time;
        uint16_t max_rx_time;
        uint16_t win_offset;
        memq_link_t *link;
        uint32_t slot_us;
        uint8_t chan_sel;
        void *node;

        adv = ((struct lll_adv *)ftr->param)->hdr.parent;
        conn = lll->hdr.parent;

        /* Populate the peripheral context */
        pdu_adv = (void *)rx->pdu;

        peer_addr_type = pdu_adv->tx_addr;
        memcpy(peer_addr, pdu_adv->connect_ind.init_addr, BDADDR_SIZE);

#if defined(CONFIG_BT_CTLR_PRIVACY)
        uint8_t rl_idx = ftr->rl_idx;

        if (rl_idx != FILTER_IDX_NONE) {
                /* Get identity address */
                ll_rl_id_addr_get(rl_idx, &peer_addr_type, peer_id_addr);
                /* Mark it as identity address from RPA (0x02, 0x03) */
                MARK_AS_IDENTITY_ADDR(peer_addr_type);
        } else {
#else /* CONFIG_BT_CTLR_PRIVACY */
        if (1) {
#endif /* CONFIG_BT_CTLR_PRIVACY */
                memcpy(peer_id_addr, peer_addr, BDADDR_SIZE);
        }

        /* Use the link stored in the node rx to enqueue connection
         * complete node rx towards LL context.
         */
        link = rx->hdr.link;

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
        const uint8_t peer_id_addr_type = (peer_addr_type & 0x01);
        const uint8_t own_id_addr_type = pdu_adv->rx_addr;
        const uint8_t *own_id_addr = adv->own_id_addr;

        /* Do not connect twice to the same peer */
        if (ull_conn_peer_connected(own_id_addr_type, own_id_addr,
                                    peer_id_addr_type, peer_id_addr)) {
                invalid_release(&adv->ull, lll, link, rx);

                return;
        }

        /* Remember the peer and own identity addresses */
        conn->peer_id_addr_type = peer_id_addr_type;
        (void)memcpy(conn->peer_id_addr, peer_id_addr,
                     sizeof(conn->peer_id_addr));
        conn->own_id_addr_type = own_id_addr_type;
        (void)memcpy(conn->own_id_addr, own_id_addr,
                     sizeof(conn->own_id_addr));
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
        /* Set default PAST parameters */
        conn->past = ull_conn_default_past_param_get();
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */

        memcpy(&lll->crc_init[0], &pdu_adv->connect_ind.crc_init[0], 3);
        memcpy(&lll->access_addr[0], &pdu_adv->connect_ind.access_addr[0], 4);
        memcpy(&lll->data_chan_map[0], &pdu_adv->connect_ind.chan_map[0],
               sizeof(lll->data_chan_map));
        lll->data_chan_count = util_ones_count_get(&lll->data_chan_map[0],
                                                   sizeof(lll->data_chan_map));
        lll->data_chan_hop = pdu_adv->connect_ind.hop;
        lll->interval = sys_le16_to_cpu(pdu_adv->connect_ind.interval);
        if ((lll->data_chan_count < CHM_USED_COUNT_MIN) ||
            (lll->data_chan_hop < CHM_HOP_COUNT_MIN) ||
            (lll->data_chan_hop > CHM_HOP_COUNT_MAX) ||
            !IN_RANGE(lll->interval, BT_HCI_LE_INTERVAL_MIN, BT_HCI_LE_INTERVAL_MAX)) {
                invalid_release(&adv->ull, lll, link, rx);

                return;
        }

        ((struct lll_adv *)ftr->param)->conn = NULL;

        lll->latency = sys_le16_to_cpu(pdu_adv->connect_ind.latency);

        win_offset = sys_le16_to_cpu(pdu_adv->connect_ind.win_offset);
        conn_interval_us = lll->interval * CONN_INT_UNIT_US;

        /* Add transmitWindowDelay to the default calculated connection offset:
         * 1.25 ms for a legacy PDU, 2.5 ms for an LE Uncoded PHY and 3.75 ms
         * for an LE Coded PHY.
         */
        if (0) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
        } else if (adv->lll.aux) {
                if (adv->lll.phy_s & PHY_CODED) {
                        win_delay_us = WIN_DELAY_CODED;
                } else {
                        win_delay_us = WIN_DELAY_UNCODED;
                }
#endif
        } else {
                win_delay_us = WIN_DELAY_LEGACY;
        }

        /* Set LLCP state to connected */
        ull_cp_state_set(conn, ULL_CP_CONNECTED);

        /* Calculate the window widening */
        conn->periph.sca = pdu_adv->connect_ind.sca;
        lll->periph.window_widening_periodic_us =
                DIV_ROUND_UP(((lll_clock_ppm_local_get() +
                               lll_clock_ppm_get(conn->periph.sca)) *
                              conn_interval_us), USEC_PER_SEC);
        lll->periph.window_widening_max_us = (conn_interval_us >> 1) -
                                             EVENT_IFS_US;
        lll->periph.window_size_event_us = pdu_adv->connect_ind.win_size *
                                           CONN_INT_UNIT_US;
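        /* Illustrative example (values assumed, not from this code): with a
         * 50 ppm local sleep clock, a peer SCA grade equivalent to 50 ppm and
         * a 30 ms connection interval, the periodic window widening is
         * DIV_ROUND_UP((50 + 50) * 30000, 1000000) = 3 us per connection
         * event.
         */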

        /* Procedure timeouts */
        conn->supervision_timeout = sys_le16_to_cpu(pdu_adv->connect_ind.timeout);

        /* Setup the PRT reload */
        ull_cp_prt_reload_set(conn, conn_interval_us);

#if defined(CONFIG_BT_CTLR_CONN_ISO)
        uint16_t conn_accept_timeout;

        (void)ll_conn_iso_accept_timeout_get(&conn_accept_timeout);
        conn->connect_accept_to = conn_accept_timeout * 625U;
#else
        conn->connect_accept_to = DEFAULT_CONNECTION_ACCEPT_TIMEOUT_US;
#endif /* CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_LE_PING)
        /* APTO in number of connection events */
        conn->apto_reload = RADIO_CONN_EVENTS((30 * 1000 * 1000),
                                              conn_interval_us);
        /* Dispatch the LE Ping PDU 6 connection events (that the peer would
         * listen to) before the 30 s timeout.
         * TODO: "peer listens to" is greater than 30 s due to latency.
         */
        conn->appto_reload = (conn->apto_reload > (lll->latency + 6)) ?
                             (conn->apto_reload - (lll->latency + 6)) :
                             conn->apto_reload;
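        /* Illustrative example (values assumed): at a 7.5 ms connection
         * interval, the 30 s APTO corresponds to 4000 connection events; with
         * zero peripheral latency the APPTO then reloads at 4000 - 6 = 3994
         * events, leaving 6 events to get an LE Ping response on air before
         * the APTO expires.
         */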
#endif /* CONFIG_BT_CTLR_LE_PING */

#if defined(CONFIG_BT_CTLR_CONN_RANDOM_FORCE)
        memcpy((void *)&conn->periph.force, &lll->access_addr[0],
               sizeof(conn->periph.force));
#endif /* CONFIG_BT_CTLR_CONN_RANDOM_FORCE */

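        /* Channel selection algorithm: connections established via extended
         * advertising (AUX_CONNECT_REQ) always use Channel Selection
         * Algorithm #2; for a legacy CONNECT_IND the peer advertises its
         * support in the ChSel bit of the PDU header.
         */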
        if (0) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
        } else if (adv->lll.aux) {
                chan_sel = 1U;
#endif
        } else {
                chan_sel = pdu_adv->chan_sel;
        }

        /* Check that the PDU field is aligned before populating the
         * connection complete event.
         */
        node = pdu_adv;
        LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_cc));

        /* Populate the fields required for the connection complete event */
        cc = node;
        cc->status = 0U;
        cc->role = 1U;

#if defined(CONFIG_BT_CTLR_PRIVACY)
        if (ull_filter_lll_lrpa_used(adv->lll.rl_idx)) {
                memcpy(&cc->local_rpa[0], &pdu_adv->connect_ind.adv_addr[0],
                       BDADDR_SIZE);
        } else {
                memset(&cc->local_rpa[0], 0x0, BDADDR_SIZE);
        }

        if (rl_idx != FILTER_IDX_NONE) {
                /* Store the peer RPA */
                memcpy(cc->peer_rpa, peer_addr, BDADDR_SIZE);
        } else {
                memset(cc->peer_rpa, 0x0, BDADDR_SIZE);
        }
#endif /* CONFIG_BT_CTLR_PRIVACY */

        cc->peer_addr_type = peer_addr_type;
        memcpy(cc->peer_addr, peer_id_addr, BDADDR_SIZE);

        cc->interval = lll->interval;
        cc->latency = lll->latency;
        cc->timeout = conn->supervision_timeout;
        cc->sca = conn->periph.sca;

        lll->handle = ll_conn_handle_get(conn);
        rx->hdr.handle = lll->handle;

#if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
        lll->tx_pwr_lvl = RADIO_TXP_DEFAULT;
#endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */

        /* Use Channel Selection Algorithm #2 if the peer also supports it */
        if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
                struct node_rx_pdu *rx_csa;
                struct node_rx_cs *cs;

                /* Pick the rx node instance stored within the connection
                 * rx node.
                 */
                rx_csa = (void *)ftr->extra;

                /* Enqueue the connection event */
                ll_rx_put(link, rx);

                /* Use the rx node for the CSA event */
                rx = rx_csa;
                link = rx->hdr.link;

                rx->hdr.handle = lll->handle;
                rx->hdr.type = NODE_RX_TYPE_CHAN_SEL_ALGO;

                cs = (void *)rx_csa->pdu;

                if (chan_sel) {
                        lll->data_chan_sel = 1;
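                        /* CSA#2 channel identifier, derived from the access
                         * address as Access Address[31:16] XOR
                         * Access Address[15:0] (Core Spec, Vol 6, Part B,
                         * Section 4.5.8.3).
                         */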
                        lll->data_chan_id = lll_chan_id(lll->access_addr);

                        cs->csa = 0x01;
                } else {
                        cs->csa = 0x00;
                }
        }

#if defined(CONFIG_BT_CTLR_ADV_EXT)
        if (ll_adv_cmds_is_ext()) {
                uint8_t handle;

                /* Enqueue the connection or CSA event */
                ll_rx_put(link, rx);

                /* Use the reserved link and node_rx to prepare the
                 * advertising terminate event.
                 */
                rx = adv->lll.node_rx_adv_term;
                link = rx->hdr.link;

                handle = ull_adv_handle_get(adv);
                LL_ASSERT(handle < BT_CTLR_ADV_SET);

                rx->hdr.type = NODE_RX_TYPE_EXT_ADV_TERMINATE;
                rx->hdr.handle = handle;
                rx->rx_ftr.param_adv_term.status = 0U;
                rx->rx_ftr.param_adv_term.conn_handle = lll->handle;
                rx->rx_ftr.param_adv_term.num_events = 0U;
        }
#endif

        ll_rx_put_sched(link, rx);

#if defined(CONFIG_BT_CTLR_PERIPHERAL_RESERVE_MAX)
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
#if defined(CONFIG_BT_CTLR_PHY)
        max_tx_time = lll->dle.eff.max_tx_time;
        max_rx_time = lll->dle.eff.max_rx_time;

#else /* !CONFIG_BT_CTLR_PHY */
        max_tx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
        max_rx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
#endif /* !CONFIG_BT_CTLR_PHY */

#else /* !CONFIG_BT_CTLR_DATA_LENGTH */
        max_tx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
        max_rx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);

#if defined(CONFIG_BT_CTLR_PHY)
        max_tx_time = MAX(max_tx_time,
                          PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_tx));
        max_rx_time = MAX(max_rx_time,
                          PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_rx));
#endif /* CONFIG_BT_CTLR_PHY */
#endif /* !CONFIG_BT_CTLR_DATA_LENGTH */

#else /* !CONFIG_BT_CTLR_PERIPHERAL_RESERVE_MAX */
#if defined(CONFIG_BT_CTLR_PHY)
        max_tx_time = PDU_MAX_US(0U, 0U, lll->phy_tx);
        max_rx_time = PDU_MAX_US(0U, 0U, lll->phy_rx);

#else /* !CONFIG_BT_CTLR_PHY */
        max_tx_time = PDU_MAX_US(0U, 0U, PHY_1M);
        max_rx_time = PDU_MAX_US(0U, 0U, PHY_1M);
#endif /* !CONFIG_BT_CTLR_PHY */
#endif /* !CONFIG_BT_CTLR_PERIPHERAL_RESERVE_MAX */

#if defined(CONFIG_BT_CTLR_PHY)
        ready_delay_us = lll_radio_rx_ready_delay_get(lll->phy_rx, PHY_FLAGS_S8);
#else /* CONFIG_BT_CTLR_PHY */
        ready_delay_us = lll_radio_rx_ready_delay_get(0U, 0U);
#endif /* CONFIG_BT_CTLR_PHY */

        lll->tifs_tx_us = EVENT_IFS_DEFAULT_US;
        lll->tifs_rx_us = EVENT_IFS_DEFAULT_US;
        lll->tifs_hcto_us = EVENT_IFS_DEFAULT_US;
        lll->tifs_cis_us = EVENT_IFS_DEFAULT_US;

        /* Calculate the event time reservation */
        slot_us = max_rx_time + max_tx_time;
        slot_us += lll->tifs_rx_us + (EVENT_CLOCK_JITTER_US << 1);
        slot_us += ready_delay_us;

        if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX)) {
                slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
        }

        conn->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);
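        /* Illustrative sizing (values assumed): a 27 byte Data Channel
         * payload on the 1M PHY occupies 296 us on air (37 octets at 8 us
         * each, MIC excluded), so with the 150 us T_IFS, clock jitter on
         * both ends and the radio ready delay, the reservation is roughly
         * 296 + 296 + 150 + (2 * EVENT_CLOCK_JITTER_US) + ready_delay_us
         * microseconds per connection event.
         */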

        ticks_slot_offset = HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
        if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
                ticks_slot_overhead = ticks_slot_offset;
        } else {
                ticks_slot_overhead = 0U;
        }
        ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

        conn_interval_us -= lll->periph.window_widening_periodic_us;

        conn_offset_us = ftr->radio_end_us;
        conn_offset_us += win_offset * CONN_INT_UNIT_US;
        conn_offset_us += win_delay_us;
        conn_offset_us -= EVENT_TICKER_RES_MARGIN_US;
        conn_offset_us -= EVENT_JITTER_US;
        conn_offset_us -= ready_delay_us;
        /*
         * NOTE: The correct window widening for the first connection event
         * would be:
         *
         * conn_offset_us -=
         *      DIV_ROUND_UP(((lll_clock_ppm_local_get() +
         *                     lll_clock_ppm_get(conn->periph.sca)) *
         *      (win_offset * CONN_INT_UNIT_US + win_delay_us)), USEC_PER_SEC);
         *
         * But, as the implementation currently uses the
         * `lll->periph.window_widening_periodic_us` value for drift
         * compensation, we may as well use that value here too. Adding
         * another value for LLL to use seems overkill for this one case.
         */
        conn_offset_us -= lll->periph.window_widening_periodic_us;

#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
        /* Disable the ticker job, in order to chain the stop and start
         * operations and avoid the RTC being stopped when no tickers are
         * active.
         */
        mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 0);
#endif

#if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
        struct lll_adv_aux *lll_aux = adv->lll.aux;

        if (lll_aux) {
                struct ll_adv_aux_set *aux;

                aux = HDR_LLL2ULL(lll_aux);

                ticker_id_adv = TICKER_ID_ADV_AUX_BASE +
                                ull_adv_aux_handle_get(aux);
                ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
                                            TICKER_USER_ID_ULL_HIGH,
                                            ticker_id_adv,
                                            ticker_op_stop_adv_cb, aux);
                ticker_op_stop_adv_cb(ticker_status, aux);

                aux->is_started = 0U;
        }
#endif

        /* Stop the Advertiser */
        ticker_id_adv = TICKER_ID_ADV_BASE + ull_adv_handle_get(adv);
        ticks_at_stop = ftr->ticks_anchor +
                        HAL_TICKER_US_TO_TICKS(conn_offset_us) -
                        ticks_slot_offset;
        ticker_status = ticker_stop_abs(TICKER_INSTANCE_ID_CTLR,
                                        TICKER_USER_ID_ULL_HIGH,
                                        ticker_id_adv, ticks_at_stop,
                                        ticker_op_stop_adv_cb, adv);
        ticker_op_stop_adv_cb(ticker_status, adv);

        /* Stop the direct advertising stop ticker */
        if (adv->lll.is_hdcd) {
                /* The advertiser stop ticker can expire while here in this
                 * ISR; a deferred attempt to stop it can then fail as it
                 * would have already expired, hence ignore the failure.
                 */
                (void)ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
                                  TICKER_ID_ADV_STOP, NULL, NULL);
        }

        /* Start the Peripheral */
        ticker_id_conn = TICKER_ID_CONN_BASE + ll_conn_handle_get(conn);
        ticker_status = ticker_start(TICKER_INSTANCE_ID_CTLR,
                                     TICKER_USER_ID_ULL_HIGH,
                                     ticker_id_conn,
                                     ftr->ticks_anchor - ticks_slot_offset,
                                     HAL_TICKER_US_TO_TICKS(conn_offset_us),
                                     HAL_TICKER_US_TO_TICKS(conn_interval_us),
                                     HAL_TICKER_REMAINDER(conn_interval_us),
                                     TICKER_NULL_LAZY,
                                     (conn->ull.ticks_slot +
                                      ticks_slot_overhead),
                                     ull_periph_ticker_cb, conn, ticker_op_cb,
                                     (void *)__LINE__);
        LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
                  (ticker_status == TICKER_STATUS_BUSY));
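        /* TICKER_STATUS_BUSY means the start operation is deferred to the
         * ticker job context; its final status is then delivered to
         * ticker_op_cb.
         */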

#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
        /* Enable the ticker job, irrespective of whether it was disabled in
         * this function, so that the first connection event can be scheduled
         * as soon as possible.
         */
        mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1);
#endif
}

void ull_periph_latency_cancel(struct ll_conn *conn, uint16_t handle)
{
        /* Break peripheral latency */
        if (conn->lll.latency_event && !conn->periph.latency_cancel) {
                uint32_t ticker_status;

                conn->periph.latency_cancel = 1U;

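                /* A `lazy` argument of 1 requests that the ticker's pending
                 * latency be cleared (internal lazy count reset to 0), so
                 * that the next connection event is scheduled at the upcoming
                 * interval instead of after the peripheral latency.
                 */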
                ticker_status =
                        ticker_update(TICKER_INSTANCE_ID_CTLR,
                                      TICKER_USER_ID_THREAD,
                                      (TICKER_ID_CONN_BASE + handle),
                                      0, 0, 0, 0, 1, 0,
                                      ticker_update_latency_cancel_op_cb,
                                      (void *)conn);
                LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
                          (ticker_status == TICKER_STATUS_BUSY));
        }
}

void ull_periph_ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
                          uint32_t remainder, uint16_t lazy, uint8_t force,
                          void *param)
{
        static memq_link_t link;
        static struct mayfly mfy = {0, 0, &link, NULL, lll_periph_prepare};
        static struct lll_prepare_param p;
        struct ll_conn *conn;
        uint32_t err;
        uint8_t ref;

        DEBUG_RADIO_PREPARE_S(1);

        conn = param;

        /* Check if the ticker is being stopped (on disconnection, a race
         * with ticker expiry).
         */
        if (unlikely(conn->lll.handle == 0xFFFF)) {
                DEBUG_RADIO_CLOSE_S(0);
                return;
        }

#if defined(CONFIG_BT_CTLR_CONN_META)
        conn->common.is_must_expire = (lazy == TICKER_LAZY_MUST_EXPIRE);
#endif
        /* If this is a must-expire callback, the LLCP state machine does not
         * need to know; it will be called with lazy > 0 when scheduled on
         * air.
         */
        if (!IS_ENABLED(CONFIG_BT_CTLR_CONN_META) ||
            (lazy != TICKER_LAZY_MUST_EXPIRE)) {
                int ret;

                /* Handle any LL Control Procedures */
                ret = ull_conn_llcp(conn, ticks_at_expire, remainder, lazy);
                if (ret) {
                        /* NOTE: Under BT_CTLR_LOW_LAT, the ULL_LOW context is
                         * disabled inside radio events; hence abort any
                         * active radio event, which will re-enable the
                         * ULL_LOW context and permit the ticker job to run.
                         */
                        if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) &&
                            (CONFIG_BT_CTLR_LLL_PRIO ==
                             CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
                                ll_radio_state_abort();
                        }

                        DEBUG_RADIO_CLOSE_S(0);
                        return;
                }
        }

        /* Increment the prepare reference count */
        ref = ull_ref_inc(&conn->ull);
        LL_ASSERT(ref);

        /* Append the timing parameters */
        p.ticks_at_expire = ticks_at_expire;
        p.remainder = remainder;
        p.lazy = lazy;
        p.force = force;
        p.param = &conn->lll;
        mfy.param = &p;

        /* Kick off the LLL prepare */
        err = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
                             0, &mfy);
        LL_ASSERT(!err);

        /* De-mux remaining tx nodes from the FIFO */
        ull_conn_tx_demux(UINT8_MAX);

        /* Enqueue towards LLL */
        ull_conn_tx_lll_enqueue(conn, UINT8_MAX);

        DEBUG_RADIO_PREPARE_S(1);
}

#if defined(CONFIG_BT_CTLR_LE_ENC)
uint8_t ll_start_enc_req_send(uint16_t handle, uint8_t error_code,
                              uint8_t const *const ltk)
{
        struct ll_conn *conn;

        conn = ll_connected_get(handle);
        if (!conn) {
                return BT_HCI_ERR_UNKNOWN_CONN_ID;
        }

        if (error_code) {
                return ull_cp_ltk_req_neg_reply(conn);
        } else {
                return ull_cp_ltk_req_reply(conn, ltk);
        }
}
#endif /* CONFIG_BT_CTLR_LE_ENC */

static void invalid_release(struct ull_hdr *hdr, struct lll_conn *lll,
                            memq_link_t *link, struct node_rx_pdu *rx)
{
        /* Reset the advertising disabled callback */
        hdr->disabled_cb = NULL;

        /* Let the advertiser continue with connectable advertising */
        lll->periph.initiated = 0U;

        /* Mark the buffer for release */
        rx->hdr.type = NODE_RX_TYPE_RELEASE;

        /* Release the CSA#2 related node rx too */
        if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
                struct node_rx_pdu *rx_csa;

                /* Pick the rx node instance stored within the
                 * connection rx node.
                 */
                rx_csa = rx->rx_ftr.extra;

                /* Enqueue the connection event to be released */
                ll_rx_put(link, rx);

                /* Use the rx node for the CSA event */
                rx = rx_csa;
                link = rx->hdr.link;

                /* Mark the buffer for release */
                rx->hdr.type = NODE_RX_TYPE_RELEASE;
        }

        /* Enqueue the connection or CSA event to be released */
        ll_rx_put_sched(link, rx);
}

static void ticker_op_stop_adv_cb(uint32_t status, void *param)
{
        LL_ASSERT(status != TICKER_STATUS_FAILURE ||
                  param == ull_disable_mark_get());
}

static void ticker_op_cb(uint32_t status, void *param)
{
        ARG_UNUSED(param);

        LL_ASSERT(status == TICKER_STATUS_SUCCESS);
}

static void ticker_update_latency_cancel_op_cb(uint32_t ticker_status,
                                               void *param)
{
        struct ll_conn *conn = param;

        LL_ASSERT(ticker_status == TICKER_STATUS_SUCCESS);

        conn->periph.latency_cancel = 0U;
}

#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
uint8_t ll_set_min_used_chans(uint16_t handle, uint8_t const phys,
                              uint8_t const min_used_chans)
{
        struct ll_conn *conn;

        conn = ll_connected_get(handle);
        if (!conn) {
                return BT_HCI_ERR_UNKNOWN_CONN_ID;
        }

        if (!conn->lll.role) {
                return BT_HCI_ERR_CMD_DISALLOWED;
        }

        return ull_cp_min_used_chans(conn, phys, min_used_chans);
}
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */