1 /*
2 * Copyright (c) 2020-2021 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <stdlib.h>
8 #include <zephyr/kernel.h>
9 #include <soc.h>
10 #include <zephyr/sys/byteorder.h>
11 #include <zephyr/bluetooth/hci_types.h>
12
13 #include "util/util.h"
14 #include "util/mem.h"
15 #include "util/memq.h"
16 #include "util/mayfly.h"
17 #include "util/dbuf.h"
18
19 #include "hal/cpu.h"
20 #include "hal/ccm.h"
21 #include "hal/radio.h"
22 #include "hal/ticker.h"
23
24 #include "ticker/ticker.h"
25
26 #include "pdu_df.h"
27 #include "lll/pdu_vendor.h"
28 #include "pdu.h"
29
30 #include "lll.h"
31 #include "lll/lll_adv_types.h"
32 #include "lll_adv.h"
33 #include "lll/lll_adv_pdu.h"
34 #include "lll_clock.h"
35 #include "lll/lll_vendor.h"
36 #include "lll_chan.h"
37 #include "lll_scan.h"
38 #include "lll/lll_df_types.h"
39 #include "lll_conn.h"
40 #include "lll_conn_iso.h"
41 #include "lll_sync.h"
42 #include "lll_sync_iso.h"
43
44 #include "isoal.h"
45
46 #include "ull_tx_queue.h"
47
48 #include "ull_filter.h"
49 #include "ull_iso_types.h"
50 #include "ull_scan_types.h"
51 #include "ull_sync_types.h"
52 #include "ull_conn_types.h"
53 #include "ull_adv_types.h"
54 #include "ull_conn_iso_types.h"
55
56 #include "ull_internal.h"
57 #include "ull_adv_internal.h"
58 #include "ull_scan_internal.h"
59 #include "ull_sync_internal.h"
60 #include "ull_conn_internal.h"
61 #include "ull_conn_iso_internal.h"
62 #include "ull_df_types.h"
63 #include "ull_df_internal.h"
64
65 #include "ull_llcp.h"
66 #include "ll.h"
67
68 #include <soc.h>
69 #include "hal/debug.h"
70
71 /* Check that timeout_reload member is at safe offset when ll_sync_set is
72 * allocated using mem interface. timeout_reload being non-zero is used to
73 * indicate that a sync is established. And is used to check for sync being
74 * terminated under race conditions between HCI Tx and Rx thread when
75 * Periodic Advertising Reports are generated.
76 */
77 MEM_FREE_MEMBER_ACCESS_BUILD_ASSERT(struct ll_sync_set, timeout_reload);
78
79 static struct ll_sync_set *ull_sync_create(uint8_t sid, uint16_t timeout, uint16_t skip,
80 uint8_t cte_type, uint8_t rx_enable, uint8_t nodups);
81 static int init_reset(void);
82 static inline struct ll_sync_set *sync_acquire(void);
83 static void sync_ticker_cleanup(struct ll_sync_set *sync, ticker_op_func stop_op_cb);
84 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
85 uint32_t remainder, uint16_t lazy, uint8_t force,
86 void *param);
87 static void ticker_start_op_cb(uint32_t status, void *param);
88 static void ticker_update_op_cb(uint32_t status, void *param);
89 static void ticker_stop_sync_expire_op_cb(uint32_t status, void *param);
90 static void sync_expire(void *param);
91 static void ticker_stop_sync_lost_op_cb(uint32_t status, void *param);
92 static void sync_lost(void *param);
93 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
94 static bool peer_sid_sync_exists(uint8_t const peer_id_addr_type,
95 uint8_t const *const peer_id_addr,
96 uint8_t sid);
97 #endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */
98 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
99 !defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
100 static struct pdu_cte_info *pdu_cte_info_get(struct pdu_adv *pdu);
101 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && !CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
102
103 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
104 static void ticker_update_op_status_give(uint32_t status, void *param);
105 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
106
107 static struct ll_sync_set ll_sync_pool[CONFIG_BT_PER_ADV_SYNC_MAX];
108 static void *sync_free;
109
110 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
111 /* Semaphore to wakeup thread on ticker API callback */
112 static struct k_sem sem_ticker_cb;
113 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
114
ll_sync_create(uint8_t options,uint8_t sid,uint8_t adv_addr_type,uint8_t * adv_addr,uint16_t skip,uint16_t sync_timeout,uint8_t sync_cte_type)115 uint8_t ll_sync_create(uint8_t options, uint8_t sid, uint8_t adv_addr_type,
116 uint8_t *adv_addr, uint16_t skip,
117 uint16_t sync_timeout, uint8_t sync_cte_type)
118 {
119 struct ll_scan_set *scan_coded;
120 struct ll_scan_set *scan;
121 struct ll_sync_set *sync;
122 uint8_t rx_enable;
123 uint8_t nodups;
124
125 scan = ull_scan_set_get(SCAN_HANDLE_1M);
126 if (!scan || scan->periodic.sync) {
127 return BT_HCI_ERR_CMD_DISALLOWED;
128 }
129
130 if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
131 scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
132 if (!scan_coded || scan_coded->periodic.sync) {
133 return BT_HCI_ERR_CMD_DISALLOWED;
134 }
135 }
136
137 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
138 /* Do not sync twice to the same peer and same SID */
139 if (((options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST) == 0U) &&
140 peer_sid_sync_exists(adv_addr_type, adv_addr, sid)) {
141 return BT_HCI_ERR_CONN_ALREADY_EXISTS;
142 }
143 #endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */
144
145 rx_enable = !(options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_REPORTS_DISABLED);
146 nodups = (options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE) ? 1U : 0U;
147
148 sync = ull_sync_create(sid, sync_timeout, skip, sync_cte_type, rx_enable, nodups);
149 if (!sync) {
150 return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
151 }
152
153 scan->periodic.cancelled = 0U;
154 scan->periodic.state = LL_SYNC_STATE_IDLE;
155 scan->periodic.param = NULL;
156 scan->periodic.filter_policy =
157 options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST;
158 if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
159 scan_coded->periodic.cancelled = 0U;
160 scan_coded->periodic.state = LL_SYNC_STATE_IDLE;
161 scan_coded->periodic.param = NULL;
162 scan_coded->periodic.filter_policy =
163 scan->periodic.filter_policy;
164 }
165
166 if (!scan->periodic.filter_policy) {
167 sync->peer_id_addr_type = adv_addr_type;
168 (void)memcpy(sync->peer_id_addr, adv_addr, BDADDR_SIZE);
169 }
170
171 /* Remember the peer address when periodic advertiser list is not
172 * used.
173 * NOTE: Peer address will be filled/overwritten with correct identity
174 * address on sync setup when privacy is enabled.
175 */
176 if ((options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST) == 0U) {
177 sync->peer_id_addr_type = adv_addr_type;
178 (void)memcpy(sync->peer_id_addr, adv_addr,
179 sizeof(sync->peer_id_addr));
180 }
181
182 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
183 /* Set filter policy in lll_sync */
184 sync->lll.filter_policy = scan->periodic.filter_policy;
185 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
186
187 /* Enable scanner to create sync */
188 scan->periodic.sync = sync;
189
190 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
191 scan->lll.is_sync = 1U;
192 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
193 if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
194 scan_coded->periodic.sync = sync;
195
196 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
197 scan_coded->lll.is_sync = 1U;
198 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
199 }
200
201 return 0;
202 }
203
204 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
/* @brief Set up a periodic advertising sync from parameters received through
 *        Periodic Advertising Sync Transfer (LL_PERIODIC_SYNC_IND).
 *
 * Populates the LLL sync context from the SyncInfo field, derives the sync
 * timeout reload and skip values, computes the initial window widening from
 * the drift of both the periodic advertiser's and the sender's clocks
 * (Core Spec v5.x, Vol 6, Part B, Section 5.1.13.1), prepares the
 * Sync Transfer Received node for later dispatch, and starts the sync ticker
 * anchored to the connection event that delivered the indication.
 *
 * @param[in] conn                  Connection on which the PAST indication
 *                                  was received.
 * @param[in] service_data          Service_Data value from the indication.
 * @param[in] sync                  Sync context to populate.
 * @param[in] si                    SyncInfo field from the indication.
 * @param[in] conn_evt_offset       Offset in connection events from the event
 *                                  the sync offset was referenced to.
 * @param[in] last_pa_event_counter Periodic advertising event counter of the
 *                                  sender's last received PA event.
 * @param[in] sync_conn_event_count Connection event count when the sender
 *                                  computed the sync offset.
 * @param[in] sender_sca            Sleep clock accuracy of the sender.
 */
void ull_sync_setup_from_sync_transfer(struct ll_conn *conn, uint16_t service_data,
				       struct ll_sync_set *sync, struct pdu_adv_sync_info *si,
				       int16_t conn_evt_offset, uint16_t last_pa_event_counter,
				       uint16_t sync_conn_event_count, uint8_t sender_sca)
{
	struct node_rx_past_received *se_past;
	uint32_t ticks_slot_overhead;
	uint32_t ticks_slot_offset;
	uint32_t conn_interval_us;
	uint32_t sync_offset_us;
	uint32_t ready_delay_us;
	struct node_rx_pdu *rx;
	uint8_t *data_chan_map;
	struct lll_sync *lll;
	uint32_t interval_us;
	uint32_t slot_us;
	uint32_t ticks_anchor;
	uint8_t chm_last;
	uint32_t ret;
	uint16_t interval;
	uint16_t sync_handle;
	uint8_t sca;

	lll = &sync->lll;

	/* Copy channel map from sca_chm field in sync_info structure, and
	 * clear the SCA bits.
	 */
	chm_last = lll->chm_first;
	lll->chm_last = chm_last;
	data_chan_map = lll->chm[chm_last].data_chan_map;
	(void)memcpy(data_chan_map, si->sca_chm,
		     sizeof(lll->chm[chm_last].data_chan_map));
	data_chan_map[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &=
		~PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK;
	lll->chm[chm_last].data_chan_count =
		util_ones_count_get(data_chan_map,
				    sizeof(lll->chm[chm_last].data_chan_map));
	if (lll->chm[chm_last].data_chan_count < CHM_USED_COUNT_MIN) {
		/* Ignore sync setup, invalid available channel count */
		return;
	}

	/* Access address and CRC init identify the periodic train; the event
	 * counter is taken as-is from the SyncInfo field.
	 */
	memcpy(lll->access_addr, si->aa, sizeof(lll->access_addr));
	lll->data_chan_id = lll_chan_id(lll->access_addr);
	memcpy(lll->crc_init, si->crc_init, sizeof(lll->crc_init));
	lll->event_counter = sys_le16_to_cpu(si->evt_cntr);

	interval = sys_le16_to_cpu(si->interval);
	interval_us = interval * PERIODIC_INT_UNIT_US;

	/* Convert from 10ms units to interval units */
	if (sync->timeout != 0 && interval_us != 0) {
		sync->timeout_reload = RADIO_SYNC_EVENTS((sync->timeout * 10U *
							  USEC_PER_MSEC), interval_us);
	}

	/* Adjust Skip value so that there is minimum of 6 events that can be
	 * listened to before Sync_Timeout occurs.
	 * The adjustment of the skip value is controller implementation
	 * specific and not specified by the Bluetooth Core Specification v5.3.
	 * The Controller `may` use the Skip value, and the implementation here
	 * covers a case where Skip value could lead to less events being
	 * listened to until Sync_Timeout. Listening to more consecutive events
	 * before Sync_Timeout increases probability of retaining the Periodic
	 * Synchronization.
	 */
	if (sync->timeout_reload > CONN_ESTAB_COUNTDOWN) {
		uint16_t skip_max = sync->timeout_reload - CONN_ESTAB_COUNTDOWN;

		if (sync->skip > skip_max) {
			sync->skip = skip_max;
		}
	}

	/* Number of events to attempt before the sync establishment expires */
	sync->sync_expire = CONN_ESTAB_COUNTDOWN;

	/* Extract the SCA value from the sca_chm field of the sync_info
	 * structure.
	 */
	sca = (si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
	       PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK) >>
	      PDU_SYNC_INFO_SCA_CHM_SCA_BIT_POS;

	lll->sca = sca;

	/* Per-event window widening from combined local and advertiser clock
	 * accuracy over one periodic interval.
	 */
	lll->window_widening_periodic_us =
		DIV_ROUND_UP(((lll_clock_ppm_local_get() +
			       lll_clock_ppm_get(sca)) *
			      interval_us), USEC_PER_SEC);
	lll->window_widening_max_us = (interval_us >> 1) - EVENT_IFS_US;
	if (PDU_ADV_SYNC_INFO_OFFS_UNITS_GET(si)) {
		lll->window_size_event_us = OFFS_UNIT_300_US;
	} else {
		lll->window_size_event_us = OFFS_UNIT_30_US;
	}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	lll->node_cte_incomplete = NULL;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* Prepare Periodic Advertising Sync Transfer Received event (dispatched later) */
	sync_handle = ull_sync_handle_get(sync);
	rx = (void *)sync->node_rx_sync_estab;
	rx->hdr.type = NODE_RX_TYPE_SYNC_TRANSFER_RECEIVED;
	rx->hdr.handle = sync_handle;
	rx->rx_ftr.param = sync;

	/* Create node_rx and assign values */
	se_past = (void *)rx->pdu;
	se_past->rx_sync.status = BT_HCI_ERR_SUCCESS;
	se_past->rx_sync.interval = interval;
	se_past->rx_sync.phy = sync->lll.phy;
	se_past->rx_sync.sca = sca;
	se_past->conn_handle = ll_conn_handle_get(conn);
	se_past->service_data = service_data;

	conn_interval_us = conn->lll.interval * CONN_INT_UNIT_US;

	/* Calculate offset and schedule sync radio events */
	ready_delay_us = lll_radio_rx_ready_delay_get(lll->phy, PHY_FLAGS_S8);

	sync_offset_us = PDU_ADV_SYNC_INFO_OFFSET_GET(si) * lll->window_size_event_us;
	/* offs_adjust may be 1 only if sync setup by LL_PERIODIC_SYNC_IND */
	sync_offset_us += (PDU_ADV_SYNC_INFO_OFFS_ADJUST_GET(si) ? OFFS_ADJUST_US : 0U);
	sync_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	sync_offset_us -= EVENT_JITTER_US;
	sync_offset_us -= ready_delay_us;

	if (conn_evt_offset) {
		int64_t conn_offset_us = (int64_t)conn_evt_offset * conn_interval_us;

		/* If the referenced PA event is already in the past, advance
		 * the event counter by whole intervals until the offset is
		 * non-negative again.
		 */
		if ((int64_t)sync_offset_us + conn_offset_us < 0) {
			uint32_t total_offset_us = llabs((int64_t)sync_offset_us + conn_offset_us);
			uint32_t sync_intervals = DIV_ROUND_UP(total_offset_us, interval_us);

			lll->event_counter += sync_intervals;
			sync_offset_us = (sync_intervals * interval_us) - total_offset_us;
		} else {
			sync_offset_us += conn_offset_us;
		}
	}

	/* Calculate initial window widening - see Core Spec vol 6, part B, 5.1.13.1 */
	{
		uint16_t event_delta;
		uint32_t drift_us;
		uint64_t da;
		uint64_t db;
		uint64_t d;

		const uint32_t local_sca_ppm = lll_clock_ppm_local_get();

		event_delta = lll->event_counter - last_pa_event_counter;

		/* da: drift over the elapsed periodic advertising events */
		da = (uint64_t)(local_sca_ppm + lll_clock_ppm_get(sca)) * interval_us;
		da = DIV_ROUND_UP(da * (uint64_t)event_delta, USEC_PER_SEC);

		/* db: drift over the elapsed connection events since the
		 * sender computed the offset
		 */
		db = (uint64_t)(local_sca_ppm + lll_clock_ppm_get(sender_sca)) * conn_interval_us;
		db = DIV_ROUND_UP(db * (uint64_t)(ull_conn_event_counter(conn) -
						  sync_conn_event_count), USEC_PER_SEC);

		d = DIV_ROUND_UP((da + db) * (USEC_PER_SEC + local_sca_ppm +
					      lll_clock_ppm_get(sca) +
					      lll_clock_ppm_get(sender_sca)), USEC_PER_SEC);

		/* Limit drift compensation to the maximum window widening */
		drift_us = MIN((uint32_t)d, lll->window_widening_max_us);

		/* Apply total drift to initial window size */
		lll->window_size_event_us += drift_us;

		/* Adjust offset if less than the drift compensation */
		while (sync_offset_us < drift_us) {
			sync_offset_us += interval_us;
			lll->event_counter++;
		}

		sync_offset_us -= drift_us;
	}

	interval_us -= lll->window_widening_periodic_us;

	/* Calculate event time reservation */
	slot_us = PDU_AC_MAX_US(PDU_AC_EXT_PAYLOAD_RX_SIZE, lll->phy);
	slot_us += ready_delay_us;

	/* Add implementation defined radio event overheads */
	if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX)) {
		slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
	}

	sync->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);

	ticks_slot_offset = HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	sync->lll_sync_prepare = lll_sync_create_prepare;

	/* Anchor to the connection event that carried the indication */
	ticks_anchor = conn->llcp.prep.ticks_at_expire;

#if defined(CONFIG_BT_PERIPHERAL)
	if (conn->lll.role == BT_HCI_ROLE_PERIPHERAL) {
		/* Compensate for window widening */
		ticks_anchor += HAL_TICKER_US_TO_TICKS(conn->lll.periph.window_widening_event_us);
	}
#endif /* CONFIG_BT_PERIPHERAL */

	ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			   (TICKER_ID_SCAN_SYNC_BASE + sync_handle),
			   ticks_anchor,
			   HAL_TICKER_US_TO_TICKS(sync_offset_us),
			   HAL_TICKER_US_TO_TICKS(interval_us),
			   HAL_TICKER_REMAINDER(interval_us),
			   TICKER_NULL_LAZY,
			   (sync->ull.ticks_slot + ticks_slot_overhead),
			   ticker_cb, sync,
			   ticker_start_op_cb, (void *)__LINE__);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}
431 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
432
433
/* @brief Link Layer interface function corresponding to HCI LE Periodic
 *        Advertising Create Sync Cancel command.
 *
 * Cancels a pending sync create procedure, handling the race with a sync
 * being established concurrently in the ULL context.
 *
 * @param[out] rx On success, set to a node_rx carrying the Sync Established
 *                event with status Operation Cancelled by Host.
 *
 * @return HCI error code; BT_HCI_ERR_CMD_DISALLOWED when no sync create
 *         procedure is pending, or it already completed.
 */
uint8_t ll_sync_create_cancel(void **rx)
{
	struct ll_scan_set *scan_coded;
	memq_link_t *link_sync_estab;
	memq_link_t *link_sync_lost;
	struct node_rx_pdu *node_rx;
	struct ll_scan_set *scan;
	struct ll_sync_set *sync;
	struct node_rx_sync *se;

	scan = ull_scan_set_get(SCAN_HANDLE_1M);
	if (!scan || !scan->periodic.sync) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
		if (!scan_coded || !scan_coded->periodic.sync) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

	/* Check for race condition where in sync is established when sync
	 * create cancel is invoked.
	 *
	 * Setting `scan->periodic.cancelled` to represent cancellation
	 * requested in the thread context. Checking `scan->periodic.sync` for
	 * NULL confirms if synchronization was established before
	 * `scan->periodic.cancelled` was set to 1U.
	 */
	scan->periodic.cancelled = 1U;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded->periodic.cancelled = 1U;
	}
	/* Barrier: make cancelled flags visible before re-reading sync */
	cpu_dmb();
	sync = scan->periodic.sync;
	if (!sync) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* node_rx_sync_estab is assigned when Host calls create sync and cleared when sync is
	 * established. timeout_reload is set when sync is found and setup. It is non-zero until
	 * sync is terminated. Together they give information about current sync state:
	 * - node_rx_sync_estab == NULL && timeout_reload != 0 => sync is established
	 * - node_rx_sync_estab == NULL && timeout_reload == 0 => sync is terminated
	 * - node_rx_sync_estab != NULL && timeout_reload == 0 => sync is created
	 * - node_rx_sync_estab != NULL && timeout_reload != 0 => sync is waiting to be established
	 */
	if (!sync->node_rx_sync_estab) {
		/* There is no sync to be cancelled */
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Request stop; no new ULL scheduling is to be set up for this sync */
	sync->is_stop = 1U;
	cpu_dmb();

	if (sync->timeout_reload != 0U) {
		uint16_t sync_handle = ull_sync_handle_get(sync);

		LL_ASSERT(sync_handle <= UINT8_MAX);

		/* Sync is not established yet, so stop sync ticker */
		const int err =
			ull_ticker_stop_with_mark((TICKER_ID_SCAN_SYNC_BASE +
						   (uint8_t)sync_handle),
						  sync, &sync->lll);
		if (err != 0 && err != -EALREADY) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	} /* else: sync was created but not yet setup, there is no sync ticker yet. */

	/* It is safe to remove association with scanner as cancelled flag is
	 * set, sync is_stop flag was set and sync has not been established.
	 */
	ull_sync_setup_reset(sync);

	/* Mark the sync context as sync create cancelled */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)) {
		sync->timeout = 0U;
	}

	node_rx = sync->node_rx_sync_estab;
	link_sync_estab = node_rx->hdr.link;
	link_sync_lost = sync->node_rx_lost.rx.hdr.link;

	/* Release the pre-allocated rx nodes and links back to their pools */
	ll_rx_link_release(link_sync_lost);
	ll_rx_link_release(link_sync_estab);
	ll_rx_release(node_rx);

	/* Clear the node after release to mark the sync establish as being completed.
	 * In this case the completion reason is sync cancelled by Host.
	 */
	sync->node_rx_sync_estab = NULL;

	node_rx = (void *)&sync->node_rx_lost;
	node_rx->hdr.type = NODE_RX_TYPE_SYNC;
	node_rx->hdr.handle = LLL_HANDLE_INVALID;

	/* NOTE: struct node_rx_lost has uint8_t member following the
	 * struct node_rx_hdr to store the reason.
	 */
	se = (void *)node_rx->pdu;
	se->status = BT_HCI_ERR_OP_CANCELLED_BY_HOST;

	/* NOTE: Since NODE_RX_TYPE_SYNC is only generated from ULL context,
	 * pass ULL sync context as parameter.
	 */
	node_rx->rx_ftr.param = sync;

	*rx = node_rx;

	return 0;
}
547
/* @brief Link Layer interface function corresponding to HCI LE Periodic
 *        Advertising Terminate Sync command.
 *
 * Stops the sync ticker, aborts any ongoing auxiliary (chain) PDU reception,
 * releases pending rx nodes/links, and returns the sync context to the pool.
 *
 * @param[in] handle Sync_Handle identifying the periodic advertising train.
 *
 * @return HCI error codes as documented in Bluetooth Core Specification.
 */
uint8_t ll_sync_terminate(uint16_t handle)
{
	struct lll_scan_aux *lll_aux;
	memq_link_t *link_sync_lost;
	struct ll_sync_set *sync;
	int err;

	sync = ull_sync_is_enabled_get(handle);
	if (!sync) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	/* Request terminate, no new ULL scheduling to be setup */
	sync->is_stop = 1U;
	cpu_dmb();

	/* Stop periodic sync ticker timeouts */
	err = ull_ticker_stop_with_mark(TICKER_ID_SCAN_SYNC_BASE + handle,
					sync, &sync->lll);
	LL_ASSERT_INFO2(err == 0 || err == -EALREADY, handle, err);
	if (err) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Check and stop any auxiliary PDU receptions */
	lll_aux = sync->lll.lll_aux;
	if (lll_aux) {
#if defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
		err = ull_scan_aux_stop(&sync->lll);
#else /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
		struct ll_scan_aux_set *aux;

		aux = HDR_LLL2ULL(lll_aux);
		err = ull_scan_aux_stop(aux);
#endif /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
		if (err && (err != -EALREADY)) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

#if !defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
		LL_ASSERT(!aux->parent);
#endif /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
	}

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
	/* Clean up node_rx_sync_estab if still present */
	if (sync->node_rx_sync_estab) {
		memq_link_t *link_sync_estab;
		struct node_rx_pdu *node_rx;

		node_rx = (void *)sync->node_rx_sync_estab;
		link_sync_estab = node_rx->hdr.link;

		ll_rx_link_release(link_sync_estab);
		ll_rx_release(node_rx);

		sync->node_rx_sync_estab = NULL;
	}
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */

	/* Release the link pre-allocated for the Sync Lost event */
	link_sync_lost = sync->node_rx_lost.rx.hdr.link;
	ll_rx_link_release(link_sync_lost);

	/* Mark sync context not sync established */
	sync->timeout_reload = 0U;

	ull_sync_release(sync);

	return 0;
}
618
619 /* @brief Link Layer interface function corresponding to HCI LE Set Periodic
620 * Advertising Receive Enable command.
621 *
622 * @param[in] handle Sync_Handle identifying the periodic advertising
623 * train. Range: 0x0000 to 0x0EFF.
624 * @param[in] enable Bit number 0 - Reporting Enabled.
625 * Bit number 1 - Duplicate filtering enabled.
626 * All other bits - Reserved for future use.
627 *
628 * @return HCI error codes as documented in Bluetooth Core Specification v5.3.
629 */
ll_sync_recv_enable(uint16_t handle,uint8_t enable)630 uint8_t ll_sync_recv_enable(uint16_t handle, uint8_t enable)
631 {
632 struct ll_sync_set *sync;
633
634 sync = ull_sync_is_enabled_get(handle);
635 if (!sync) {
636 return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
637 }
638
639 /* Reporting enabled/disabled */
640 sync->rx_enable = (enable & BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_ENABLE) ?
641 1U : 0U;
642
643 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
644 sync->nodups = (enable & BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_FILTER_DUPLICATE) ?
645 1U : 0U;
646 #endif
647
648 return 0;
649 }
650
651 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
652 /* @brief Link Layer interface function corresponding to HCI LE Set Periodic
653 * Advertising Sync Transfer command.
654 *
655 * @param[in] conn_handle Connection_Handle identifying the connected device
656 * Range: 0x0000 to 0x0EFF.
657 * @param[in] service_data Service_Data value provided by the Host for use by the
658 * Host of the peer device.
659 * @param[in] sync_handle Sync_Handle identifying the periodic advertising
660 * train. Range: 0x0000 to 0x0EFF.
661 *
662 * @return HCI error codes as documented in Bluetooth Core Specification v5.4.
663 */
ll_sync_transfer(uint16_t conn_handle,uint16_t service_data,uint16_t sync_handle)664 uint8_t ll_sync_transfer(uint16_t conn_handle, uint16_t service_data, uint16_t sync_handle)
665 {
666 struct ll_sync_set *sync;
667 struct ll_conn *conn;
668
669 conn = ll_connected_get(conn_handle);
670 if (!conn) {
671 return BT_HCI_ERR_UNKNOWN_CONN_ID;
672 }
673
674 /* Verify that sync_handle is valid */
675 sync = ull_sync_is_enabled_get(sync_handle);
676 if (!sync) {
677 return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
678 }
679
680 /* Call llcp to start LLCP_PERIODIC_SYNC_IND */
681 return ull_cp_periodic_sync(conn, sync, NULL, service_data);
682 }
683 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */
684
685 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
686 /* @brief Link Layer interface function corresponding to HCI LE Set Periodic
687 * Advertising Sync Transfer Parameters command.
688 *
689 * @param[in] conn_handle Connection_Handle identifying the connected device
690 * Range: 0x0000 to 0x0EFF.
691 * @param[in] mode Mode specifies the action to be taken when a periodic advertising
692 * synchronization is received.
 * @param[in] skip Skip specifying the number of consecutive periodic advertising
 *                 packets that the receiver may skip after successfully receiving a
695 * periodic advertising packet. Range: 0x0000 to 0x01F3.
696 * @param[in] timeout Sync_timeout specifying the maximum permitted time between
697 * successful receives. Range: 0x000A to 0x4000.
698 * @param[in] cte_type CTE_Type specifying whether to only synchronize to periodic
699 * advertising with certain types of Constant Tone Extension.
700 *
701 * @return HCI error codes as documented in Bluetooth Core Specification v5.4.
702 */
ll_past_param(uint16_t conn_handle,uint8_t mode,uint16_t skip,uint16_t timeout,uint8_t cte_type)703 uint8_t ll_past_param(uint16_t conn_handle, uint8_t mode, uint16_t skip, uint16_t timeout,
704 uint8_t cte_type)
705 {
706 struct ll_conn *conn;
707
708 conn = ll_connected_get(conn_handle);
709 if (!conn) {
710 return BT_HCI_ERR_UNKNOWN_CONN_ID;
711 }
712
713 if (mode == BT_HCI_LE_PAST_MODE_SYNC_FILTER_DUPLICATES &&
714 !IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)) {
715 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
716 }
717
718 /* Set PAST Param for connection instance */
719 conn->past.mode = mode;
720 conn->past.skip = skip;
721 conn->past.timeout = timeout;
722 conn->past.cte_type = cte_type;
723
724 return 0;
725 }
726
727 /* @brief Link Layer interface function corresponding to HCI LE Set Default Periodic
728 * Advertising Sync Transfer Parameters command.
729 *
730 * @param[in] mode Mode specifies the action to be taken when a periodic advertising
731 * synchronization is received.
 * @param[in] skip Skip specifying the number of consecutive periodic advertising
 *                 packets that the receiver may skip after successfully receiving a
734 * periodic advertising packet. Range: 0x0000 to 0x01F3.
735 * @param[in] timeout Sync_timeout specifying the maximum permitted time between
736 * successful receives. Range: 0x000A to 0x4000.
737 * @param[in] cte_type CTE_Type specifying whether to only synchronize to periodic
738 * advertising with certain types of Constant Tone Extension.
739 *
740 * @return HCI error codes as documented in Bluetooth Core Specification v5.4.
741 */
ll_default_past_param(uint8_t mode,uint16_t skip,uint16_t timeout,uint8_t cte_type)742 uint8_t ll_default_past_param(uint8_t mode, uint16_t skip, uint16_t timeout, uint8_t cte_type)
743 {
744 if (mode == BT_HCI_LE_PAST_MODE_SYNC_FILTER_DUPLICATES &&
745 !IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)) {
746 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
747 }
748
749 /* Set default past param */
750 ull_conn_default_past_param_set(mode, skip, timeout, cte_type);
751
752 return 0;
753 }
754 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
755
int ull_sync_init(void)
{
	/* Common init/reset path; returns 0 on success, negative errno
	 * otherwise.
	 */
	return init_reset();
}
767
ull_sync_reset(void)768 int ull_sync_reset(void)
769 {
770 uint16_t handle;
771 void *rx;
772 int err;
773
774 (void)ll_sync_create_cancel(&rx);
775
776 for (handle = 0U; handle < CONFIG_BT_PER_ADV_SYNC_MAX; handle++) {
777 (void)ll_sync_terminate(handle);
778 }
779
780 err = init_reset();
781 if (err) {
782 return err;
783 }
784
785 return 0;
786 }
787
ull_sync_set_get(uint16_t handle)788 struct ll_sync_set *ull_sync_set_get(uint16_t handle)
789 {
790 if (handle >= CONFIG_BT_PER_ADV_SYNC_MAX) {
791 return NULL;
792 }
793
794 return &ll_sync_pool[handle];
795 }
796
ull_sync_is_enabled_get(uint16_t handle)797 struct ll_sync_set *ull_sync_is_enabled_get(uint16_t handle)
798 {
799 struct ll_sync_set *sync;
800
801 sync = ull_sync_set_get(handle);
802 if (!sync || !sync->timeout_reload) {
803 return NULL;
804 }
805
806 return sync;
807 }
808
ull_sync_is_valid_get(struct ll_sync_set * sync)809 struct ll_sync_set *ull_sync_is_valid_get(struct ll_sync_set *sync)
810 {
811 if (((uint8_t *)sync < (uint8_t *)ll_sync_pool) ||
812 ((uint8_t *)sync > ((uint8_t *)ll_sync_pool +
813 (sizeof(struct ll_sync_set) * (CONFIG_BT_PER_ADV_SYNC_MAX - 1))))) {
814 return NULL;
815 }
816
817 return sync;
818 }
819
ull_sync_lll_is_valid_get(struct lll_sync * lll)820 struct lll_sync *ull_sync_lll_is_valid_get(struct lll_sync *lll)
821 {
822 struct ll_sync_set *sync;
823
824 sync = HDR_LLL2ULL(lll);
825 sync = ull_sync_is_valid_get(sync);
826 if (sync) {
827 return &sync->lll;
828 }
829
830 return NULL;
831 }
832
ull_sync_handle_get(struct ll_sync_set * sync)833 uint16_t ull_sync_handle_get(struct ll_sync_set *sync)
834 {
835 return mem_index_get(sync, ll_sync_pool, sizeof(struct ll_sync_set));
836 }
837
uint16_t ull_sync_lll_handle_get(struct lll_sync *lll)
{
	struct ll_sync_set *sync = HDR_LLL2ULL(lll);

	/* Resolve the ULL container first, then its pool index */
	return ull_sync_handle_get(sync);
}
842
/* @brief Release a sync context back to the free pool.
 *
 * Frees any incomplete IQ report node still attached to the LLL context,
 * clears bookkeeping state, and returns the context to the sync pool.
 *
 * @param[in] sync Sync context to release; must not be used afterwards.
 */
void ull_sync_release(struct ll_sync_set *sync)
{
#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct lll_sync *lll = &sync->lll;

	if (lll->node_cte_incomplete) {
		const uint8_t release_cnt = 1U;
		struct node_rx_pdu *node_rx;
		memq_link_t *link;

		node_rx = &lll->node_cte_incomplete->rx;
		link = node_rx->hdr.link;

		/* Return the rx link and IQ report memory, and replenish the
		 * IQ report allocation quota.
		 */
		ll_rx_link_release(link);
		ull_iq_report_link_inc_quota(release_cnt);
		ull_df_iq_report_mem_release(node_rx);
		ull_df_rx_iq_report_alloc(release_cnt);

		lll->node_cte_incomplete = NULL;
	}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* Mark the sync context as sync create cancelled */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)) {
		sync->timeout = 0U;
	}

#if !defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
	/* reset accumulated data len */
	sync->data_len = 0U;
#endif /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */

	mem_release(sync, &sync_free);
}
877
/* @brief Check whether a received advertiser address matches the sync create
 *        parameters or the Periodic Advertiser List.
 *
 * On a match via the Periodic Advertiser List or the Resolving List, the
 * matched identity address/type is stored into the sync context for the
 * subsequent SID check.
 *
 * @param[in]     sync          Sync context being set up.
 * @param[in]     filter_policy Non-zero when the Periodic Advertiser List is
 *                              used.
 * @param[in]     addr_type     Received advertiser address type.
 * @param[in,out] addr          Received advertiser address; NOTE: overwritten
 *                              with the identity address when resolved via
 *                              the Resolving List branch below.
 * @param[in]     rl_idx        Resolving List index from address resolution.
 *
 * @return true when the address matches, false otherwise.
 */
bool ull_sync_setup_addr_check(struct ll_sync_set *sync, uint8_t filter_policy,
			       uint8_t addr_type, uint8_t *addr, uint8_t rl_idx)
{
	/* Check if Periodic Advertiser list to be used */
	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST) &&
	    filter_policy) {
		/* Check in Periodic Advertiser List */
		if (ull_filter_ull_pal_addr_match(addr_type, addr)) {
			/* Remember the address, to check with
			 * SID in Sync Info
			 */
			sync->peer_id_addr_type = addr_type;
			(void)memcpy(sync->peer_id_addr, addr,
				     BDADDR_SIZE);

			/* Address matched */
			return true;

		/* Check in Resolving List */
		} else if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY) &&
			   ull_filter_ull_pal_listed(rl_idx, &addr_type,
						     sync->peer_id_addr)) {
			/* Remember the address, to check with the
			 * SID in Sync Info
			 */
			sync->peer_id_addr_type = addr_type;

			/* Mark it as identity address from RPA */
			sync->peer_addr_resolved = 1U;

			/* Address matched */
			return true;
		}

	/* Check with explicitly supplied address */
	} else if ((addr_type == sync->peer_id_addr_type) &&
		   !memcmp(addr, sync->peer_id_addr, BDADDR_SIZE)) {
		/* Address matched */
		return true;

	/* Check identity address with explicitly supplied address */
	} else if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY) &&
		   (rl_idx < ll_rl_size_get())) {
		ll_rl_id_addr_get(rl_idx, &addr_type, addr);
		if ((addr_type == sync->peer_id_addr_type) &&
		    !memcmp(addr, sync->peer_id_addr, BDADDR_SIZE)) {
			/* Mark it as identity address from RPA */
			sync->peer_addr_resolved = 1U;

			/* Identity address matched */
			return true;
		}
	}

	return false;
}
934
ull_sync_setup_sid_match(struct ll_sync_set * sync,struct ll_scan_set * scan,uint8_t sid)935 bool ull_sync_setup_sid_match(struct ll_sync_set *sync, struct ll_scan_set *scan, uint8_t sid)
936 {
937 return (scan->periodic.state == LL_SYNC_STATE_ADDR_MATCH) &&
938 ((IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST) &&
939 scan->periodic.filter_policy &&
940 ull_filter_ull_pal_match(sync->peer_id_addr_type,
941 sync->peer_id_addr, sid)) ||
942 (!scan->periodic.filter_policy &&
943 (sid == sync->sid)));
944 }
945
/* Set up and schedule the periodic advertising sync radio events from a
 * received SyncInfo field.
 *
 * Populates the LLL sync context (channel map, access address, CRC init,
 * event counter, window widening), computes the sync timeout reload and
 * skip limits, prepares the sync established notification node, and starts
 * the sync ticker at the offset conveyed in the SyncInfo.
 *
 * @param[in] scan    Scan context on which the AUX_ADV_IND was received
 * @param[in] phy     PHY on which the periodic advertising train runs
 * @param[in] node_rx Received node carrying the PDU and radio timestamps
 * @param[in] si      SyncInfo field from the received extended advertising
 */
void ull_sync_setup(struct ll_scan_set *scan, uint8_t phy,
		    struct node_rx_pdu *node_rx, struct pdu_adv_sync_info *si)
{
	uint32_t ticks_slot_overhead;
	uint32_t ticks_slot_offset;
	struct ll_sync_set *sync;
	struct node_rx_sync *se;
	struct node_rx_ftr *ftr;
	uint32_t sync_offset_us;
	uint32_t ready_delay_us;
	struct node_rx_pdu *rx;
	uint8_t *data_chan_map;
	struct lll_sync *lll;
	uint16_t sync_handle;
	uint32_t interval_us;
	uint32_t overhead_us;
	struct pdu_adv *pdu;
	uint16_t interval;
	uint32_t slot_us;
	uint8_t chm_last;
	uint32_t ret;
	uint8_t sca;

	/* Populate the LLL context */
	sync = scan->periodic.sync;
	lll = &sync->lll;

	/* Copy channel map from sca_chm field in sync_info structure, and
	 * clear the SCA bits.
	 */
	chm_last = lll->chm_first;
	lll->chm_last = chm_last;
	data_chan_map = lll->chm[chm_last].data_chan_map;
	(void)memcpy(data_chan_map, si->sca_chm,
		     sizeof(lll->chm[chm_last].data_chan_map));
	data_chan_map[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &=
		~PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK;
	lll->chm[chm_last].data_chan_count =
		util_ones_count_get(data_chan_map,
				    sizeof(lll->chm[chm_last].data_chan_map));
	if (lll->chm[chm_last].data_chan_count < CHM_USED_COUNT_MIN) {
		/* Ignore sync setup, invalid available channel count */
		return;
	}

	memcpy(lll->access_addr, si->aa, sizeof(lll->access_addr));
	lll->data_chan_id = lll_chan_id(lll->access_addr);
	memcpy(lll->crc_init, si->crc_init, sizeof(lll->crc_init));
	lll->event_counter = sys_le16_to_cpu(si->evt_cntr);
	lll->phy = phy;
	lll->forced = 0U;

	interval = sys_le16_to_cpu(si->interval);
	interval_us = interval * PERIODIC_INT_UNIT_US;

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
	/* Save Periodic Advertisement Interval */
	sync->interval = interval;
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */

	/* Convert from 10ms units to interval units */
	sync->timeout_reload = RADIO_SYNC_EVENTS((sync->timeout * 10U *
						  USEC_PER_MSEC), interval_us);

	/* Adjust Skip value so that there is minimum of 6 events that can be
	 * listened to before Sync_Timeout occurs.
	 * The adjustment of the skip value is controller implementation
	 * specific and not specified by the Bluetooth Core Specification v5.3.
	 * The Controller `may` use the Skip value, and the implementation here
	 * covers a case where Skip value could lead to less events being
	 * listened to until Sync_Timeout. Listening to more consecutive events
	 * before Sync_Timeout increases probability of retaining the Periodic
	 * Synchronization.
	 */
	if (sync->timeout_reload > CONN_ESTAB_COUNTDOWN) {
		uint16_t skip_max = sync->timeout_reload - CONN_ESTAB_COUNTDOWN;

		if (sync->skip > skip_max) {
			sync->skip = skip_max;
		}
	} else {
		sync->skip = 0U;
	}

	sync->sync_expire = CONN_ESTAB_COUNTDOWN;

	/* Extract the SCA value from the sca_chm field of the sync_info
	 * structure.
	 */
	sca = (si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
	       PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK) >>
	      PDU_SYNC_INFO_SCA_CHM_SCA_BIT_POS;

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	lll->sca = sca;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */

	/* Window widening accounts for combined local and remote clock
	 * accuracy over one periodic interval.
	 */
	lll->window_widening_periodic_us =
		DIV_ROUND_UP(((lll_clock_ppm_local_get() +
			       lll_clock_ppm_get(sca)) *
			      interval_us), USEC_PER_SEC);
	lll->window_widening_max_us = (interval_us >> 1) - EVENT_IFS_US;
	if (PDU_ADV_SYNC_INFO_OFFS_UNITS_GET(si)) {
		lll->window_size_event_us = OFFS_UNIT_300_US;
	} else {
		lll->window_size_event_us = OFFS_UNIT_30_US;
	}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	lll->node_cte_incomplete = NULL;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* Set the state to sync create, in both 1M and Coded PHY scan
	 * contexts so the other PHY stops offering this sync context.
	 */
	scan->periodic.state = LL_SYNC_STATE_CREATED;
	scan->periodic.param = NULL;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		struct ll_scan_set *scan_1m;

		scan_1m = ull_scan_set_get(SCAN_HANDLE_1M);
		if (scan == scan_1m) {
			struct ll_scan_set *scan_coded;

			scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
			scan_coded->periodic.state = LL_SYNC_STATE_CREATED;
			scan_coded->periodic.param = NULL;
		} else {
			scan_1m->periodic.state = LL_SYNC_STATE_CREATED;
			scan_1m->periodic.param = NULL;
		}
	}

	sync_handle = ull_sync_handle_get(sync);

	/* Prepare sync notification, dispatch only on successful AUX_SYNC_IND
	 * reception.
	 */
	rx = (void *)sync->node_rx_sync_estab;
	rx->hdr.type = NODE_RX_TYPE_SYNC;
	rx->hdr.handle = sync_handle;
	rx->rx_ftr.param = sync;
	se = (void *)rx->pdu;
	se->interval = interval;
	se->phy = lll->phy;
	se->sca = sca;

	/* Calculate offset and schedule sync radio events */
	ftr = &node_rx->rx_ftr;
	pdu = (void *)((struct node_rx_pdu *)node_rx)->pdu;

	ready_delay_us = lll_radio_rx_ready_delay_get(lll->phy, PHY_FLAGS_S8);

	/* Start from the PDU end timestamp, add the SyncInfo offset, then
	 * subtract the PDU airtime, scheduling margins and radio ramp-up so
	 * the receive window opens ahead of the first AUX_SYNC_IND.
	 */
	sync_offset_us = ftr->radio_end_us;
	sync_offset_us += PDU_ADV_SYNC_INFO_OFFSET_GET(si) *
			  lll->window_size_event_us;
	/* offs_adjust may be 1 only if sync setup by LL_PERIODIC_SYNC_IND */
	sync_offset_us += (PDU_ADV_SYNC_INFO_OFFS_ADJUST_GET(si) ? OFFS_ADJUST_US : 0U);
	sync_offset_us -= PDU_AC_US(pdu->len, lll->phy, ftr->phy_flags);
	sync_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	sync_offset_us -= EVENT_JITTER_US;
	sync_offset_us -= ready_delay_us;

	/* Minimum prepare tick offset + minimum preempt tick offset are the
	 * overheads before ULL scheduling can setup radio for reception
	 */
	overhead_us = HAL_TICKER_TICKS_TO_US(HAL_TICKER_CNTR_CMP_OFFSET_MIN << 1);

	/* CPU execution overhead to setup the radio for reception */
	overhead_us += EVENT_OVERHEAD_END_US + EVENT_OVERHEAD_START_US;

	/* If not sufficient CPU processing time, skip to receiving next
	 * event.
	 */
	if ((sync_offset_us - ftr->radio_end_us) < overhead_us) {
		sync_offset_us += interval_us;
		lll->event_counter++;
	}

	interval_us -= lll->window_widening_periodic_us;

	/* Calculate event time reservation */
	slot_us = PDU_AC_MAX_US(PDU_AC_EXT_PAYLOAD_RX_SIZE, lll->phy);
	slot_us += ready_delay_us;

	/* Add implementation defined radio event overheads */
	if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX)) {
		slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
	}

	sync->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);

	ticks_slot_offset = HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	/* Use the create-prepare until the first AUX_SYNC_IND is received,
	 * after which the regular prepare is swapped in.
	 */
	sync->lll_sync_prepare = lll_sync_create_prepare;

	ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			   (TICKER_ID_SCAN_SYNC_BASE + sync_handle),
			   ftr->ticks_anchor - ticks_slot_offset,
			   HAL_TICKER_US_TO_TICKS(sync_offset_us),
			   HAL_TICKER_US_TO_TICKS(interval_us),
			   HAL_TICKER_REMAINDER(interval_us),
			   TICKER_NULL_LAZY,
			   (sync->ull.ticks_slot + ticks_slot_overhead),
			   ticker_cb, sync,
			   ticker_start_op_cb, (void *)__LINE__);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}
1159
ull_sync_setup_reset(struct ll_sync_set * sync)1160 void ull_sync_setup_reset(struct ll_sync_set *sync)
1161 {
1162 struct ll_scan_set *scan;
1163
1164 /* Remove the sync context from being associated with scan contexts */
1165 scan = ull_scan_set_get(SCAN_HANDLE_1M);
1166
1167 scan->periodic.sync = NULL;
1168
1169 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
1170 scan->lll.is_sync = 0U;
1171 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
1172
1173 if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
1174 scan = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
1175
1176 scan->periodic.sync = NULL;
1177
1178 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
1179 scan->lll.is_sync = 0U;
1180 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
1181 }
1182 }
1183
/* Handle reception of the first AUX_SYNC_IND for a sync being created.
 *
 * Dispatches the sync established notification to the Host (unless CTE type
 * filtering rejects it), then either forwards the PDU as a periodic
 * advertising report via the scan aux setup path, or releases the node when
 * the sync is to be terminated due to a disallowed CTE type.
 *
 * @param[in] link Memory queue link of the received node
 * @param[in] rx   Received AUX_SYNC_IND node
 */
void ull_sync_established_report(memq_link_t *link, struct node_rx_pdu *rx)
{
	struct node_rx_pdu *rx_establ;
	struct ll_sync_set *sync;
	struct node_rx_ftr *ftr;
	struct node_rx_sync *se;
	struct lll_sync *lll;

	ftr = &rx->rx_ftr;
	lll = ftr->param;
	sync = HDR_LLL2ULL(lll);

	/* Do nothing if sync is cancelled or lost. */
	if (unlikely(sync->is_stop || !sync->timeout_reload)) {
		return;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	enum sync_status sync_status;

#if defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	/* LLL already evaluated the CTE against the filter */
	sync_status = ftr->sync_status;
#else
	struct pdu_cte_info *rx_cte_info;

	/* Evaluate the received CTE (or its absence) against the Host
	 * requested CTE type filter.
	 */
	rx_cte_info = pdu_cte_info_get((struct pdu_adv *)rx->pdu);
	if (rx_cte_info != NULL) {
		sync_status = lll_sync_cte_is_allowed(lll->cte_type, lll->filter_policy,
						      rx_cte_info->time, rx_cte_info->type);
	} else {
		sync_status = lll_sync_cte_is_allowed(lll->cte_type, lll->filter_policy, 0,
						      BT_HCI_LE_NO_CTE);
	}

	/* If there is no CTEInline support, notify done event handler to terminate periodic
	 * advertising sync in case the CTE is not allowed.
	 * If the periodic filtering list is not used then terminate synchronization and notify
	 * host. If the periodic filtering list is used then stop synchronization with this
	 * particular periodic advertiser but continue to search for another one.
	 */
	sync->is_term = ((sync_status == SYNC_STAT_TERM) || (sync_status == SYNC_STAT_CONT_SCAN));
#endif /* CONFIG_BT_CTLR_CTEINLINE_SUPPORT */

	/* Send periodic advertisement sync established report when sync has correct CTE type
	 * or the CTE type is incorrect and filter policy doesn't allow to continue scanning.
	 */
	if (sync_status == SYNC_STAT_ALLOWED || sync_status == SYNC_STAT_TERM) {
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

	if (1) {
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

		/* Prepare and dispatch sync notification */
		rx_establ = (void *)sync->node_rx_sync_estab;
		rx_establ->hdr.handle = ull_sync_handle_get(sync);
		se = (void *)rx_establ->pdu;
		/* Clear the node to mark the sync establish as being completed.
		 * In this case the completion reason is sync being established.
		 */
		sync->node_rx_sync_estab = NULL;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
		se->status = (ftr->sync_status == SYNC_STAT_TERM) ?
			     BT_HCI_ERR_UNSUPP_REMOTE_FEATURE :
			     BT_HCI_ERR_SUCCESS;
#else
		se->status = BT_HCI_ERR_SUCCESS;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

		/* NOTE: footer param has already been populated during sync
		 * setup.
		 */

		ll_rx_put_sched(rx_establ->hdr.link, rx_establ);
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	/* Handle periodic advertising PDU and send periodic advertising scan report when
	 * the sync was found or was established in the past. The report is not sent if
	 * scanning is terminated due to wrong CTE type.
	 */
	if (sync_status == SYNC_STAT_ALLOWED || sync_status == SYNC_STAT_READY) {
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

	if (1) {
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

		/* Switch sync event prepare function to one responsible for regular PDUs receive */
		sync->lll_sync_prepare = lll_sync_prepare;

		/* Change node type to appropriately handle periodic
		 * advertising PDU report.
		 */
		rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
		ull_scan_aux_setup(link, rx);
	} else {
		rx->hdr.type = NODE_RX_TYPE_RELEASE;
		ll_rx_put_sched(link, rx);
	}
}
1284
/* Per-event done handler for a periodic advertising sync.
 *
 * Applies drift compensation, maintains the sync establishment countdown and
 * the supervision (Sync_Timeout) countdown, enforces/breaks the Skip value,
 * and updates the sync ticker accordingly. Stops the sync when establishment
 * or supervision expires, or when CTE type filtering requested termination.
 *
 * @param[in] done Event done node whose param references the sync ULL header
 */
void ull_sync_done(struct node_rx_event_done *done)
{
	struct ll_sync_set *sync;

	/* Get reference to ULL context */
	sync = CONTAINER_OF(done->param, struct ll_sync_set, ull);

	/* Do nothing if local terminate requested or sync lost */
	if (unlikely(sync->is_stop || !sync->timeout_reload)) {
		return;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
#if defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	if (done->extra.sync_term) {
#else
	if (sync->is_term) {
#endif /* CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
		/* In case the periodic advertising list filtering is not used the synchronization
		 * must be terminated and host notification must be send.
		 * In case the periodic advertising list filtering is used the synchronization with
		 * this particular periodic advertiser but search for other one from the list.
		 *
		 * Stop periodic advertising sync ticker and clear variables informing the
		 * sync is pending. That is a step to completely terminate the synchronization.
		 * In case search for another periodic advertiser it allows to setup new ticker for
		 * that.
		 */
		sync_ticker_cleanup(sync, NULL);
	} else
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
	{
		uint32_t ticks_drift_minus;
		uint32_t ticks_drift_plus;
		uint16_t elapsed_event;
		struct lll_sync *lll;
		uint16_t skip_event;
		uint8_t force_lll;
		uint16_t lazy;
		uint8_t force;

		lll = &sync->lll;

		/* Events elapsed used in timeout checks below */
		skip_event = lll->skip_event;

		/* Sync drift compensation and new skip calculation */
		ticks_drift_plus = 0U;
		ticks_drift_minus = 0U;
		if (done->extra.trx_cnt) {
			/* Calculate drift in ticks unit */
			ull_drift_ticks_get(done, &ticks_drift_plus, &ticks_drift_minus);

			/* Enforce skip */
			lll->skip_event = sync->skip;

			/* Reset failed to establish sync countdown */
			sync->sync_expire = 0U;
		}

		/* Number of events covered by this done, including lazy ones */
		elapsed_event = lll->lazy_prepare + 1U;

		/* Reset supervision countdown */
		if (done->extra.crc_valid) {
			sync->timeout_expire = 0U;
		}

		/* check sync failed to establish */
		else if (sync->sync_expire) {
			if (sync->sync_expire > elapsed_event) {
				sync->sync_expire -= elapsed_event;
			} else {
				sync_ticker_cleanup(sync, ticker_stop_sync_expire_op_cb);

				return;
			}
		}

		/* If anchor point not sync-ed, start timeout countdown, and break skip if any */
		else if (!sync->timeout_expire) {
			sync->timeout_expire = sync->timeout_reload;
		}

		/* check timeout */
		force = 0U;
		force_lll = 0U;
		if (sync->timeout_expire) {
			if (sync->timeout_expire > elapsed_event) {
				sync->timeout_expire -= elapsed_event;

				/* break skip */
				lll->skip_event = 0U;

				/* Force the last few events before timeout so
				 * LLL does not yield the radio slot.
				 */
				if (sync->timeout_expire <= 6U) {
					force_lll = 1U;

					force = 1U;
				} else if (skip_event) {
					force = 1U;
				}
			} else {
				sync_ticker_cleanup(sync, ticker_stop_sync_lost_op_cb);

				return;
			}
		}

		lll->forced = force_lll;

		/* Check if skip needs update */
		lazy = 0U;
		if ((force) || (skip_event != lll->skip_event)) {
			lazy = lll->skip_event + 1U;
		}

		/* Update Sync ticker instance */
		if (ticks_drift_plus || ticks_drift_minus || lazy || force) {
			uint16_t sync_handle = ull_sync_handle_get(sync);
			uint32_t ticker_status;

			/* Call to ticker_update can fail under the race
			 * condition where in the periodic sync role is being
			 * stopped but at the same time it is preempted by
			 * periodic sync event that gets into close state.
			 * Accept failure when periodic sync role is being
			 * stopped.
			 */
			ticker_status =
				ticker_update(TICKER_INSTANCE_ID_CTLR,
					      TICKER_USER_ID_ULL_HIGH,
					      (TICKER_ID_SCAN_SYNC_BASE +
					       sync_handle),
					      ticks_drift_plus,
					      ticks_drift_minus, 0, 0,
					      lazy, force,
					      ticker_update_op_cb, sync);
			LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
				  (ticker_status == TICKER_STATUS_BUSY) ||
				  ((void *)sync == ull_disable_mark_get()));
		}
	}
}
1427
/* Parse ACAD data for a Channel Map Update Indication and stage the new
 * channel map in the sync context's double buffer.
 *
 * The update is only staged (chm_last advanced) when no other channel map
 * update is in progress, the AD structure length matches exactly, and the
 * new map has at least the minimum number of used channels. LLL applies the
 * staged map when the instant is reached.
 *
 * @param[in] sync_handle Handle of the sync the ACAD was received on
 * @param[in] acad        Additional Controller Advertising Data
 * @param[in] acad_len    Length of the ACAD in octets
 */
void ull_sync_chm_update(uint8_t sync_handle, uint8_t *acad, uint8_t acad_len)
{
	struct pdu_adv_sync_chm_upd_ind *chm_upd_ind;
	struct ll_sync_set *sync;
	struct lll_sync *lll;
	uint8_t chm_last;
	uint16_t ad_len;

	/* Get reference to LLL context */
	sync = ull_sync_set_get(sync_handle);
	LL_ASSERT(sync);
	lll = &sync->lll;

	/* Ignore if already in progress */
	if (lll->chm_last != lll->chm_first) {
		return;
	}

	/* Find the Channel Map Update Indication */
	do {
		/* Pick the length and find the Channel Map Update Indication */
		ad_len = acad[PDU_ADV_DATA_HEADER_LEN_OFFSET];
		if (ad_len &&
		    (acad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] ==
		     PDU_ADV_DATA_TYPE_CHANNEL_MAP_UPDATE_IND)) {
			break;
		}

		/* Add length field size */
		ad_len += 1U;
		if (ad_len < acad_len) {
			acad_len -= ad_len;
		} else {
			/* No Channel Map Update Indication in the ACAD */
			return;
		}

		/* Move to next AD data */
		acad += ad_len;
	} while (acad_len);

	/* Validate the size of the Channel Map Update Indication */
	if (ad_len != (sizeof(*chm_upd_ind) + 1U)) {
		return;
	}

	/* Pick the parameters into the procedure context */
	chm_last = lll->chm_last + 1U;
	if (chm_last == DOUBLE_BUFFER_SIZE) {
		chm_last = 0U;
	}

	chm_upd_ind = (void *)&acad[PDU_ADV_DATA_HEADER_DATA_OFFSET];
	(void)memcpy(lll->chm[chm_last].data_chan_map, chm_upd_ind->chm,
		     sizeof(lll->chm[chm_last].data_chan_map));
	lll->chm[chm_last].data_chan_count =
		util_ones_count_get(lll->chm[chm_last].data_chan_map,
				    sizeof(lll->chm[chm_last].data_chan_map));
	if (lll->chm[chm_last].data_chan_count < CHM_USED_COUNT_MIN) {
		/* Ignore channel map, invalid available channel count */
		return;
	}

	lll->chm_instant = sys_le16_to_cpu(chm_upd_ind->instant);

	/* Set Channel Map Update Procedure in progress */
	lll->chm_last = chm_last;
}
1495
1496 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
/* @brief Function updates periodic sync slot duration.
 *
 * @param[in] sync          Pointer to sync instance
 * @param[in] slot_plus_us  Number of microseconds to add to the ticker slot
 * @param[in] slot_minus_us Number of microseconds to subtract from the ticker slot
 *
 * @retval 0 Successful ticker slot update.
 * @retval -ENOENT Ticker node related with provided sync is already stopped.
 * @retval -ENOMEM Couldn't enqueue update ticker job.
 * @retval -EFAULT Something else went wrong.
 */
int ull_sync_slot_update(struct ll_sync_set *sync, uint32_t slot_plus_us,
			 uint32_t slot_minus_us)
{
	uint32_t volatile ret_cb;
	uint32_t ret;

	/* Request the ticker job to adjust this sync's slot reservation;
	 * completion is signalled through sem_ticker_cb by the op callback.
	 */
	ret_cb = TICKER_STATUS_BUSY;
	ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
			    TICKER_USER_ID_THREAD,
			    (TICKER_ID_SCAN_SYNC_BASE +
			     ull_sync_handle_get(sync)),
			    0, 0,
			    HAL_TICKER_US_TO_TICKS(slot_plus_us),
			    HAL_TICKER_US_TO_TICKS(slot_minus_us),
			    0, 0,
			    ticker_update_op_status_give,
			    (void *)&ret_cb);
	if (ret == TICKER_STATUS_BUSY || ret == TICKER_STATUS_SUCCESS) {
		/* Wait for callback or clear semaphore if the callback was
		 * already executed.
		 */
		k_sem_take(&sem_ticker_cb, K_FOREVER);

		if (ret_cb == TICKER_STATUS_FAILURE) {
			return -EFAULT; /* Something went wrong */
		} else {
			return 0;
		}
	} else {
		if (ret_cb != TICKER_STATUS_BUSY) {
			/* Ticker callback was executed and job enqueue was successful.
			 * Call k_sem_take to clear ticker callback semaphore.
			 */
			k_sem_take(&sem_ticker_cb, K_FOREVER);
		}
		/* Ticker was already stopped or job was not enqueued. */
		return (ret_cb == TICKER_STATUS_FAILURE) ? -ENOENT : -ENOMEM;
	}
}
1547 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1548
1549 static int init_reset(void)
1550 {
1551 /* Initialize sync pool. */
1552 mem_init(ll_sync_pool, sizeof(struct ll_sync_set),
1553 sizeof(ll_sync_pool) / sizeof(struct ll_sync_set),
1554 &sync_free);
1555
1556 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1557 k_sem_init(&sem_ticker_cb, 0, 1);
1558 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1559
1560 return 0;
1561 }
1562
/* Acquire a sync context from the free pool, or NULL when exhausted. */
static inline struct ll_sync_set *sync_acquire(void)
{
	return mem_acquire(&sync_free);
}
1567
/* Allocate and initialize a sync context together with the rx nodes and
 * links needed to report sync established and sync lost events.
 *
 * All acquired resources are released on any allocation failure, so on a
 * NULL return nothing is leaked. A non-zero timeout marks the context as
 * in-use for sync create; timeout_reload stays zero until established.
 *
 * @param[in] sid       Advertising SID to synchronize to
 * @param[in] timeout   Sync_Timeout in 10 ms units
 * @param[in] skip      Number of events that may be skipped
 * @param[in] cte_type  Host requested CTE type filter
 * @param[in] rx_enable Initial report enable state
 * @param[in] nodups    Filter duplicate reports when non-zero
 *
 * @return Initialized sync context, or NULL on resource exhaustion.
 */
static struct ll_sync_set *ull_sync_create(uint8_t sid, uint16_t timeout, uint16_t skip,
					   uint8_t cte_type, uint8_t rx_enable, uint8_t nodups)
{
	memq_link_t *link_sync_estab;
	memq_link_t *link_sync_lost;
	struct node_rx_pdu *node_rx;
	struct lll_sync *lll;
	struct ll_sync_set *sync;

	link_sync_estab = ll_rx_link_alloc();
	if (!link_sync_estab) {
		return NULL;
	}

	link_sync_lost = ll_rx_link_alloc();
	if (!link_sync_lost) {
		ll_rx_link_release(link_sync_estab);

		return NULL;
	}

	node_rx = ll_rx_alloc();
	if (!node_rx) {
		ll_rx_link_release(link_sync_lost);
		ll_rx_link_release(link_sync_estab);

		return NULL;
	}

	sync = sync_acquire();
	if (!sync) {
		ll_rx_release(node_rx);
		ll_rx_link_release(link_sync_lost);
		ll_rx_link_release(link_sync_estab);

		return NULL;
	}

	sync->peer_addr_resolved = 0U;

	/* Initialize sync context */
	node_rx->hdr.link = link_sync_estab;
	sync->node_rx_lost.rx.hdr.link = link_sync_lost;

	/* Make sure that the node_rx_sync_establ hasn't got anything assigned. It is used to
	 * mark when sync establishment is in progress.
	 */
	LL_ASSERT(!sync->node_rx_sync_estab);
	sync->node_rx_sync_estab = node_rx;

	/* Reporting initially enabled/disabled */
	sync->rx_enable = rx_enable;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
	sync->nodups = nodups;
#endif
	sync->skip = skip;
	sync->is_stop = 0U;

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	sync->enc = 0U;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */

	/* NOTE: Use timeout not zero to represent sync context used for sync
	 * create.
	 */
	sync->timeout = timeout;

	/* NOTE: Use timeout_reload not zero to represent sync established. */
	sync->timeout_reload = 0U;
	sync->timeout_expire = 0U;

	/* Remember the SID */
	sync->sid = sid;

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	/* Reset Broadcast Isochronous Group Sync Establishment */
	sync->iso.sync_iso = NULL;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */

	/* Initialize sync LLL context */
	lll = &sync->lll;
	lll->lll_aux = NULL;
	lll->is_rx_enabled = sync->rx_enable;
	lll->skip_prepare = 0U;
	lll->skip_event = 0U;
	lll->window_widening_prepare_us = 0U;
	lll->window_widening_event_us = 0U;
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	lll->cte_type = cte_type;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	ull_df_sync_cfg_init(&lll->df_cfg);
	LL_ASSERT(!lll->node_cte_incomplete);
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* Initialise ULL and LLL headers */
	ull_hdr_init(&sync->ull);
	lll_hdr_init(lll, sync);

	return sync;
}
1671
/* Stop the sync's periodic ticker and mark the sync as not established.
 *
 * @param[in] sync       Sync context whose ticker is stopped
 * @param[in] stop_op_cb Callback invoked on ticker stop completion, or NULL
 *                       when no follow-up action (e.g. lost/expire report)
 *                       is required
 */
static void sync_ticker_cleanup(struct ll_sync_set *sync, ticker_op_func stop_op_cb)
{
	uint16_t sync_handle = ull_sync_handle_get(sync);
	uint32_t ret;

	/* Stop Periodic Sync Ticker */
	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			  TICKER_ID_SCAN_SYNC_BASE + sync_handle, stop_op_cb, (void *)sync);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));

	/* Mark sync context not sync established */
	sync->timeout_reload = 0U;
}
1686
/* Ticker expiry callback for the sync radio event.
 *
 * Runs in the ULL_HIGH context; packages the timing parameters into a
 * static prepare param and enqueues a mayfly to kick the LLL prepare
 * (create-prepare until established, regular prepare afterwards).
 *
 * NOTE: the static mfy_lll_prepare/p pair relies on one prepare being in
 * flight at a time per this ticker callback context.
 */
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static memq_link_t link_lll_prepare;
	static struct mayfly mfy_lll_prepare = {
		0, 0, &link_lll_prepare, NULL, NULL};
	static struct lll_prepare_param p;
	struct ll_sync_set *sync = param;
	struct lll_sync *lll;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_O(1);

	lll = &sync->lll;

	/* Commit receive enable changed value */
	lll->is_rx_enabled = sync->rx_enable;

	/* Increment prepare reference count */
	ref = ull_ref_inc(&sync->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = remainder;
	p.lazy = lazy;
	p.force = force;
	p.param = lll;
	mfy_lll_prepare.param = &p;
	mfy_lll_prepare.fp = sync->lll_sync_prepare;

	/* Kick LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0,
			     &mfy_lll_prepare);
	LL_ASSERT(!ret);

	DEBUG_RADIO_PREPARE_O(1);
}
1727
1728 static void ticker_start_op_cb(uint32_t status, void *param)
1729 {
1730 ARG_UNUSED(param);
1731 LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1732 }
1733
1734 static void ticker_update_op_cb(uint32_t status, void *param)
1735 {
1736 LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
1737 param == ull_disable_mark_get());
1738 }
1739
1740 static void ticker_stop_sync_expire_op_cb(uint32_t status, void *param)
1741 {
1742 uint32_t retval;
1743 static memq_link_t link;
1744 static struct mayfly mfy = {0, 0, &link, NULL, sync_expire};
1745
1746 LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1747
1748 mfy.param = param;
1749
1750 retval = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_ULL_HIGH,
1751 0, &mfy);
1752 LL_ASSERT(!retval);
1753 }
1754
/* Report that sync establishment failed (establishment countdown expired).
 *
 * Reuses the pre-allocated sync established node to send a
 * BT_HCI_ERR_CONN_FAIL_TO_ESTAB status to the Host, and clears
 * node_rx_sync_estab to mark the establish attempt as completed.
 *
 * @param[in] param Sync context (struct ll_sync_set *)
 */
static void sync_expire(void *param)
{
	struct ll_sync_set *sync = param;
	struct node_rx_sync *se;
	struct node_rx_pdu *rx;

	/* Generate Periodic advertising sync failed to establish */
	rx = (void *)sync->node_rx_sync_estab;
	rx->hdr.handle = LLL_HANDLE_INVALID;

	/* Clear the node to mark the sync establish as being completed.
	 * In this case the completion reason is sync expire.
	 */
	sync->node_rx_sync_estab = NULL;

	/* NOTE: struct node_rx_sync_estab has uint8_t member following the
	 * struct node_rx_hdr to store the reason.
	 */
	se = (void *)rx->pdu;
	se->status = BT_HCI_ERR_CONN_FAIL_TO_ESTAB;

	/* NOTE: footer param has already been populated during sync setup */

	/* Enqueue the sync failed to established towards ULL context */
	ll_rx_put_sched(rx->hdr.link, rx);
}
1781
/* Ticker stop completion callback used when sync supervision timed out.
 *
 * Defers generation of the sync lost report to the ULL_HIGH context via a
 * mayfly running sync_lost(), unless a thread-context terminate raced with
 * the loss (then the terminate path generates the notification).
 *
 * @param[in] status Ticker stop operation status
 * @param[in] param  Sync context (struct ll_sync_set *)
 */
static void ticker_stop_sync_lost_op_cb(uint32_t status, void *param)
{
	uint32_t retval;
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, sync_lost};

	/* When in race between terminate requested in thread context and
	 * sync lost scenario, do not generate the sync lost node rx from here
	 */
	if (status != TICKER_STATUS_SUCCESS) {
		LL_ASSERT(param == ull_disable_mark_get());

		return;
	}

	mfy.param = param;

	retval = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_ULL_HIGH,
				0, &mfy);
	LL_ASSERT(!retval);
}
1803
/* Report sync lost to the Host, or fall back to the failed-to-establish
 * report when the sync was never established (create-prepare still active).
 *
 * Also reports BIG sync lost when a Broadcast ISO sync create was pending
 * on this periodic sync.
 *
 * @param[in] param Sync context (struct ll_sync_set *)
 */
static void sync_lost(void *param)
{
	struct ll_sync_set *sync;
	struct node_rx_pdu *rx;

	/* sync established was not generated yet, no free node rx */
	sync = param;
	if (sync->lll_sync_prepare != lll_sync_prepare) {
		sync_expire(param);

		return;
	}

	/* Generate Periodic advertising sync lost */
	rx = (void *)&sync->node_rx_lost;
	rx->hdr.handle = ull_sync_handle_get(sync);
	rx->hdr.type = NODE_RX_TYPE_SYNC_LOST;
	rx->rx_ftr.param = sync;

	/* Enqueue the sync lost towards ULL context */
	ll_rx_put_sched(rx->hdr.link, rx);

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	if (sync->iso.sync_iso) {
		/* ISO create BIG flag in the periodic advertising context is still set */
		struct ll_sync_iso_set *sync_iso;

		sync_iso = sync->iso.sync_iso;

		rx = (void *)&sync_iso->node_rx_lost;
		rx->hdr.handle = sync_iso->big_handle;
		rx->hdr.type = NODE_RX_TYPE_SYNC_ISO;
		rx->rx_ftr.param = sync_iso;
		*((uint8_t *)rx->pdu) = BT_HCI_ERR_CONN_FAIL_TO_ESTAB;

		/* Enqueue the sync iso lost towards ULL context */
		ll_rx_put_sched(rx->hdr.link, rx);
	}
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
}
1844
1845 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
1846 static struct ll_sync_set *sync_is_create_get(uint16_t handle)
1847 {
1848 struct ll_sync_set *sync;
1849
1850 sync = ull_sync_set_get(handle);
1851 if (!sync || !sync->timeout) {
1852 return NULL;
1853 }
1854
1855 return sync;
1856 }
1857
1858 static bool peer_sid_sync_exists(uint8_t const peer_id_addr_type,
1859 uint8_t const *const peer_id_addr,
1860 uint8_t sid)
1861 {
1862 uint16_t handle;
1863
1864 for (handle = 0U; handle < CONFIG_BT_PER_ADV_SYNC_MAX; handle++) {
1865 struct ll_sync_set *sync = sync_is_create_get(handle);
1866
1867 if (sync &&
1868 (sync->peer_id_addr_type == peer_id_addr_type) &&
1869 !memcmp(sync->peer_id_addr, peer_id_addr, BDADDR_SIZE) &&
1870 (sync->sid == sid)) {
1871 return true;
1872 }
1873 }
1874
1875 return false;
1876 }
1877 #endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */
1878
1879 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
/* Ticker update completion callback for thread-context slot updates.
 *
 * Stores the operation status into the caller-provided location and gives
 * the semaphore the thread in ull_sync_slot_update() is waiting on.
 */
static void ticker_update_op_status_give(uint32_t status, void *param)
{
	*((uint32_t volatile *)param) = status;

	k_sem_give(&sem_ticker_cb);
}
1886 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1887
1888 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
1889 !defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
1890 static struct pdu_cte_info *pdu_cte_info_get(struct pdu_adv *pdu)
1891 {
1892 struct pdu_adv_com_ext_adv *com_hdr;
1893 struct pdu_adv_ext_hdr *hdr;
1894
1895 com_hdr = &pdu->adv_ext_ind;
1896 hdr = &com_hdr->ext_hdr;
1897
1898 if (!com_hdr->ext_hdr_len || (com_hdr->ext_hdr_len != 0 && !hdr->cte_info)) {
1899 return NULL;
1900 }
1901
1902 /* Make sure there are no fields that are not allowed for AUX_SYNC_IND and AUX_CHAIN_IND */
1903 LL_ASSERT(!hdr->adv_addr);
1904 LL_ASSERT(!hdr->tgt_addr);
1905
1906 return (struct pdu_cte_info *)hdr->data;
1907 }
1908 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && !CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
1909
1910 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
/* Handle reception of Periodic Advertising Sync Transfer (PAST) data received
 * over @p conn (i.e. an LL_PERIODIC_SYNC_IND from the peer): allocate a local
 * sync instance from the transferred SyncInfo and hand it over for setup.
 *
 * @param conn                  Connection on which the sync info was received;
 *                              conn->past holds the locally configured PAST
 *                              receiver parameters (mode, timeout, skip,
 *                              cte_type).
 * @param service_data          Service data from the indication (passed
 *                              through to setup).
 * @param si                    SyncInfo field describing the periodic train.
 * @param conn_event_count      Connection event counter referenced by the
 *                              indication (presumably the peer's reference
 *                              event; see offset computation below).
 * @param last_pa_event_counter Last known periodic advertising event counter.
 * @param sid                   Advertising SID of the periodic train.
 * @param addr_type             Peer identity address type.
 * @param sca                   Peer's sleep clock accuracy.
 * @param phy                   PHY of the periodic advertising.
 * @param adv_addr              Peer identity address (BDADDR_SIZE bytes).
 * @param sync_conn_event_count Connection event in which the indication was
 *                              received/synchronized.
 * @param addr_resolved         Whether the peer address was RPA-resolved.
 *
 * No return value; silently ignores the indication when PAST reception is
 * disabled, a duplicate sync exists, or no sync instance can be allocated.
 */
void ull_sync_transfer_received(struct ll_conn *conn, uint16_t service_data,
				struct pdu_adv_sync_info *si, uint16_t conn_event_count,
				uint16_t last_pa_event_counter, uint8_t sid,
				uint8_t addr_type, uint8_t sca, uint8_t phy,
				uint8_t *adv_addr, uint16_t sync_conn_event_count,
				uint8_t addr_resolved)
{
	struct ll_sync_set *sync;
	uint16_t conn_evt_current;
	uint8_t rx_enable;
	uint8_t nodups;

	if (conn->past.mode == BT_HCI_LE_PAST_MODE_NO_SYNC) {
		/* Ignore LL_PERIODIC_SYNC_IND - see Bluetooth Core Specification v5.4
		 * Vol 6, Part E, Section 7.8.91
		 */
		return;
	}

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
	/* Do not sync twice to the same peer and same SID */
	if (peer_sid_sync_exists(addr_type, adv_addr, sid)) {
		return;
	}
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */

	/* Map the configured PAST mode onto report enable/duplicate filtering:
	 * NO_REPORTS -> reporting disabled; SYNC_FILTER_DUPLICATES -> enabled
	 * with duplicate filtering; otherwise enabled without filtering.
	 */
	nodups = (conn->past.mode == BT_HCI_LE_PAST_MODE_SYNC_FILTER_DUPLICATES) ? 1U : 0U;
	rx_enable = (conn->past.mode == BT_HCI_LE_PAST_MODE_NO_REPORTS) ? 0U : 1U;

	/* Allocate and initialize a sync instance using the locally configured
	 * PAST receiver parameters; NULL when no instance is available.
	 */
	sync = ull_sync_create(sid, conn->past.timeout, conn->past.skip, conn->past.cte_type,
			       rx_enable, nodups);
	if (!sync) {
		return;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	/* Reset filter policy in lll_sync */
	sync->lll.filter_policy = 0U;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

	/* Record the peer identity so duplicate-sync checks and address
	 * resolution see this instance.
	 */
	sync->peer_id_addr_type = addr_type;
	sync->peer_addr_resolved = addr_resolved;
	memcpy(sync->peer_id_addr, adv_addr, BDADDR_SIZE);
	sync->lll.phy = phy;

	conn_evt_current = ull_conn_event_counter(conn);

	/* LLCP should have ensured this holds */
	LL_ASSERT(sync_conn_event_count != conn_evt_current);

	/* conn_event_count - conn_evt_current: signed distance (in connection
	 * events, modulo 16-bit wrap) from now to the indication's reference
	 * event — NOTE(review): relies on unsigned wrap-around arithmetic;
	 * confirm against ull_sync_setup_from_sync_transfer's contract.
	 */
	ull_sync_setup_from_sync_transfer(conn, service_data, sync, si,
					  conn_event_count - conn_evt_current,
					  last_pa_event_counter, sync_conn_event_count,
					  sca);
}
1966 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
1967