Lines matching refs:sync

The entries below are cross-reference hits for the identifier sync, each shown with its source line number, the matched source text, and the enclosing function ("local" marks a local variable declaration, "argument" a function parameter). The function names suggest the periodic advertising synchronization (ULL sync) module of a Bluetooth LE controller, e.g. Zephyr's ull_sync.c.

83 static void sync_ticker_cleanup(struct ll_sync_set *sync, ticker_op_func stop_op_cb);
121 struct ll_sync_set *sync; in ll_sync_create() local
126 if (!scan || scan->periodic.sync) { in ll_sync_create()
132 if (!scan_coded || scan_coded->periodic.sync) { in ll_sync_create()
148 sync = ull_sync_create(sid, sync_timeout, skip, sync_cte_type, rx_enable, nodups); in ll_sync_create()
149 if (!sync) { in ll_sync_create()
167 sync->peer_id_addr_type = adv_addr_type; in ll_sync_create()
168 (void)memcpy(sync->peer_id_addr, adv_addr, BDADDR_SIZE); in ll_sync_create()
177 sync->peer_id_addr_type = adv_addr_type; in ll_sync_create()
178 (void)memcpy(sync->peer_id_addr, adv_addr, in ll_sync_create()
179 sizeof(sync->peer_id_addr)); in ll_sync_create()
184 sync->lll.filter_policy = scan->periodic.filter_policy; in ll_sync_create()
188 scan->periodic.sync = sync; in ll_sync_create()
194 scan_coded->periodic.sync = sync; in ll_sync_create()
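
The ll_sync_create() hits above outline the create path: reject the request if the scan context already has a periodic sync pending, allocate a sync set carrying the requested SID, timeout, skip and report options, record the advertiser's identity address, inherit the scan's periodic filter policy, and only then link the new set into the scan context. A minimal standalone sketch of that flow follows; the types and the sync_alloc() helper are simplified stand-ins, not the controller's API.

    #include <stdint.h>
    #include <string.h>

    #define BDADDR_SIZE 6U

    struct sync_set {
        uint8_t sid;
        uint16_t timeout;
        uint16_t skip;
        uint8_t peer_id_addr_type;
        uint8_t peer_id_addr[BDADDR_SIZE];
        uint8_t filter_policy;
    };

    struct scan_set {
        struct {
            uint8_t filter_policy;
            struct sync_set *sync;
        } periodic;
    };

    /* Trivial single-element "pool" so the sketch is self-contained. */
    static struct sync_set sync_pool_one;

    static struct sync_set *sync_alloc(void)
    {
        return &sync_pool_one;
    }

    static int sync_create(struct scan_set *scan, uint8_t sid, uint16_t timeout,
                           uint16_t skip, uint8_t addr_type, const uint8_t *addr)
    {
        struct sync_set *sync;

        /* Only one periodic sync may be pending per scan context. */
        if (!scan || scan->periodic.sync) {
            return -1;
        }

        sync = sync_alloc();
        if (!sync) {
            return -1;
        }

        sync->sid = sid;
        sync->timeout = timeout;
        sync->skip = skip;
        sync->peer_id_addr_type = addr_type;
        (void)memcpy(sync->peer_id_addr, addr, BDADDR_SIZE);
        sync->filter_policy = scan->periodic.filter_policy;

        /* Publish the pending sync on the scan context last. */
        scan->periodic.sync = sync;

        return 0;
    }
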
206 struct ll_sync_set *sync, struct pdu_adv_sync_info *si, in ull_sync_setup_from_sync_transfer() argument
228 lll = &sync->lll; in ull_sync_setup_from_sync_transfer()
257 if (sync->timeout != 0 && interval_us != 0) { in ull_sync_setup_from_sync_transfer()
258 sync->timeout_reload = RADIO_SYNC_EVENTS((sync->timeout * 10U * in ull_sync_setup_from_sync_transfer()
272 if (sync->timeout_reload > CONN_ESTAB_COUNTDOWN) { in ull_sync_setup_from_sync_transfer()
273 uint16_t skip_max = sync->timeout_reload - CONN_ESTAB_COUNTDOWN; in ull_sync_setup_from_sync_transfer()
275 if (sync->skip > skip_max) { in ull_sync_setup_from_sync_transfer()
276 sync->skip = skip_max; in ull_sync_setup_from_sync_transfer()
280 sync->sync_expire = CONN_ESTAB_COUNTDOWN; in ull_sync_setup_from_sync_transfer()
307 sync_handle = ull_sync_handle_get(sync); in ull_sync_setup_from_sync_transfer()
308 rx = (void *)sync->node_rx_sync_estab; in ull_sync_setup_from_sync_transfer()
311 rx->rx_ftr.param = sync; in ull_sync_setup_from_sync_transfer()
317 se_past->rx_sync.phy = sync->lll.phy; in ull_sync_setup_from_sync_transfer()
397 sync->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us); in ull_sync_setup_from_sync_transfer()
407 sync->lll_sync_prepare = lll_sync_create_prepare; in ull_sync_setup_from_sync_transfer()
425 (sync->ull.ticks_slot + ticks_slot_overhead), in ull_sync_setup_from_sync_transfer()
426 ticker_cb, sync, in ull_sync_setup_from_sync_transfer()
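
The ull_sync_setup_from_sync_transfer() hits show the timing bookkeeping: the supervision timeout, given in 10 ms units, is converted into a count of periodic advertising events (RADIO_SYNC_EVENTS), and skip is clamped so that at least CONN_ESTAB_COUNTDOWN events remain to detect a failing sync before the timeout fires. A standalone sketch of that arithmetic; the countdown constant and the rounding are assumptions, not the controller's exact macros.

    #include <stdint.h>

    #define CONN_ESTAB_COUNTDOWN 3U /* assumed value, for illustration only */

    /* Convert a timeout in 10 ms units into a number of periodic advertising
     * events; plain integer division here, the real macro may round differently. */
    static uint16_t sync_events(uint16_t timeout_10ms, uint32_t interval_us)
    {
        return (uint16_t)(((uint32_t)timeout_10ms * 10U * 1000U) / interval_us);
    }

    /* Clamp skip so that at least CONN_ESTAB_COUNTDOWN events are left to
     * notice missed events before the supervision timeout expires. */
    static uint16_t skip_clamp(uint16_t timeout_reload, uint16_t skip)
    {
        if (timeout_reload > CONN_ESTAB_COUNTDOWN) {
            uint16_t skip_max = timeout_reload - CONN_ESTAB_COUNTDOWN;

            return (skip > skip_max) ? skip_max : skip;
        }

        return 0U;
    }

For example, a 2 s timeout (timeout_10ms = 200) over a 100 ms interval gives 20 events, and with the assumed countdown of 3 a requested skip of 19 would be clamped to 17.
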
441 struct ll_sync_set *sync; in ll_sync_create_cancel() local
445 if (!scan || !scan->periodic.sync) { in ll_sync_create_cancel()
451 if (!scan_coded || !scan_coded->periodic.sync) { in ll_sync_create_cancel()
469 sync = scan->periodic.sync; in ll_sync_create_cancel()
470 if (!sync) { in ll_sync_create_cancel()
482 if (!sync->node_rx_sync_estab) { in ll_sync_create_cancel()
487 sync->is_stop = 1U; in ll_sync_create_cancel()
490 if (sync->timeout_reload != 0U) { in ll_sync_create_cancel()
491 uint16_t sync_handle = ull_sync_handle_get(sync); in ll_sync_create_cancel()
499 sync, &sync->lll); in ll_sync_create_cancel()
508 ull_sync_setup_reset(sync); in ll_sync_create_cancel()
512 sync->timeout = 0U; in ll_sync_create_cancel()
515 node_rx = sync->node_rx_sync_estab; in ll_sync_create_cancel()
517 link_sync_lost = sync->node_rx_lost.rx.hdr.link; in ll_sync_create_cancel()
526 sync->node_rx_sync_estab = NULL; in ll_sync_create_cancel()
528 node_rx = (void *)&sync->node_rx_lost; in ll_sync_create_cancel()
541 node_rx->rx_ftr.param = sync; in ll_sync_create_cancel()
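
The ll_sync_create_cancel() hits split into two teardown branches: if the sync ticker was already started (timeout_reload != 0) it must be stopped by handle, otherwise only the scan linkage needs to be reset; in both cases the pre-allocated rx nodes are reclaimed and the "lost"-style node is reused to report the cancellation. A condensed sketch of that branch structure, with stand-in helpers rather than the controller's ticker and ULL APIs.

    #include <stdint.h>
    #include <stddef.h>

    struct sync_set {
        uint8_t is_stop;
        uint16_t timeout;
        uint16_t timeout_reload;
        void *node_rx_sync_estab;
    };

    /* Stand-in helpers; empty on purpose, only the control flow matters here. */
    static void ticker_stop_by_handle(uint16_t handle) { (void)handle; }
    static void scan_link_reset(struct sync_set *sync) { (void)sync; }
    static uint16_t handle_get(struct sync_set *sync) { (void)sync; return 0U; }

    static void sync_create_cancel(struct sync_set *sync)
    {
        /* Block further event preparation while tearing down. */
        sync->is_stop = 1U;

        if (sync->timeout_reload != 0U) {
            /* Ticker already scheduled: stop it before releasing state. */
            ticker_stop_by_handle(handle_get(sync));
        } else {
            /* Not yet scheduled: just unlink it from the scan context. */
            scan_link_reset(sync);
        }

        sync->timeout = 0U;

        /* The pre-allocated establishment node is released; the "sync lost"
         * node is reused to report the cancellation to the host. */
        sync->node_rx_sync_estab = NULL;
    }
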
552 struct ll_sync_set *sync; in ll_sync_terminate() local
555 sync = ull_sync_is_enabled_get(handle); in ll_sync_terminate()
556 if (!sync) { in ll_sync_terminate()
561 sync->is_stop = 1U; in ll_sync_terminate()
566 sync, &sync->lll); in ll_sync_terminate()
573 lll_aux = sync->lll.lll_aux; in ll_sync_terminate()
576 err = ull_scan_aux_stop(&sync->lll); in ll_sync_terminate()
594 if (sync->node_rx_sync_estab) { in ll_sync_terminate()
598 node_rx = (void *)sync->node_rx_sync_estab; in ll_sync_terminate()
604 sync->node_rx_sync_estab = NULL; in ll_sync_terminate()
608 link_sync_lost = sync->node_rx_lost.rx.hdr.link; in ll_sync_terminate()
612 sync->timeout_reload = 0U; in ll_sync_terminate()
614 ull_sync_release(sync); in ll_sync_terminate()
632 struct ll_sync_set *sync; in ll_sync_recv_enable() local
634 sync = ull_sync_is_enabled_get(handle); in ll_sync_recv_enable()
635 if (!sync) { in ll_sync_recv_enable()
640 sync->rx_enable = (enable & BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_ENABLE) ? in ll_sync_recv_enable()
644 sync->nodups = (enable & BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_FILTER_DUPLICATE) ? in ll_sync_recv_enable()
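
The ll_sync_recv_enable() hits decode the HCI LE Set Periodic Advertising Receive Enable bitfield into two flags: bit 0 enables reporting and bit 1 enables duplicate filtering, per the Core Specification's parameter layout. A small sketch of that decode; the constant names below are placeholders for the BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_* defines seen above.

    #include <stdint.h>

    #define RECV_ENABLE_REPORTING  0x01 /* bit 0: reporting enabled */
    #define RECV_ENABLE_DUP_FILTER 0x02 /* bit 1: duplicate filtering enabled */

    struct recv_cfg {
        uint8_t rx_enable;
        uint8_t nodups;
    };

    static void recv_enable_decode(struct recv_cfg *cfg, uint8_t enable)
    {
        cfg->rx_enable = (enable & RECV_ENABLE_REPORTING) ? 1U : 0U;
        cfg->nodups = (enable & RECV_ENABLE_DUP_FILTER) ? 1U : 0U;
    }
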
666 struct ll_sync_set *sync; in ll_sync_transfer() local
675 sync = ull_sync_is_enabled_get(sync_handle); in ll_sync_transfer()
676 if (!sync) { in ll_sync_transfer()
681 return ull_cp_periodic_sync(conn, sync, NULL, service_data); in ll_sync_transfer()
799 struct ll_sync_set *sync; in ull_sync_is_enabled_get() local
801 sync = ull_sync_set_get(handle); in ull_sync_is_enabled_get()
802 if (!sync || !sync->timeout_reload) { in ull_sync_is_enabled_get()
806 return sync; in ull_sync_is_enabled_get()
809 struct ll_sync_set *ull_sync_is_valid_get(struct ll_sync_set *sync) in ull_sync_is_valid_get() argument
811 if (((uint8_t *)sync < (uint8_t *)ll_sync_pool) || in ull_sync_is_valid_get()
812 ((uint8_t *)sync > ((uint8_t *)ll_sync_pool + in ull_sync_is_valid_get()
817 return sync; in ull_sync_is_valid_get()
822 struct ll_sync_set *sync; in ull_sync_lll_is_valid_get() local
824 sync = HDR_LLL2ULL(lll); in ull_sync_lll_is_valid_get()
825 sync = ull_sync_is_valid_get(sync); in ull_sync_lll_is_valid_get()
826 if (sync) { in ull_sync_lll_is_valid_get()
827 return &sync->lll; in ull_sync_lll_is_valid_get()
833 uint16_t ull_sync_handle_get(struct ll_sync_set *sync) in ull_sync_handle_get() argument
835 return mem_index_get(sync, ll_sync_pool, sizeof(struct ll_sync_set)); in ull_sync_handle_get()
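
The ull_sync_is_enabled_get(), ull_sync_is_valid_get() and ull_sync_handle_get() hits all rely on the same idea: sync sets live in a static pool, a handle is simply the element's index in that pool (mem_index_get), and a pointer is trusted only if it falls inside the pool's bounds. A minimal sketch of that mapping with an assumed pool size.

    #include <stdint.h>
    #include <stddef.h>

    #define SYNC_SET_MAX 2U /* assumed pool size */

    struct sync_set {
        uint16_t timeout_reload;
        /* ... */
    };

    static struct sync_set sync_pool[SYNC_SET_MAX];

    /* Handle == index of the element inside the static pool. */
    static uint16_t sync_handle_get(const struct sync_set *sync)
    {
        return (uint16_t)(sync - sync_pool);
    }

    /* A pointer is valid only if it lies within the pool's storage. */
    static struct sync_set *sync_is_valid_get(struct sync_set *sync)
    {
        if ((sync < &sync_pool[0]) || (sync >= &sync_pool[SYNC_SET_MAX])) {
            return NULL;
        }

        return sync;
    }
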
843 void ull_sync_release(struct ll_sync_set *sync) in ull_sync_release() argument
846 struct lll_sync *lll = &sync->lll; in ull_sync_release()
867 sync->timeout = 0U; in ull_sync_release()
872 sync->data_len = 0U; in ull_sync_release()
875 mem_release(sync, &sync_free); in ull_sync_release()
878 bool ull_sync_setup_addr_check(struct ll_sync_set *sync, uint8_t filter_policy, in ull_sync_setup_addr_check() argument
889 sync->peer_id_addr_type = addr_type; in ull_sync_setup_addr_check()
890 (void)memcpy(sync->peer_id_addr, addr, in ull_sync_setup_addr_check()
899 sync->peer_id_addr)) { in ull_sync_setup_addr_check()
903 sync->peer_id_addr_type = addr_type; in ull_sync_setup_addr_check()
906 sync->peer_addr_resolved = 1U; in ull_sync_setup_addr_check()
913 } else if ((addr_type == sync->peer_id_addr_type) && in ull_sync_setup_addr_check()
914 !memcmp(addr, sync->peer_id_addr, BDADDR_SIZE)) { in ull_sync_setup_addr_check()
922 if ((addr_type == sync->peer_id_addr_type) && in ull_sync_setup_addr_check()
923 !memcmp(addr, sync->peer_id_addr, BDADDR_SIZE)) { in ull_sync_setup_addr_check()
925 sync->peer_addr_resolved = 1U; in ull_sync_setup_addr_check()
935 bool ull_sync_setup_sid_match(struct ll_sync_set *sync, struct ll_scan_set *scan, uint8_t sid) in ull_sync_setup_sid_match() argument
940 ull_filter_ull_pal_match(sync->peer_id_addr_type, in ull_sync_setup_sid_match()
941 sync->peer_id_addr, sid)) || in ull_sync_setup_sid_match()
943 (sid == sync->sid))); in ull_sync_setup_sid_match()
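
The ull_sync_setup_addr_check() and ull_sync_setup_sid_match() hits show the two acceptance paths: with a filter policy the periodic advertiser list (and resolved identities) decides, otherwise the advertiser must match the address type, the 6-byte identity address and the SID recorded at create time. A sketch of the explicit-match branch only.

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define BDADDR_SIZE 6U

    /* Explicit branch: address type, device address and SID must all match
     * what was requested in the periodic sync create command. */
    static bool peer_match(uint8_t addr_type, const uint8_t *addr, uint8_t sid,
                           uint8_t peer_id_addr_type, const uint8_t *peer_id_addr,
                           uint8_t peer_sid)
    {
        return (addr_type == peer_id_addr_type) &&
               !memcmp(addr, peer_id_addr, BDADDR_SIZE) &&
               (sid == peer_sid);
    }
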
951 struct ll_sync_set *sync; in ull_sync_setup() local
970 sync = scan->periodic.sync; in ull_sync_setup()
971 lll = &sync->lll; in ull_sync_setup()
1003 sync->interval = interval; in ull_sync_setup()
1007 sync->timeout_reload = RADIO_SYNC_EVENTS((sync->timeout * 10U * in ull_sync_setup()
1020 if (sync->timeout_reload > CONN_ESTAB_COUNTDOWN) { in ull_sync_setup()
1021 uint16_t skip_max = sync->timeout_reload - CONN_ESTAB_COUNTDOWN; in ull_sync_setup()
1023 if (sync->skip > skip_max) { in ull_sync_setup()
1024 sync->skip = skip_max; in ull_sync_setup()
1027 sync->skip = 0U; in ull_sync_setup()
1030 sync->sync_expire = CONN_ESTAB_COUNTDOWN; in ull_sync_setup()
1077 sync_handle = ull_sync_handle_get(sync); in ull_sync_setup()
1082 rx = (void *)sync->node_rx_sync_estab; in ull_sync_setup()
1085 rx->rx_ftr.param = sync; in ull_sync_setup()
1134 sync->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us); in ull_sync_setup()
1144 sync->lll_sync_prepare = lll_sync_create_prepare; in ull_sync_setup()
1153 (sync->ull.ticks_slot + ticks_slot_overhead), in ull_sync_setup()
1154 ticker_cb, sync, in ull_sync_setup()
1160 void ull_sync_setup_reset(struct ll_sync_set *sync) in ull_sync_setup_reset() argument
1167 scan->periodic.sync = NULL; in ull_sync_setup_reset()
1176 scan->periodic.sync = NULL; in ull_sync_setup_reset()
1187 struct ll_sync_set *sync; local
1194 sync = HDR_LLL2ULL(lll);
1197 if (unlikely(sync->is_stop || !sync->timeout_reload)) {
1224 sync->is_term = ((sync_status == SYNC_STAT_TERM) || (sync_status == SYNC_STAT_CONT_SCAN));
1237 rx_establ = (void *)sync->node_rx_sync_estab;
1238 rx_establ->hdr.handle = ull_sync_handle_get(sync);
1243 sync->node_rx_sync_estab = NULL;
1272 sync->lll_sync_prepare = lll_sync_prepare;
1287 struct ll_sync_set *sync; local
1290 sync = CONTAINER_OF(done->param, struct ll_sync_set, ull);
1293 if (unlikely(sync->is_stop || !sync->timeout_reload)) {
1301 if (sync->is_term) {
1313 sync_ticker_cleanup(sync, NULL);
1326 lll = &sync->lll;
1339 lll->skip_event = sync->skip;
1342 sync->sync_expire = 0U;
1349 sync->timeout_expire = 0U;
1353 else if (sync->sync_expire) {
1354 if (sync->sync_expire > elapsed_event) {
1355 sync->sync_expire -= elapsed_event;
1357 sync_ticker_cleanup(sync, ticker_stop_sync_expire_op_cb);
1364 else if (!sync->timeout_expire) {
1365 sync->timeout_expire = sync->timeout_reload;
1371 if (sync->timeout_expire) {
1372 if (sync->timeout_expire > elapsed_event) {
1373 sync->timeout_expire -= elapsed_event;
1378 if (sync->timeout_expire <= 6U) {
1386 sync_ticker_cleanup(sync, ticker_stop_sync_lost_op_cb);
1402 uint16_t sync_handle = ull_sync_handle_get(sync);
1420 ticker_update_op_cb, sync);
1423 ((void *)sync == ull_disable_mark_get()));
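
The sync_established()/sync_done() hits around lines 1287-1423 carry the supervision logic: each completed event decrements either sync_expire (while still establishing) or timeout_expire (once synchronized) by the number of elapsed events, a successful reception resets both, and a countdown reaching zero tears the sync down via the lost callback. A heavily simplified standalone model of that countdown; it mirrors the branch structure visible above, not the controller's exact state machine.

    #include <stdint.h>
    #include <stdbool.h>

    struct sync_timing {
        uint16_t sync_expire;    /* events left to establish the sync */
        uint16_t timeout_expire; /* events left before the sync is lost */
        uint16_t timeout_reload; /* supervision timeout, in events */
    };

    enum sync_evt_result { SYNC_EVT_OK, SYNC_EVT_ESTAB_FAILED, SYNC_EVT_LOST };

    static enum sync_evt_result sync_event_done(struct sync_timing *t, bool rx_ok,
                                                uint16_t elapsed_event)
    {
        if (rx_ok) {
            /* Successful reception: establishment is over and the
             * supervision countdown restarts from its reload value. */
            t->sync_expire = 0U;
            t->timeout_expire = 0U;
        } else if (t->sync_expire) {
            /* Still establishing: count down the establishment window. */
            if (t->sync_expire > elapsed_event) {
                t->sync_expire -= elapsed_event;
            } else {
                return SYNC_EVT_ESTAB_FAILED;
            }
        } else if (!t->timeout_expire) {
            /* First missed event after establishment: arm the timeout. */
            t->timeout_expire = t->timeout_reload;
        }

        if (t->timeout_expire) {
            if (t->timeout_expire > elapsed_event) {
                t->timeout_expire -= elapsed_event;
            } else {
                return SYNC_EVT_LOST;
            }
        }

        return SYNC_EVT_OK;
    }
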
1431 struct ll_sync_set *sync; local
1437 sync = ull_sync_set_get(sync_handle);
1438 LL_ASSERT(sync);
1439 lll = &sync->lll;
1508 int ull_sync_slot_update(struct ll_sync_set *sync, uint32_t slot_plus_us, argument
1518 ull_sync_handle_get(sync)),
1575 struct ll_sync_set *sync; local
1597 sync = sync_acquire();
1598 if (!sync) {
1606 sync->peer_addr_resolved = 0U;
1610 sync->node_rx_lost.rx.hdr.link = link_sync_lost;
1615 LL_ASSERT(!sync->node_rx_sync_estab);
1616 sync->node_rx_sync_estab = node_rx;
1619 sync->rx_enable = rx_enable;
1622 sync->nodups = nodups;
1624 sync->skip = skip;
1625 sync->is_stop = 0U;
1628 sync->enc = 0U;
1634 sync->timeout = timeout;
1637 sync->timeout_reload = 0U;
1638 sync->timeout_expire = 0U;
1641 sync->sid = sid;
1645 sync->iso.sync_iso = NULL;
1649 lll = &sync->lll;
1651 lll->is_rx_enabled = sync->rx_enable;
1666 ull_hdr_init(&sync->ull);
1667 lll_hdr_init(lll, sync);
1669 return sync;
1672 static void sync_ticker_cleanup(struct ll_sync_set *sync, ticker_op_func stop_op_cb) argument
1674 uint16_t sync_handle = ull_sync_handle_get(sync);
1679 TICKER_ID_SCAN_SYNC_BASE + sync_handle, stop_op_cb, (void *)sync);
1684 sync->timeout_reload = 0U;
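
The sync_ticker_cleanup() hits also explain why ull_sync_is_enabled_get() only checks timeout_reload (line 802 above): a non-zero reload doubles as the "sync running" marker, and cleanup clears it once the ticker has been stopped. A tiny sketch of that convention.

    #include <stddef.h>
    #include <stdint.h>

    struct sync_set {
        uint16_t timeout_reload;
    };

    static struct sync_set *sync_is_enabled_get(struct sync_set *sync)
    {
        /* Zero reload means "not running"; lookups treat it as disabled. */
        return (sync && sync->timeout_reload) ? sync : NULL;
    }

    static void sync_ticker_cleanup(struct sync_set *sync)
    {
        /* ... stop the ticker instance for this sync handle ... */
        sync->timeout_reload = 0U; /* mark the sync as no longer running */
    }
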
1695 struct ll_sync_set *sync = param; local
1702 lll = &sync->lll;
1705 lll->is_rx_enabled = sync->rx_enable;
1708 ref = ull_ref_inc(&sync->ull);
1718 mfy_lll_prepare.fp = sync->lll_sync_prepare;
1757 struct ll_sync_set *sync = param; local
1762 rx = (void *)sync->node_rx_sync_estab;
1768 sync->node_rx_sync_estab = NULL;
1806 struct ll_sync_set *sync; local
1810 sync = param;
1811 if (sync->lll_sync_prepare != lll_sync_prepare) {
1818 rx = (void *)&sync->node_rx_lost;
1819 rx->hdr.handle = ull_sync_handle_get(sync);
1821 rx->rx_ftr.param = sync;
1827 if (sync->iso.sync_iso) {
1831 sync_iso = sync->iso.sync_iso;
1848 struct ll_sync_set *sync; local
1850 sync = ull_sync_set_get(handle);
1851 if (!sync || !sync->timeout) {
1855 return sync;
1865 struct ll_sync_set *sync = sync_is_create_get(handle); local
1867 if (sync &&
1868 (sync->peer_id_addr_type == peer_id_addr_type) &&
1869 !memcmp(sync->peer_id_addr, peer_id_addr, BDADDR_SIZE) &&
1870 (sync->sid == sid)) {
1918 struct ll_sync_set *sync; local
1940 sync = ull_sync_create(sid, conn->past.timeout, conn->past.skip, conn->past.cte_type,
1942 if (!sync) {
1948 sync->lll.filter_policy = 0U;
1951 sync->peer_id_addr_type = addr_type;
1952 sync->peer_addr_resolved = addr_resolved;
1953 memcpy(sync->peer_id_addr, adv_addr, BDADDR_SIZE);
1954 sync->lll.phy = phy;
1961 ull_sync_setup_from_sync_transfer(conn, service_data, sync, si,