1 /*
2 * Copyright (c) 2020 Demant
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/kernel.h>
8 #include <zephyr/sys/byteorder.h>
9
10 #include <zephyr/bluetooth/addr.h>
11 #include <zephyr/bluetooth/iso.h>
12
13 #include "util/util.h"
14 #include "util/memq.h"
15 #include "util/mayfly.h"
16 #include "util/dbuf.h"
17
18 #include "hal/ccm.h"
19 #include "hal/ticker.h"
20
21 #include "ticker/ticker.h"
22
23 #include "pdu_df.h"
24 #include "lll/pdu_vendor.h"
25 #include "pdu.h"
26
27 #include "lll.h"
28 #include "lll/lll_vendor.h"
29 #include "lll_clock.h"
30 #include "lll/lll_df_types.h"
31 #include "lll_conn.h"
32 #include "lll_conn_iso.h"
33 #include "lll_central_iso.h"
34
35 #include "isoal.h"
36
37 #include "ull_tx_queue.h"
38
39 #include "ull_conn_types.h"
40 #include "ull_iso_types.h"
41 #include "ull_conn_iso_types.h"
42
43 #include "ull_llcp.h"
44
45 #include "ull_internal.h"
46 #include "ull_sched_internal.h"
47 #include "ull_conn_internal.h"
48 #include "ull_conn_iso_internal.h"
49
50 #include "ll.h"
51 #include "ll_feat.h"
52
53 #include <zephyr/bluetooth/hci_types.h>
54
55 #include "hal/debug.h"
56
57 #define SDU_MAX_DRIFT_PPM 100
58 #define SUB_INTERVAL_MIN 400
59
60 #define STREAMS_PER_GROUP CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP
61
62 #if defined(CONFIG_BT_CTLR_PHY_CODED)
63 #define PHY_VALID_MASK (BT_HCI_ISO_PHY_VALID_MASK)
64 #else
65 #define PHY_VALID_MASK (BT_HCI_ISO_PHY_VALID_MASK & ~BIT(2))
66 #endif
67
68 /* The CIS Create Procedure uses 3 PDU transmissions, plus one connection interval to process the
69  * requested LLCP, hence the minimum relative instant must not be less than 4, i.e. the CIS_REQ PDU
70  * will be transmitted in the next ACL interval.
71  * The +1 also compensates for the fact that we currently do not have a Central implementation that
72  * handles event latencies at the instant. Refer to the `ull_conn_iso_start()` implementation.
73 */
74 #define CIS_CREATE_INSTANT_DELTA_MIN 4U
75
76 #if (CONFIG_BT_CTLR_CENTRAL_SPACING == 0)
77 static void cig_offset_get(struct ll_conn_iso_stream *cis);
78 static void mfy_cig_offset_get(void *param);
79 static void cis_offset_get(struct ll_conn_iso_stream *cis);
80 static void mfy_cis_offset_get(void *param);
81 static void ticker_op_cb(uint32_t status, void *param);
82 #endif /* CONFIG_BT_CTLR_CENTRAL_SPACING == 0 */
83
84 static uint32_t iso_interval_adjusted_bn_max_pdu_get(bool framed, uint32_t iso_interval,
85 uint32_t iso_interval_cig,
86 uint32_t sdu_interval,
87 uint16_t max_sdu, uint8_t *bn,
88 uint8_t *max_pdu);
89 static uint8_t ll_cig_parameters_validate(void);
90 static uint8_t ll_cis_parameters_validate(uint8_t cis_idx, uint8_t cis_id,
91 uint16_t c_sdu, uint16_t p_sdu,
92 uint16_t c_phy, uint16_t p_phy);
93
94 #if defined(CONFIG_BT_CTLR_CONN_ISO_RELIABILITY_POLICY)
95 static uint8_t ll_cis_calculate_ft(uint32_t cig_sync_delay, uint32_t iso_interval_us,
96 uint32_t sdu_interval, uint32_t latency, uint8_t framed);
97 #endif /* CONFIG_BT_CTLR_CONN_ISO_RELIABILITY_POLICY */
98
99 /* Setup cache for CIG commit transaction */
100 static struct {
101 struct ll_conn_iso_group group;
102 uint8_t cis_count;
103 uint8_t c_ft;
104 uint8_t p_ft;
105 uint8_t cis_idx;
106 struct ll_conn_iso_stream stream[CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP];
107 } ll_iso_setup;
108
109 uint8_t ll_cig_parameters_open(uint8_t cig_id,
110 uint32_t c_interval, uint32_t p_interval,
111 uint8_t sca, uint8_t packing, uint8_t framing,
112 uint16_t c_latency, uint16_t p_latency,
113 uint8_t num_cis)
114 {
115 memset(&ll_iso_setup, 0, sizeof(ll_iso_setup));
116
117 ll_iso_setup.group.cig_id = cig_id;
118 ll_iso_setup.group.c_sdu_interval = c_interval;
119 ll_iso_setup.group.p_sdu_interval = p_interval;
120 ll_iso_setup.group.c_latency = c_latency * USEC_PER_MSEC;
121 ll_iso_setup.group.p_latency = p_latency * USEC_PER_MSEC;
122 ll_iso_setup.group.central.sca = sca;
123 ll_iso_setup.group.central.packing = packing;
124 ll_iso_setup.group.central.framing = framing;
125 ll_iso_setup.cis_count = num_cis;
126
127 return ll_cig_parameters_validate();
128 }
129
130 uint8_t ll_cis_parameters_set(uint8_t cis_id,
131 uint16_t c_sdu, uint16_t p_sdu,
132 uint8_t c_phy, uint8_t p_phy,
133 uint8_t c_rtn, uint8_t p_rtn)
134 {
135 uint8_t cis_idx = ll_iso_setup.cis_idx;
136 uint8_t status;
137
138 status = ll_cis_parameters_validate(cis_idx, cis_id, c_sdu, p_sdu, c_phy, p_phy);
139 if (status) {
140 return status;
141 }
142
143 memset(&ll_iso_setup.stream[cis_idx], 0, sizeof(struct ll_conn_iso_stream));
144
145 ll_iso_setup.stream[cis_idx].cis_id = cis_id;
146 ll_iso_setup.stream[cis_idx].c_max_sdu = c_sdu;
147 ll_iso_setup.stream[cis_idx].p_max_sdu = p_sdu;
148 ll_iso_setup.stream[cis_idx].lll.tx.phy = c_phy;
149 ll_iso_setup.stream[cis_idx].lll.tx.phy_flags = PHY_FLAGS_S8;
150 ll_iso_setup.stream[cis_idx].lll.rx.phy = p_phy;
151 ll_iso_setup.stream[cis_idx].lll.rx.phy_flags = PHY_FLAGS_S8;
152 ll_iso_setup.stream[cis_idx].central.c_rtn = c_rtn;
153 ll_iso_setup.stream[cis_idx].central.p_rtn = p_rtn;
154 ll_iso_setup.cis_idx++;
155
156 return BT_HCI_ERR_SUCCESS;
157 }
158
159 /* TODO:
160 * - Calculate ISO_Interval to allow SDU_Interval < ISO_Interval
161 */
162 uint8_t ll_cig_parameters_commit(uint8_t cig_id, uint16_t *handles)
163 {
164 uint16_t cis_created_handles[STREAMS_PER_GROUP];
165 struct ll_conn_iso_stream *cis;
166 struct ll_conn_iso_group *cig;
167 uint32_t iso_interval_cig_us;
168 uint32_t iso_interval_us;
169 uint32_t cig_sync_delay;
170 uint32_t max_se_length;
171 uint32_t c_max_latency;
172 uint32_t p_max_latency;
173 uint16_t handle_iter;
174 uint32_t total_time;
175 bool force_framed;
176 bool cig_created;
177 uint8_t num_cis;
178 uint8_t err;
179
180 /* Intermediate subevent data */
181 struct {
182 uint32_t length;
183 uint8_t total_count;
184 } se[STREAMS_PER_GROUP];
185
186 for (uint8_t i = 0U; i < STREAMS_PER_GROUP; i++) {
187 cis_created_handles[i] = LLL_HANDLE_INVALID;
188 };
189
190 cig_created = false;
191
192 /* If CIG already exists, this is a reconfigure */
193 cig = ll_conn_iso_group_get_by_id(cig_id);
194 if (!cig) {
195 /* CIG does not exist - create it */
196 cig = ll_conn_iso_group_acquire();
197 if (!cig) {
198 ll_iso_setup.cis_idx = 0U;
199
200 /* No space for new CIG */
201 return BT_HCI_ERR_INSUFFICIENT_RESOURCES;
202 }
203 cig->lll.num_cis = 0U;
204 cig_created = true;
205
206 } else if (cig->state != CIG_STATE_CONFIGURABLE) {
207 /* CIG is not in configurable state */
208 return BT_HCI_ERR_CMD_DISALLOWED;
209 }
210
211 /* Store currently configured number of CISes before cache transfer */
212 num_cis = cig->lll.num_cis;
213
214 /* Transfer parameters from configuration cache and clear LLL fields */
215 memcpy(cig, &ll_iso_setup.group, sizeof(struct ll_conn_iso_group));
216
217 cig->state = CIG_STATE_CONFIGURABLE;
218
219 /* Setup LLL parameters */
220 cig->lll.handle = ll_conn_iso_group_handle_get(cig);
221 cig->lll.role = BT_HCI_ROLE_CENTRAL;
222 cig->lll.resume_cis = LLL_HANDLE_INVALID;
223 cig->lll.num_cis = num_cis;
224 force_framed = false;
225
226 if (!cig->central.test) {
227 /* TODO: Calculate ISO_Interval based on SDU_Interval and Max_SDU vs Max_PDU,
228 * taking the policy into consideration. It may also be interesting to select an
229 * ISO_Interval which is less likely to collide with other connections.
230 * For instance:
231 *
232 * SDU_Interval ISO_Interval Max_SDU Max_PDU Collision risk (10 ms)
233 * ------------------------------------------------------------------------
234 * 10 ms 10 ms 40 40 100%
235 * 10 ms 12.5 ms 40 50 25%
236 */
237
238 /* Set ISO_Interval to the closest lower multiple of 1.25 ms not exceeding SDU_Interval,
239  * to be able to handle the throughput. For unframed operation these must be divisible;
240  * if they are not, framed mode must be forced.
241 */
242 iso_interval_us = (cig->c_sdu_interval / ISO_INT_UNIT_US) * ISO_INT_UNIT_US;
243
244 if (iso_interval_us < ISO_INTERVAL_TO_US(BT_HCI_ISO_INTERVAL_MIN)) {
245 /* ISO_Interval is below minimum (5 ms) */
246 iso_interval_us = ISO_INTERVAL_TO_US(BT_HCI_ISO_INTERVAL_MIN);
247 }
248
249 #if defined(CONFIG_BT_CTLR_CONN_ISO_AVOID_SEGMENTATION)
250 /* Check if this is a HAP use case which requires higher link bandwidth to ensure
251 * segmentation is not invoked in ISO-AL.
252 */
253 if (cig->central.framing && cig->c_sdu_interval == 10000U) {
254 iso_interval_us = 7500U; /* us */
255 }
256 #endif
257
258 if (!cig->central.framing && (cig->c_sdu_interval % ISO_INT_UNIT_US)) {
259 /* Framing not requested but requirement for unframed is not met. Force
260 * CIG into framed mode.
261 */
262 force_framed = true;
263 }
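/* Illustration (hypothetical SDU_Interval values for the non-test path above):
 * 10000 us floors to an ISO_Interval of 10000 us and stays unframed; 3750 us
 * floors to 3750 us but is clamped to the 5000 us minimum; 8000 us floors to
 * 7500 us and, as 8000 % 1250 != 0, forces the CIG into framed mode.
 */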
264 } else {
265 iso_interval_us = cig->iso_interval * ISO_INT_UNIT_US;
266 }
267
268 iso_interval_cig_us = iso_interval_us;
269
270 lll_hdr_init(&cig->lll, cig);
271 max_se_length = 0U;
272
273 /* Create all configurable CISes */
274 for (uint8_t i = 0U; i < ll_iso_setup.cis_count; i++) {
275 memq_link_t *link_tx_free;
276 memq_link_t link_tx;
277
278 cis = ll_conn_iso_stream_get_by_id(ll_iso_setup.stream[i].cis_id);
279 if (cis) {
280 /* Check if the Max_SDU reconfiguration violates a datapath by changing a
281  * non-zero Max_SDU, which has an associated datapath, to zero.
282  */
283 if ((cis->c_max_sdu && cis->hdr.datapath_in &&
284 !ll_iso_setup.stream[i].c_max_sdu) ||
285 (cis->p_max_sdu && cis->hdr.datapath_out &&
286 !ll_iso_setup.stream[i].p_max_sdu)) {
287 /* Reconfiguring Max_SDU to zero for a CIS direction that has an
288  * associated datapath is not allowed.
289  */
290 err = BT_HCI_ERR_CMD_DISALLOWED;
291 goto ll_cig_parameters_commit_cleanup;
292 }
293 } else {
294 /* Acquire new CIS */
295 cis = ll_conn_iso_stream_acquire();
296 if (!cis) {
297 /* No space for new CIS */
298 ll_iso_setup.cis_idx = 0U;
299
300 err = BT_HCI_ERR_CONN_LIMIT_EXCEEDED;
301 goto ll_cig_parameters_commit_cleanup;
302 }
303
304 cis_created_handles[i] = ll_conn_iso_stream_handle_get(cis);
305 cig->lll.num_cis++;
306 }
307
308 /* Store TX link and free link before transfer */
309 link_tx_free = cis->lll.link_tx_free;
310 link_tx = cis->lll.link_tx;
311
312 /* Transfer parameters from configuration cache */
313 memcpy(cis, &ll_iso_setup.stream[i], sizeof(struct ll_conn_iso_stream));
314
315 cis->group = cig;
316 cis->framed = cig->central.framing || force_framed;
317
318 cis->lll.link_tx_free = link_tx_free;
319 cis->lll.link_tx = link_tx;
320 cis->lll.handle = ll_conn_iso_stream_handle_get(cis);
321 handles[i] = cis->lll.handle;
322 }
323
324 num_cis = cig->lll.num_cis;
325
326 ll_cig_parameters_commit_retry:
327 handle_iter = UINT16_MAX;
328
329 /* 1) Acquire CIS instances and initialize instance data.
330 * 2) Calculate SE_Length for each CIS and store the largest
331 * 3) Calculate BN
332 * 4) Calculate total number of subevents needed to transfer payloads
333 *
334 * Sequential Interleaved
335 * CIS0 ___█_█_█_____________█_ ___█___█___█_________█_
336 * CIS1 _________█_█_█_________ _____█___█___█_________
337 * CIS_Sub_Interval |.| |...|
338 * CIG_Sync_Delay |............| |............|
339 * CIS_Sync_Delay 0 |............| |............|
340 * CIS_Sync_Delay 1 |......| |..........|
341 * ISO_Interval |.................|.. |.................|..
342 */
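/* Illustration (hypothetical values): with Tx and Rx PDU air times of roughly
 * 220 us each, SE_Length = 220 + EVENT_IFS_US + 220 + EVENT_MSS_US; with
 * BN = 1 and RTN = 1 in both directions, the total subevent count for that
 * CIS is MAX((1 + 1) * 1, (1 + 1) * 1) = 2.
 */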
343 for (uint8_t i = 0U; i < num_cis; i++) {
344 uint32_t mpt_c;
345 uint32_t mpt_p;
346 bool tx;
347 bool rx;
348
349 cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
350
351 if (cig->central.test) {
352 cis->lll.tx.ft = ll_iso_setup.c_ft;
353 cis->lll.rx.ft = ll_iso_setup.p_ft;
354
355 tx = cis->lll.tx.bn && cis->lll.tx.max_pdu;
356 rx = cis->lll.rx.bn && cis->lll.rx.max_pdu;
357 } else {
358 LL_ASSERT(cis->framed || iso_interval_us >= cig->c_sdu_interval);
359
360 tx = cig->c_sdu_interval && cis->c_max_sdu;
361 rx = cig->p_sdu_interval && cis->p_max_sdu;
362
363 /* Use Max_PDU = MIN(<buffer_size>, Max_SDU) as default.
364 * May be changed by set_bn_max_pdu.
365 */
366 cis->lll.tx.max_pdu = MIN(LL_CIS_OCTETS_TX_MAX,
367 cis->c_max_sdu);
368 cis->lll.rx.max_pdu = MIN(LL_CIS_OCTETS_RX_MAX,
369 cis->p_max_sdu);
370
371 /* Calculate BN and Max_PDU (framed) for both
372 * directions
373 */
374 if (tx) {
375 uint32_t iso_interval_adjust_us;
376 uint8_t max_pdu;
377 uint8_t bn;
378
379 bn = cis->lll.tx.bn;
380 max_pdu = cis->lll.tx.max_pdu;
381 iso_interval_adjust_us =
382 iso_interval_adjusted_bn_max_pdu_get(cis->framed,
383 iso_interval_us, iso_interval_cig_us,
384 cig->c_sdu_interval, cis->c_max_sdu, &bn, &max_pdu);
385 if (iso_interval_adjust_us != iso_interval_us) {
386 iso_interval_us = iso_interval_adjust_us;
387
388 goto ll_cig_parameters_commit_retry;
389 }
390 cis->lll.tx.bn = bn;
391 cis->lll.tx.max_pdu = max_pdu;
392 } else {
393 cis->lll.tx.bn = 0U;
394 }
395
396 if (rx) {
397 uint32_t iso_interval_adjust_us;
398 uint8_t max_pdu;
399 uint8_t bn;
400
401 bn = cis->lll.rx.bn;
402 max_pdu = cis->lll.rx.max_pdu;
403 iso_interval_adjust_us =
404 iso_interval_adjusted_bn_max_pdu_get(cis->framed,
405 iso_interval_us, iso_interval_cig_us,
406 cig->p_sdu_interval, cis->p_max_sdu, &bn, &max_pdu);
407 if (iso_interval_adjust_us != iso_interval_us) {
408 iso_interval_us = iso_interval_adjust_us;
409
410 goto ll_cig_parameters_commit_retry;
411 }
412 cis->lll.rx.bn = bn;
413 cis->lll.rx.max_pdu = max_pdu;
414 } else {
415 cis->lll.rx.bn = 0U;
416 }
417 }
418
419 /* Calculate SE_Length */
420 mpt_c = PDU_CIS_MAX_US(cis->lll.tx.max_pdu, tx, cis->lll.tx.phy);
421 mpt_p = PDU_CIS_MAX_US(cis->lll.rx.max_pdu, rx, cis->lll.rx.phy);
422
423 se[i].length = mpt_c + EVENT_IFS_US + mpt_p + EVENT_MSS_US;
424 max_se_length = MAX(max_se_length, se[i].length);
425
426 /* Total number of subevents needed */
427 se[i].total_count = MAX((cis->central.c_rtn + 1) * cis->lll.tx.bn,
428 (cis->central.p_rtn + 1) * cis->lll.rx.bn);
429 }
430
431 cig->lll.iso_interval_us = iso_interval_us;
432 cig->iso_interval = iso_interval_us / ISO_INT_UNIT_US;
433
434 handle_iter = UINT16_MAX;
435 total_time = 0U;
436
437 /* 1) Prepare calculation of the flush timeout by adding up the total time needed to
438 * transfer all payloads, including retransmissions.
439 */
440 if (cig->central.packing == BT_ISO_PACKING_SEQUENTIAL) {
441 /* Sequential CISes - add up the total duration */
442 for (uint8_t i = 0U; i < num_cis; i++) {
443 total_time += se[i].total_count * se[i].length;
444 }
445 }
446
447 handle_iter = UINT16_MAX;
448 cig_sync_delay = 0U;
449
450 /* 1) Calculate the flush timeout (FT) either by dividing the total time needed to transfer
451  *    all payloads, including retransmissions, by the ISO_Interval (low latency policy), or
452  *    by deriving FT from the Max_Transport_Latency and the ISO_Interval (reliability
453  *    policy).
454  * 2) Calculate the number of subevents (NSE) by distributing the total number of subevents
455  *    over FT ISO_Intervals.
456  * 3) Calculate the subinterval as either the individual CIS SE_Length (sequential), or the
457  *    largest SE_Length times the number of CISes (interleaved). The minimum subinterval is
458  *    400 us. 4) Calculate CIG_Sync_Delay.
459 */
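/* Illustration (hypothetical values, low latency policy, sequential packing):
 * a single CIS with a total subevent count of 4 and SE_Length = 740 us gives
 * total_time = 2960 us; with ISO_Interval = 10000 us, FT =
 * DIV_ROUND_UP(2960, 10000) = 1, NSE = DIV_ROUND_UP(4, 1) = 4,
 * Sub_Interval = MAX(400, 740) = 740 us and CIG_Sync_Delay = 4 * 740 = 2960 us.
 */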
460 for (uint8_t i = 0U; i < num_cis; i++) {
461 cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
462
463 if (!cig->central.test) {
464 #if defined(CONFIG_BT_CTLR_CONN_ISO_LOW_LATENCY_POLICY)
465 /* TODO: Only implemented for sequential packing */
466 LL_ASSERT(cig->central.packing == BT_ISO_PACKING_SEQUENTIAL);
467
468 /* Use symmetric flush timeout */
469 cis->lll.tx.ft = DIV_ROUND_UP(total_time, iso_interval_us);
470 cis->lll.rx.ft = cis->lll.tx.ft;
471
472 #elif defined(CONFIG_BT_CTLR_CONN_ISO_RELIABILITY_POLICY)
473 /* Utilize Max_Transport_latency */
474
475 /*
476 * Set CIG_Sync_Delay = ISO_Interval as largest possible CIG_Sync_Delay.
477 * This favors utilizing as much as possible of the Max_Transport_latency,
478 * and spreads out payloads over multiple CIS events (if necessary).
479 */
480 uint32_t cig_sync_delay_us_max = iso_interval_us;
481
482 cis->lll.tx.ft = ll_cis_calculate_ft(cig_sync_delay_us_max, iso_interval_us,
483 cig->c_sdu_interval, cig->c_latency,
484 cis->framed);
485
486 cis->lll.rx.ft = ll_cis_calculate_ft(cig_sync_delay_us_max, iso_interval_us,
487 cig->p_sdu_interval, cig->p_latency,
488 cis->framed);
489
490 if ((cis->lll.tx.ft == 0U) || (cis->lll.rx.ft == 0U)) {
491 /* Invalid FT caused by invalid combination of parameters */
492 err = BT_HCI_ERR_INVALID_PARAM;
493 goto ll_cig_parameters_commit_cleanup;
494 }
495
496 #else
497 LL_ASSERT(0);
498 #endif
499 cis->lll.nse = DIV_ROUND_UP(se[i].total_count, cis->lll.tx.ft);
500 }
501
502 if (cig->central.packing == BT_ISO_PACKING_SEQUENTIAL) {
503 /* Accumulate CIG sync delay for sequential CISes */
504 cis->lll.sub_interval = MAX(SUB_INTERVAL_MIN, se[i].length);
505 cig_sync_delay += cis->lll.nse * cis->lll.sub_interval;
506 } else {
507 /* For interleaved CISes, offset each CIS by a fraction of a subinterval,
508 * positioning them evenly within the subinterval.
509 */
510 cis->lll.sub_interval = MAX(SUB_INTERVAL_MIN, num_cis * max_se_length);
511 cig_sync_delay = MAX(cig_sync_delay,
512 (cis->lll.nse * cis->lll.sub_interval) +
513 (i * cis->lll.sub_interval / num_cis));
514 }
515 }
516
517 cig->sync_delay = cig_sync_delay;
518
519 handle_iter = UINT16_MAX;
520 c_max_latency = 0U;
521 p_max_latency = 0U;
522
523 /* 1) Calculate transport latencies for each CIS and validate against Max_Transport_Latency.
524 * 2) Lay out CISes by updating CIS_Sync_Delay, distributing according to the packing.
525 */
526 for (uint8_t i = 0U; i < num_cis; i++) {
527 uint32_t c_latency;
528 uint32_t p_latency;
529
530 cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
531
532 if (cis->framed) {
533 /* Transport_Latency = CIG_Sync_Delay + FT x ISO_Interval + SDU_Interval */
534 c_latency = cig->sync_delay +
535 (cis->lll.tx.ft * iso_interval_us) +
536 cig->c_sdu_interval;
537 p_latency = cig->sync_delay +
538 (cis->lll.rx.ft * iso_interval_us) +
539 cig->p_sdu_interval;
540
541 } else {
542 /* Transport_Latency = CIG_Sync_Delay + FT x ISO_Interval - SDU_Interval */
543 c_latency = cig->sync_delay +
544 (cis->lll.tx.ft * iso_interval_us) -
545 cig->c_sdu_interval;
546 p_latency = cig->sync_delay +
547 (cis->lll.rx.ft * iso_interval_us) -
548 cig->p_sdu_interval;
549 }
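/* Illustration (hypothetical values): for a framed CIS with
 * CIG_Sync_Delay = 2960 us, FT = 1, ISO_Interval = 10000 us and
 * SDU_Interval = 10000 us, c_latency = 2960 + 10000 + 10000 = 22960 us;
 * the same CIS unframed would give 2960 + 10000 - 10000 = 2960 us.
 */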
550
551 if (!cig->central.test) {
552 /* Make sure specified Max_Transport_Latency is not exceeded */
553 if ((c_latency > cig->c_latency) || (p_latency > cig->p_latency)) {
554 /* Check if we can reduce RTN to meet requested latency */
555 if (!cis->central.c_rtn && !cis->central.p_rtn) {
556 /* Actual latency exceeds the Max. Transport Latency */
557 err = BT_HCI_ERR_INVALID_PARAM;
558
559 /* Release allocated resources and exit */
560 goto ll_cig_parameters_commit_cleanup;
561 }
562
563 /* Reduce the RTN to meet host requested latency.
564 * NOTE: Both central and peripheral retransmission is reduced for
565 * simplicity.
566 */
567 if (cis->central.c_rtn) {
568 cis->central.c_rtn--;
569 }
570 if (cis->central.p_rtn) {
571 cis->central.p_rtn--;
572 }
573
574 goto ll_cig_parameters_commit_retry;
575 }
576 }
577
578 c_max_latency = MAX(c_max_latency, c_latency);
579 p_max_latency = MAX(p_max_latency, p_latency);
580
581 if (cig->central.packing == BT_ISO_PACKING_SEQUENTIAL) {
582 /* Distribute CISes sequentially */
583 cis->sync_delay = cig_sync_delay;
584 cig_sync_delay -= cis->lll.nse * cis->lll.sub_interval;
585 } else {
586 /* Distribute CISes interleaved */
587 cis->sync_delay = cig_sync_delay;
588 cig_sync_delay -= (cis->lll.sub_interval / num_cis);
589 }
590
591 if (cis->lll.nse <= 1) {
592 cis->lll.sub_interval = 0U;
593 }
594 }
595
596 /* Update actual latency */
597 cig->c_latency = c_max_latency;
598 cig->p_latency = p_max_latency;
599
600 #if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
601 uint32_t slot_us;
602
603 /* CIG sync_delay has been calculated considering the configured
604 * packing.
605 */
606 slot_us = cig->sync_delay;
607
608 slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
609
610 /* Populate the ULL hdr with event timings overheads */
611 cig->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);
612 #endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */
613
614 /* Reset params cache */
615 ll_iso_setup.cis_idx = 0U;
616
617 return BT_HCI_ERR_SUCCESS;
618
619 ll_cig_parameters_commit_cleanup:
620 /* Late configuration failure - clean up */
621 for (uint8_t i = 0U; i < ll_iso_setup.cis_count; i++) {
622 if (cis_created_handles[i] != LLL_HANDLE_INVALID) {
623 /* Release CIS instance created in failing configuration */
624 cis = ll_conn_iso_stream_get(cis_created_handles[i]);
625 ll_conn_iso_stream_release(cis);
626 } else {
627 break;
628 }
629 }
630
631 /* If CIG was created in this failed configuration - release it */
632 if (cig_created) {
633 ll_conn_iso_group_release(cig);
634 }
635
636 return err;
637 }
638
639 uint8_t ll_cig_parameters_test_open(uint8_t cig_id, uint32_t c_interval,
640 uint32_t p_interval, uint8_t c_ft,
641 uint8_t p_ft, uint16_t iso_interval,
642 uint8_t sca, uint8_t packing,
643 uint8_t framing, uint8_t num_cis)
644 {
645 memset(&ll_iso_setup, 0, sizeof(ll_iso_setup));
646
647 ll_iso_setup.group.cig_id = cig_id;
648 ll_iso_setup.group.c_sdu_interval = c_interval;
649 ll_iso_setup.group.p_sdu_interval = p_interval;
650 ll_iso_setup.group.iso_interval = iso_interval;
651 ll_iso_setup.group.central.sca = sca;
652 ll_iso_setup.group.central.packing = packing;
653 ll_iso_setup.group.central.framing = framing;
654 ll_iso_setup.group.central.test = 1U;
655 ll_iso_setup.cis_count = num_cis;
656
657 /* TODO: Perhaps move FT to LLL CIG */
658 ll_iso_setup.c_ft = c_ft;
659 ll_iso_setup.p_ft = p_ft;
660
661 return ll_cig_parameters_validate();
662 }
663
664 uint8_t ll_cis_parameters_test_set(uint8_t cis_id, uint8_t nse,
665 uint16_t c_sdu, uint16_t p_sdu,
666 uint16_t c_pdu, uint16_t p_pdu,
667 uint8_t c_phy, uint8_t p_phy,
668 uint8_t c_bn, uint8_t p_bn)
669 {
670 uint8_t cis_idx = ll_iso_setup.cis_idx;
671 uint8_t status;
672
673 status = ll_cis_parameters_validate(cis_idx, cis_id, c_sdu, p_sdu, c_phy, p_phy);
674 if (status) {
675 return status;
676 }
677
678 memset(&ll_iso_setup.stream[cis_idx], 0, sizeof(struct ll_conn_iso_stream));
679
680 ll_iso_setup.stream[cis_idx].cis_id = cis_id;
681 ll_iso_setup.stream[cis_idx].c_max_sdu = c_sdu;
682 ll_iso_setup.stream[cis_idx].p_max_sdu = p_sdu;
683 ll_iso_setup.stream[cis_idx].lll.nse = nse;
684 ll_iso_setup.stream[cis_idx].lll.tx.max_pdu = c_bn ? c_pdu : 0U;
685 ll_iso_setup.stream[cis_idx].lll.rx.max_pdu = p_bn ? p_pdu : 0U;
686 ll_iso_setup.stream[cis_idx].lll.tx.phy = c_phy;
687 ll_iso_setup.stream[cis_idx].lll.tx.phy_flags = PHY_FLAGS_S8;
688 ll_iso_setup.stream[cis_idx].lll.rx.phy = p_phy;
689 ll_iso_setup.stream[cis_idx].lll.rx.phy_flags = PHY_FLAGS_S8;
690 ll_iso_setup.stream[cis_idx].lll.tx.bn = c_bn;
691 ll_iso_setup.stream[cis_idx].lll.rx.bn = p_bn;
692 ll_iso_setup.cis_idx++;
693
694 return BT_HCI_ERR_SUCCESS;
695 }
696
697 uint8_t ll_cis_create_check(uint16_t cis_handle, uint16_t acl_handle)
698 {
699 struct ll_conn *conn;
700
701 conn = ll_connected_get(acl_handle);
702 if (conn) {
703 struct ll_conn_iso_stream *cis;
704
705 /* Verify conn refers to a device acting as central */
706 if (conn->lll.role != BT_HCI_ROLE_CENTRAL) {
707 return BT_HCI_ERR_CMD_DISALLOWED;
708 }
709
710 /* Verify handle validity and association */
711 cis = ll_conn_iso_stream_get(cis_handle);
712
713 if (cis->group && (cis->lll.handle == cis_handle)) {
714 if (cis->established) {
715 /* CIS is already created */
716 return BT_HCI_ERR_CONN_ALREADY_EXISTS;
717 }
718
719 return BT_HCI_ERR_SUCCESS;
720 }
721 }
722
723 return BT_HCI_ERR_UNKNOWN_CONN_ID;
724 }
725
726 void ll_cis_create(uint16_t cis_handle, uint16_t acl_handle)
727 {
728 struct ll_conn_iso_stream *cis;
729 struct ll_conn *conn;
730 int err;
731
732 /* Handles have been verified prior to calling this function */
733 conn = ll_connected_get(acl_handle);
734 cis = ll_conn_iso_stream_get(cis_handle);
735 cis->lll.acl_handle = acl_handle;
736
737 /* Create access address */
738 err = util_aa_le32(cis->lll.access_addr);
739 LL_ASSERT(!err);
740
741 /* Initialize stream states */
742 cis->established = 0;
743 cis->teardown = 0;
744
745 (void)memset(&cis->hdr, 0U, sizeof(cis->hdr));
746
747 /* Initialize TX link */
748 if (!cis->lll.link_tx_free) {
749 cis->lll.link_tx_free = &cis->lll.link_tx;
750 }
751
752 memq_init(cis->lll.link_tx_free, &cis->lll.memq_tx.head, &cis->lll.memq_tx.tail);
753 cis->lll.link_tx_free = NULL;
754
755 /* Initiate CIS Request Control Procedure */
756 if (ull_cp_cis_create(conn, cis) == BT_HCI_ERR_SUCCESS) {
757 LL_ASSERT(cis->group);
758
759 if (cis->group->state == CIG_STATE_CONFIGURABLE) {
760 /* This CIG is now initiating an ISO connection */
761 cis->group->state = CIG_STATE_INITIATING;
762 }
763 }
764 }
765
766 /* Core 5.3 Vol 4, Part E, Section 7.8.100:
767 * The HCI_LE_Remove_CIG command is used by the Central’s Host to remove the CIG
768 * identified by CIG_ID.
769 * This command shall delete the CIG_ID and also delete the Connection_Handles
770 * of the CIS configurations stored in the CIG.
771 * This command shall also remove the isochronous data paths that are associated
772 * with the Connection_Handles of the CIS configurations.
773 */
774 uint8_t ll_cig_remove(uint8_t cig_id)
775 {
776 struct ll_conn_iso_stream *cis;
777 struct ll_conn_iso_group *cig;
778 uint16_t handle_iter;
779
780 cig = ll_conn_iso_group_get_by_id(cig_id);
781 if (!cig) {
782 /* Unknown CIG id */
783 return BT_HCI_ERR_UNKNOWN_CONN_ID;
784 }
785
786 if ((cig->state == CIG_STATE_INITIATING) || (cig->state == CIG_STATE_ACTIVE)) {
787 /* CIG is in initiating- or active state */
788 return BT_HCI_ERR_CMD_DISALLOWED;
789 }
790
791 handle_iter = UINT16_MAX;
792 for (uint8_t i = 0U; i < cig->lll.num_cis; i++) {
793 struct ll_conn *conn;
794
795 cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
796 if (!cis) {
797 break;
798 }
799
800 conn = ll_connected_get(cis->lll.acl_handle);
801
802 if (conn) {
803 if (ull_lp_cc_is_active(conn)) {
804 /* CIG creation is ongoing */
805 return BT_HCI_ERR_CMD_DISALLOWED;
806 }
807 }
808 }
809
810 /* CIG exists and is not active */
811 handle_iter = UINT16_MAX;
812
813 for (uint8_t i = 0U; i < cig->lll.num_cis; i++) {
814 cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
815 if (cis) {
816 /* Release CIS instance */
817 ll_conn_iso_stream_release(cis);
818 }
819 }
820
821 /* Release the CIG instance */
822 ll_conn_iso_group_release(cig);
823
824 return BT_HCI_ERR_SUCCESS;
825 }
826
827 int ull_central_iso_init(void)
828 {
829 return 0;
830 }
831
832 int ull_central_iso_reset(void)
833 {
834 return 0;
835 }
836
837 uint8_t ull_central_iso_setup(uint16_t cis_handle,
838 uint32_t *cig_sync_delay,
839 uint32_t *cis_sync_delay,
840 uint32_t *cis_offset_min,
841 uint32_t *cis_offset_max,
842 uint16_t *conn_event_count,
843 uint8_t *access_addr)
844 {
845 struct ll_conn_iso_stream *cis;
846 struct ll_conn_iso_group *cig;
847 struct ll_conn *conn;
848 uint16_t instant;
849
850 cis = ll_conn_iso_stream_get(cis_handle);
851 if (!cis) {
852 return BT_HCI_ERR_UNSPECIFIED;
853 }
854
855 cig = cis->group;
856 if (!cig) {
857 return BT_HCI_ERR_UNSPECIFIED;
858 }
859
860 /* ACL connection of the new CIS */
861 conn = ll_conn_get(cis->lll.acl_handle);
862 LL_ASSERT(conn != NULL);
863
864 #if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
865 uint16_t event_counter;
866 uint32_t cis_offset;
867
868 event_counter = ull_conn_event_counter(conn);
869 instant = MAX(*conn_event_count, event_counter + 1);
870
871 cis_offset = *cis_offset_min;
872
873 /* Calculate offset for CIS */
874 if (cig->state == CIG_STATE_ACTIVE) {
875 uint32_t time_of_instant;
876 uint32_t cig_ref_point;
877
878 /* CIG is started. Use the CIG reference point and latest ticks_at_expire
879 * for associated ACL, to calculate the offset.
880 * NOTE: The following calculations are done in a 32-bit time
881 * range, with full awareness that the controller clock does
882 * not support the full 32-bit range in microseconds. This is
883 * valid because the purpose is to calculate a difference, and
884 * the spare higher-order bits ensure that no wrapping can
885 * occur before the termination condition of the while loop is
886 * met. Using wrapped time arithmetic would complicate this
887 * calculation.
888 */
889 time_of_instant = HAL_TICKER_TICKS_TO_US(conn->llcp.prep.ticks_at_expire) +
890 EVENT_OVERHEAD_START_US +
891 ((instant - event_counter) * conn->lll.interval * CONN_INT_UNIT_US);
892
893 cig_ref_point = cig->cig_ref_point;
894 while (cig_ref_point < time_of_instant) {
895 cig_ref_point += cig->iso_interval * ISO_INT_UNIT_US;
896 }
897
898 cis_offset = (cig_ref_point - time_of_instant) +
899 (cig->sync_delay - cis->sync_delay);
900
901 /* We have to narrow down the min/max offset to the calculated value */
902 *cis_offset_min = cis_offset;
903 *cis_offset_max = cis_offset;
904 }
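/* Illustration (hypothetical values): if the instant lands 2 ACL events ahead
 * with a 10 ms connection interval, time_of_instant is roughly the ACL anchor
 * plus 20 ms; the CIG reference point is advanced in ISO_Interval steps until
 * it reaches or passes that time, and the CIS offset is the remaining gap plus
 * (CIG_Sync_Delay - CIS_Sync_Delay).
 */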
905
906 cis->offset = cis_offset;
907
908 #else /* !CONFIG_BT_CTLR_JIT_SCHEDULING */
909
910 instant = *conn_event_count;
911
912 if (false) {
913
914 #if defined(CONFIG_BT_CTLR_CENTRAL_SPACING)
915 } else if (CONFIG_BT_CTLR_CENTRAL_SPACING > 0) {
916 uint32_t cis_offset;
917
918 cis_offset = HAL_TICKER_TICKS_TO_US(conn->ull.ticks_slot) +
919 (EVENT_TICKER_RES_MARGIN_US << 1U);
920
921 cis_offset += cig->sync_delay - cis->sync_delay;
922
923 if (cis_offset < *cis_offset_min) {
924 cis_offset = *cis_offset_min;
925 }
926
927 cis->offset = cis_offset;
928 #endif /* CONFIG_BT_CTLR_CENTRAL_SPACING */
929
930 } else {
931 cis->offset = *cis_offset_min;
932 }
933
934 cis->lll.prepared = 0U;
935 #endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */
936
937 #if defined(CONFIG_BT_CTLR_ISOAL_PSN_IGNORE)
938 cis->pkt_seq_num = 0U;
939 #endif /* CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */
940
941 /* It is intentional to initialize to the 39-bit maximum value and roll over to 0 in the
942  * prepare function; the event counter is pre-incremented in the prepare function for the
943  * current ISO event.
944 */
945 cis->lll.event_count_prepare = LLL_CONN_ISO_EVENT_COUNT_MAX;
946 cis->lll.event_count = LLL_CONN_ISO_EVENT_COUNT_MAX;
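/* For illustration: assuming LLL_CONN_ISO_EVENT_COUNT_MAX is the 39-bit
 * maximum (0x7FFFFFFFFF), the pre-increment in the prepare function wraps it
 * to 0, so the CIS event counter starts at 0 for the first ISO event.
 */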
947 cis->lll.next_subevent = 0U;
948 cis->lll.tifs_us = conn->lll.tifs_cis_us;
949 cis->lll.sn = 0U;
950 cis->lll.nesn = 0U;
951 cis->lll.cie = 0U;
952 cis->lll.npi = 0U;
953 cis->lll.flush = LLL_CIS_FLUSH_NONE;
954 cis->lll.active = 0U;
955 cis->lll.datapath_ready_rx = 0U;
956 cis->lll.tx.payload_count = 0U;
957 cis->lll.rx.payload_count = 0U;
958
959 cis->lll.tx.bn_curr = 1U;
960 cis->lll.rx.bn_curr = 1U;
961
962 /* Transfer to caller */
963 *cig_sync_delay = cig->sync_delay;
964 *cis_sync_delay = cis->sync_delay;
965 *cis_offset_min = cis->offset;
966 memcpy(access_addr, cis->lll.access_addr, sizeof(cis->lll.access_addr));
967
968 *conn_event_count = instant;
969
970 return 0U;
971 }
972
973 int ull_central_iso_cis_offset_get(uint16_t cis_handle,
974 uint32_t *cis_offset_min,
975 uint32_t *cis_offset_max,
976 uint16_t *conn_event_count)
977 {
978 struct ll_conn_iso_stream *cis;
979 struct ll_conn_iso_group *cig;
980 struct ll_conn *conn;
981
982 cis = ll_conn_iso_stream_get(cis_handle);
983 LL_ASSERT(cis);
984
985 conn = ll_conn_get(cis->lll.acl_handle);
986 LL_ASSERT(conn != NULL);
987
988 /* `ull_conn_llcp()` (caller of this function) is called before `ull_ref_inc()` hence we do
989 * not need to use `ull_conn_event_counter()`.
990 */
991 *conn_event_count = conn->lll.event_counter + conn->lll.latency_prepare +
992 conn->llcp.prep.lazy + CIS_CREATE_INSTANT_DELTA_MIN;
993
994 /* Provide CIS offset range
995 * CIS_Offset_Max < (connInterval - (CIG_Sync_Delay + T_MSS))
996 */
997 cig = cis->group;
998 *cis_offset_max = (conn->lll.interval * CONN_INT_UNIT_US) -
999 cig->sync_delay;
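/* Illustration (hypothetical values): with connInterval = 50 ms
 * (conn->lll.interval = 40) and CIG_Sync_Delay = 2960 us,
 * CIS_Offset_Max = 50000 - 2960 = 47040 us, and the instant is placed at
 * least CIS_CREATE_INSTANT_DELTA_MIN (4) ACL events after the event in
 * which the CIS_REQ is queued.
 */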
1000
1001 if (IS_ENABLED(CONFIG_BT_CTLR_JIT_SCHEDULING)) {
1002 *cis_offset_min = MAX(CIS_MIN_OFFSET_MIN, EVENT_OVERHEAD_CIS_SETUP_US);
1003 return 0;
1004 }
1005
1006 #if (CONFIG_BT_CTLR_CENTRAL_SPACING == 0)
1007 if (cig->state == CIG_STATE_ACTIVE) {
1008 cis_offset_get(cis);
1009 } else {
1010 cig_offset_get(cis);
1011 }
1012
1013 return -EBUSY;
1014 #else /* CONFIG_BT_CTLR_CENTRAL_SPACING != 0 */
1015
1016 *cis_offset_min = HAL_TICKER_TICKS_TO_US(conn->ull.ticks_slot) +
1017 (EVENT_TICKER_RES_MARGIN_US << 1U);
1018
1019 *cis_offset_min += cig->sync_delay - cis->sync_delay;
1020
1021 return 0;
1022 #endif /* CONFIG_BT_CTLR_CENTRAL_SPACING != 0 */
1023 }
1024
1025 #if (CONFIG_BT_CTLR_CENTRAL_SPACING == 0)
1026 static void cig_offset_get(struct ll_conn_iso_stream *cis)
1027 {
1028 static memq_link_t link;
1029 static struct mayfly mfy = {0, 0, &link, NULL, mfy_cig_offset_get};
1030 uint32_t ret;
1031
1032 mfy.param = cis;
1033 ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1,
1034 &mfy);
1035 LL_ASSERT(!ret);
1036 }
1037
1038 static void mfy_cig_offset_get(void *param)
1039 {
1040 struct ll_conn_iso_stream *cis;
1041 struct ll_conn_iso_group *cig;
1042 uint32_t conn_interval_us;
1043 uint32_t offset_limit_us;
1044 uint32_t ticks_to_expire;
1045 uint32_t offset_max_us;
1046 uint32_t offset_min_us;
1047 struct ll_conn *conn;
1048 int err;
1049
1050 cis = param;
1051 cig = cis->group;
1052
1053 /* Find a free offset that does not overlap other periodically scheduled
1054 * states/roles.
1055 */
1056 err = ull_sched_conn_iso_free_offset_get(cig->ull.ticks_slot,
1057 &ticks_to_expire);
1058 LL_ASSERT(!err);
1059
1060 /* Calculate the offset for the selected CIS in the CIG */
1061 offset_min_us = HAL_TICKER_TICKS_TO_US(ticks_to_expire) +
1062 (EVENT_TICKER_RES_MARGIN_US << 2U);
1063 offset_min_us += cig->sync_delay - cis->sync_delay;
1064
1065 conn = ll_conn_get(cis->lll.acl_handle);
1066 LL_ASSERT(conn != NULL);
1067
1068 /* Ensure the offset is not greater than the ACL interval, considering
1069 * the minimum CIS offset requirement.
1070 */
1071 conn_interval_us = (uint32_t)conn->lll.interval * CONN_INT_UNIT_US;
1072 offset_limit_us = conn_interval_us + PDU_CIS_OFFSET_MIN_US;
1073 while (offset_min_us >= offset_limit_us) {
1074 offset_min_us -= conn_interval_us;
1075 }
1076
1077 offset_max_us = conn_interval_us - cig->sync_delay;
1078
1079 ull_cp_cc_offset_calc_reply(conn, offset_min_us, offset_max_us);
1080 }
1081
1082 static void cis_offset_get(struct ll_conn_iso_stream *cis)
1083 {
1084 static memq_link_t link;
1085 static struct mayfly mfy = {0, 0, &link, NULL, mfy_cis_offset_get};
1086 uint32_t ret;
1087
1088 mfy.param = cis;
1089 ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1,
1090 &mfy);
1091 LL_ASSERT(!ret);
1092 }
1093
1094 static void mfy_cis_offset_get(void *param)
1095 {
1096 uint32_t elapsed_acl_us, elapsed_cig_us;
1097 struct ll_conn_iso_stream *cis;
1098 struct ll_conn_iso_group *cig;
1099 uint32_t cig_remainder_us;
1100 uint32_t acl_remainder_us;
1101 uint32_t cig_interval_us;
1102 uint32_t offset_limit_us;
1103 uint32_t ticks_to_expire;
1104 uint32_t remainder = 0U;
1105 uint32_t ticks_current;
1106 uint32_t offset_min_us;
1107 struct ll_conn *conn;
1108 uint16_t latency_cig;
1109 uint8_t ticker_id;
1110 uint16_t lazy;
1111 uint8_t retry;
1112 uint8_t id;
1113
1114 cis = param;
1115 cig = cis->group;
1116 ticker_id = TICKER_ID_CONN_ISO_BASE + ll_conn_iso_group_handle_get(cig);
1117
1118 id = TICKER_NULL;
1119 ticks_to_expire = 0U;
1120 ticks_current = 0U;
1121
1122 /* In the first iteration the actual ticks_current value is returned
1123 * which will be different from the initial value of 0 that is set.
1124 * Subsequent iterations should return the same ticks_current as the
1125 * reference tick.
1126 * In order to avoid infinite updates to the ticker's reference due to
1127 * race conditions with expiring tickers, we retry up to 3 more times.
1128 * Hence, the first iteration gets an actual ticks_current and the 3
1129 * additional attempts serve as retries for when race conditions change
1130 * the value of ticks_current.
1131 *
1132 * ticker_next_slot_get_ext() restarts iterating when updated value of
1133 * ticks_current is returned.
1134 */
1135 retry = 4U;
1136 do {
1137 uint32_t volatile ret_cb;
1138 uint32_t ticks_previous;
1139 uint32_t ret;
1140 bool success;
1141
1142 ticks_previous = ticks_current;
1143
1144 ret_cb = TICKER_STATUS_BUSY;
1145 ret = ticker_next_slot_get_ext(TICKER_INSTANCE_ID_CTLR,
1146 TICKER_USER_ID_ULL_LOW,
1147 &id, &ticks_current,
1148 &ticks_to_expire, &remainder,
1149 &lazy, NULL, NULL,
1150 ticker_op_cb, (void *)&ret_cb);
1151 if (ret == TICKER_STATUS_BUSY) {
1152 /* Busy wait until Ticker Job is enabled after any Radio
1153 * event is done using the Radio hardware. Ticker Job
1154 * ISR is disabled during Radio events in LOW_LAT
1155 * feature to avoid Radio ISR latencies.
1156 */
1157 while (ret_cb == TICKER_STATUS_BUSY) {
1158 ticker_job_sched(TICKER_INSTANCE_ID_CTLR,
1159 TICKER_USER_ID_ULL_LOW);
1160 }
1161 }
1162
1163 success = (ret_cb == TICKER_STATUS_SUCCESS);
1164 LL_ASSERT(success);
1165
1166 LL_ASSERT((ticks_current == ticks_previous) || retry--);
1167
1168 LL_ASSERT(id != TICKER_NULL);
1169 } while (id != ticker_id);
1170
1171 /* Reduce by a tick for a negative remainder and return a positive
1172 * remainder value.
1173 */
1174 hal_ticker_remove_jitter(&ticks_to_expire, &remainder);
1175 cig_remainder_us = remainder;
1176
1177 conn = ll_conn_get(cis->lll.acl_handle);
1178 LL_ASSERT(conn != NULL);
1179
1180 /* Add a tick for negative remainder and return positive remainder
1181 * value.
1182 */
1183 remainder = conn->llcp.prep.remainder;
1184 hal_ticker_add_jitter(&ticks_to_expire, &remainder);
1185 acl_remainder_us = remainder;
1186
1187 /* Calculate the CIS offset in the CIG */
1188 offset_min_us = HAL_TICKER_TICKS_TO_US(ticks_to_expire) +
1189 cig_remainder_us + cig->sync_delay -
1190 acl_remainder_us - cis->sync_delay;
1191
1192 /* Calculate instant latency */
1193 /* 32 bits are sufficient as the maximum connection interval is 4 seconds,
1194 * and the latency count (typically 3) is low enough to avoid 32-bit
1195 * overflow. Refer to ull_central_iso_cis_offset_get().
1196 */
1197 elapsed_acl_us = CIS_CREATE_INSTANT_DELTA_MIN * conn->lll.interval * CONN_INT_UNIT_US;
1198
1199 /* Calculate elapsed CIG intervals until the instant */
1200 cig_interval_us = cig->iso_interval * ISO_INT_UNIT_US;
1201 latency_cig = DIV_ROUND_UP(elapsed_acl_us, cig_interval_us);
1202 elapsed_cig_us = latency_cig * cig_interval_us;
1203
1204 /* Compensate for the difference between ACL elapsed vs CIG elapsed */
1205 offset_min_us += elapsed_cig_us - elapsed_acl_us;
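/* Illustration (hypothetical values): with an ACL interval of 10 ms and a
 * CIG ISO_Interval of 7.5 ms, elapsed_acl_us = 4 * 10000 = 40000 us,
 * latency_cig = DIV_ROUND_UP(40000, 7500) = 6, elapsed_cig_us = 45000 us,
 * so offset_min_us is compensated by 45000 - 40000 = 5000 us.
 */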
1206
1207 /* Ensure that the minimum offset is not greater than the ISO interval,
1208 * while the selected CIS in the CIG still meets the minimum CIS offset
1209 * requirement.
1210 */
1211 offset_limit_us = cig_interval_us + cig->sync_delay - cis->sync_delay;
1212 while (offset_min_us >= offset_limit_us) {
1213 offset_min_us -= cig_interval_us;
1214 }
1215
1216 /* Decrement event_count to compensate for offset_min_us greater than
1217 * CIG interval.
1218 */
1219 if (offset_min_us > cig_interval_us) {
1220 cis->lll.event_count_prepare--;
1221 }
1222
1223 ull_cp_cc_offset_calc_reply(conn, offset_min_us, offset_min_us);
1224 }
1225
1226 static void ticker_op_cb(uint32_t status, void *param)
1227 {
1228 *((uint32_t volatile *)param) = status;
1229 }
1230 #endif /* CONFIG_BT_CTLR_CENTRAL_SPACING == 0 */
1231
1232 static uint32_t iso_interval_adjusted_bn_max_pdu_get(bool framed, uint32_t iso_interval,
1233 uint32_t iso_interval_cig,
1234 uint32_t sdu_interval,
1235 uint16_t max_sdu, uint8_t *bn,
1236 uint8_t *max_pdu)
1237 {
1238 if (framed) {
1239 uint32_t max_drift_us;
1240 uint32_t ceil_f;
1241
1242 /* BT Core 5.4 Vol 6, Part G, Section 2.2:
1243 * Max_PDU >= ((ceil(F) x 5 + ceil(F x Max_SDU)) / BN) + 2
1244 * F = (1 + MaxDrift) x ISO_Interval / SDU_Interval
1245 * SegmentationHeader + TimeOffset = 5 bytes
1246 * Continuation header = 2 bytes
1247 * MaxDrift (Max. allowed SDU delivery timing drift) = 100 ppm
1248 */
1249 max_drift_us = DIV_ROUND_UP(SDU_MAX_DRIFT_PPM * sdu_interval, USEC_PER_SEC);
1250 ceil_f = DIV_ROUND_UP((USEC_PER_SEC + max_drift_us) * (uint64_t)iso_interval,
1251 USEC_PER_SEC * (uint64_t)sdu_interval);
1252 if (false) {
1253 #if defined(CONFIG_BT_CTLR_CONN_ISO_AVOID_SEGMENTATION)
1254 /* To avoid segmentation according to HAP, if the ISO_Interval is less than
1255 * the SDU_Interval, we assume BN=1 and calculate the Max_PDU as:
1256 * Max_PDU = ceil(F / BN) x (5 + Max_SDU)
1257 *
1258 * This is in accordance with the "Core enhancement for ISOAL CR".
1259 *
1260 * This ensures that the drift can be contained in the difference between
1261 * SDU_Interval and link bandwidth. For BN=1, ceil(F) == ceil(F/BN).
1262 */
1263 } else if (iso_interval < sdu_interval) {
1264 *bn = 1;
1265 *max_pdu = ceil_f * (PDU_ISO_SEG_HDR_SIZE + PDU_ISO_SEG_TIMEOFFSET_SIZE +
1266 max_sdu);
1267 #endif
1268 } else {
1269 uint32_t ceil_f_x_max_sdu;
1270 uint16_t max_pdu_bn1;
1271
1272 ceil_f_x_max_sdu = DIV_ROUND_UP(max_sdu * ((USEC_PER_SEC + max_drift_us) *
1273 (uint64_t)iso_interval),
1274 USEC_PER_SEC * (uint64_t)sdu_interval);
1275
1276 /* Strategy: Keep lowest possible BN.
1277 * TODO: Implement other strategies, possibly as policies.
1278 */
1279 max_pdu_bn1 = ceil_f * (PDU_ISO_SEG_HDR_SIZE +
1280 PDU_ISO_SEG_TIMEOFFSET_SIZE) + ceil_f_x_max_sdu;
1281 *bn = DIV_ROUND_UP(max_pdu_bn1, LL_CIS_OCTETS_TX_MAX);
1282 *max_pdu = DIV_ROUND_UP(max_pdu_bn1, *bn) + PDU_ISO_SEG_HDR_SIZE;
1283 }
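/* Illustration (hypothetical values, assuming LL_CIS_OCTETS_TX_MAX = 251):
 * Max_SDU = 100, SDU_Interval = 10000 us and ISO_Interval = 7500 us give
 * MaxDrift = 1 us, ceil(F) = 1 and ceil(F x Max_SDU) = 76, so the single-PDU
 * payload would be 1 * 5 + 76 = 81 octets, BN = ceil(81 / 251) = 1 and
 * Max_PDU = ceil(81 / 1) + 2 = 83 octets.
 */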
1284 } else {
1285 /* For unframed, ISO_Interval must be N x SDU_Interval */
1286 if ((iso_interval % sdu_interval) != 0) {
1287 /* The requested ISO_Interval is increased by the CIG's initial ISO_Interval
1288 * in iterations until it is a multiple of SDU_Interval.
1289 * For example, with a 7.5 ms ISO_Interval and a 10 ms SDU_Interval, 7.5 ms
1290 * is added in iterations to reach a 30 ms ISO_Interval; with 10 ms and
1291 * 7.5 ms, 10 ms is added in iterations to reach the same 30 ms ISO_Interval.
1292 */
1293 iso_interval += iso_interval_cig;
1294 }
1295
1296 /* Core 5.3 Vol 6, Part G section 2.1:
1297 * BN >= ceil(Max_SDU/Max_PDU * ISO_Interval/SDU_Interval)
1298 */
1299 *bn = DIV_ROUND_UP(max_sdu * iso_interval, (*max_pdu) * sdu_interval);
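/* Illustration (hypothetical values): Max_SDU = 100, Max_PDU = 100,
 * ISO_Interval = 20000 us and SDU_Interval = 10000 us give
 * BN = DIV_ROUND_UP(100 * 20000, 100 * 10000) = 2, i.e. two payloads per
 * CIS event.
 */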
1300 }
1301
1302 return iso_interval;
1303 }
1304
1305 static uint8_t ll_cig_parameters_validate(void)
1306 {
1307 if (ll_iso_setup.cis_count > BT_HCI_ISO_CIS_COUNT_MAX) {
1308 /* Invalid CIS_Count */
1309 return BT_HCI_ERR_INVALID_PARAM;
1310 }
1311
1312 if (ll_iso_setup.group.cig_id > BT_HCI_ISO_CIG_ID_MAX) {
1313 /* Invalid CIG_ID */
1314 return BT_HCI_ERR_INVALID_PARAM;
1315 }
1316
1317 if (!IN_RANGE(ll_iso_setup.group.c_sdu_interval, BT_HCI_ISO_SDU_INTERVAL_MIN,
1318 BT_HCI_ISO_SDU_INTERVAL_MAX) ||
1319 !IN_RANGE(ll_iso_setup.group.p_sdu_interval, BT_HCI_ISO_SDU_INTERVAL_MIN,
1320 BT_HCI_ISO_SDU_INTERVAL_MAX)) {
1321 /* Parameter out of range */
1322 return BT_HCI_ERR_INVALID_PARAM;
1323 }
1324
1325 if (ll_iso_setup.group.central.test) {
1326 if (!IN_RANGE(ll_iso_setup.group.iso_interval,
1327 BT_HCI_ISO_INTERVAL_MIN, BT_HCI_ISO_INTERVAL_MAX)) {
1328 /* Parameter out of range */
1329 return BT_HCI_ERR_INVALID_PARAM;
1330 }
1331 } else {
1332 if (!IN_RANGE(ll_iso_setup.group.c_latency,
1333 BT_HCI_ISO_MAX_TRANSPORT_LATENCY_MIN * USEC_PER_MSEC,
1334 BT_HCI_ISO_MAX_TRANSPORT_LATENCY_MAX * USEC_PER_MSEC) ||
1335 !IN_RANGE(ll_iso_setup.group.p_latency,
1336 BT_HCI_ISO_MAX_TRANSPORT_LATENCY_MIN * USEC_PER_MSEC,
1337 BT_HCI_ISO_MAX_TRANSPORT_LATENCY_MAX * USEC_PER_MSEC)) {
1338 /* Parameter out of range */
1339 return BT_HCI_ERR_INVALID_PARAM;
1340 }
1341 }
1342
1343 if (((ll_iso_setup.group.central.sca & ~BT_HCI_ISO_WORST_CASE_SCA_VALID_MASK) != 0U) ||
1344 ((ll_iso_setup.group.central.packing & ~BT_HCI_ISO_PACKING_VALID_MASK) != 0U) ||
1345 ((ll_iso_setup.group.central.framing & ~BT_HCI_ISO_FRAMING_VALID_MASK) != 0U)) {
1346 /* Worst_Case_SCA, Packing or Framing sets RFU value */
1347 return BT_HCI_ERR_INVALID_PARAM;
1348 }
1349
1350 if (ll_iso_setup.cis_count > STREAMS_PER_GROUP) {
1351 /* Requested number of CISes not available by configuration. Check as last
1352 * to avoid interfering with qualification parameter checks.
1353 */
1354 return BT_HCI_ERR_CONN_LIMIT_EXCEEDED;
1355 }
1356
1357 return BT_HCI_ERR_SUCCESS;
1358 }
1359
1360 static uint8_t ll_cis_parameters_validate(uint8_t cis_idx, uint8_t cis_id,
1361 uint16_t c_sdu, uint16_t p_sdu,
1362 uint16_t c_phy, uint16_t p_phy)
1363 {
1364 if ((cis_id > BT_HCI_ISO_CIS_ID_VALID_MAX) ||
1365 ((c_sdu & ~BT_HCI_ISO_MAX_SDU_VALID_MASK) != 0U) ||
1366 ((p_sdu & ~BT_HCI_ISO_MAX_SDU_VALID_MASK) != 0U)) {
1367 return BT_HCI_ERR_INVALID_PARAM;
1368 }
1369
1370 if (!c_phy || ((c_phy & ~PHY_VALID_MASK) != 0U) ||
1371 !p_phy || ((p_phy & ~PHY_VALID_MASK) != 0U)) {
1372 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
1373 }
1374
1375 if (cis_idx >= STREAMS_PER_GROUP) {
1376 return BT_HCI_ERR_CONN_LIMIT_EXCEEDED;
1377 }
1378
1379 return BT_HCI_ERR_SUCCESS;
1380 }
1381
1382 #if defined(CONFIG_BT_CTLR_CONN_ISO_RELIABILITY_POLICY)
1383 static uint8_t ll_cis_calculate_ft(uint32_t cig_sync_delay, uint32_t iso_interval_us,
1384 uint32_t sdu_interval, uint32_t latency, uint8_t framed)
1385 {
1386 uint32_t tl;
1387
1388 /* Framed:
1389 * TL = CIG_Sync_Delay + FT x ISO_Interval + SDU_Interval
1390 *
1391 * Unframed:
1392 * TL = CIG_Sync_Delay + FT x ISO_Interval - SDU_Interval
1393 */
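/* Illustration (hypothetical values): for a framed CIS with
 * CIG_Sync_Delay = ISO_Interval = 10000 us, SDU_Interval = 10000 us and
 * Max_Transport_Latency = 40000 us, FT = 1 gives TL = 30000 us and FT = 2
 * gives TL = 40000 us, while FT = 3 would exceed the latency, so 2 is
 * returned (assuming CONFIG_BT_CTLR_CONN_ISO_STREAMS_MAX_FT >= 3).
 */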
1394 for (uint16_t ft = 1U; ft <= CONFIG_BT_CTLR_CONN_ISO_STREAMS_MAX_FT; ft++) {
1395 if (framed) {
1396 tl = cig_sync_delay + ft * iso_interval_us + sdu_interval;
1397 } else {
1398 tl = cig_sync_delay + ft * iso_interval_us - sdu_interval;
1399 }
1400
1401 if (tl > latency) {
1402 /* Latency exceeded - use one less */
1403 return ft - 1U;
1404 }
1405 }
1406
1407 return 0;
1408 }
1409 #endif /* CONFIG_BT_CTLR_CONN_ISO_RELIABILITY_POLICY */
1410