1 /*
2 * Copyright (c) 2020 Demant
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <stddef.h>
8
9 #include <zephyr/kernel.h>
10 #include <zephyr/sys/byteorder.h>
11 #include <zephyr/bluetooth/hci_types.h>
12 #include <zephyr/bluetooth/buf.h>
13 #include <zephyr/sys/util_macro.h>
14
15 #include "hal/cpu.h"
16 #include "hal/ccm.h"
17 #include "hal/ticker.h"
18
19 #include "util/util.h"
20 #include "util/mem.h"
21 #include "util/memq.h"
22 #include "util/mfifo.h"
23 #include "util/mayfly.h"
24 #include "util/dbuf.h"
25
26 #include "ticker/ticker.h"
27
28 #include "pdu_df.h"
29 #include "lll/pdu_vendor.h"
30 #include "pdu.h"
31
32 #include "lll.h"
33 #include "lll/lll_adv_types.h"
34 #include "lll_adv.h"
35 #include "lll/lll_adv_pdu.h"
36 #include "lll_adv_iso.h"
37 #include "lll/lll_df_types.h"
38 #include "lll_sync.h"
39 #include "lll_sync_iso.h"
40 #include "lll_conn.h"
41 #include "lll_conn_iso.h"
42 #include "lll_iso_tx.h"
43 #include "lll/lll_vendor.h"
44
45 #include "ll_sw/ull_tx_queue.h"
46
47 #include "isoal.h"
48
49 #include "ull_adv_types.h"
50 #include "ull_sync_types.h"
51 #include "ull_conn_types.h"
52 #include "ull_iso_types.h"
53 #include "ull_conn_iso_types.h"
54 #include "ull_llcp.h"
55
56 #include "ull_internal.h"
57 #include "ull_adv_internal.h"
58 #include "ull_conn_internal.h"
59 #include "ull_iso_internal.h"
60 #include "ull_sync_iso_internal.h"
61 #include "ull_conn_iso_internal.h"
62
63 #include "ll_feat.h"
64
65 #include "hal/debug.h"
66
67 #define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
68 #include <zephyr/logging/log.h>
69 LOG_MODULE_REGISTER(bt_ctlr_ull_iso);
70
71 #if defined(CONFIG_BT_CTLR_CONN_ISO_STREAMS)
72 #define BT_CTLR_CONN_ISO_STREAMS CONFIG_BT_CTLR_CONN_ISO_STREAMS
73 #else /* !CONFIG_BT_CTLR_CONN_ISO_STREAMS */
74 #define BT_CTLR_CONN_ISO_STREAMS 0
75 #endif /* !CONFIG_BT_CTLR_CONN_ISO_STREAMS */
76
77 #if defined(CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT)
78 #define BT_CTLR_ADV_ISO_STREAMS (CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT)
79 #else /* !CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT */
80 #define BT_CTLR_ADV_ISO_STREAMS 0
81 #endif /* CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT */
82
83 #if defined(CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT)
84 #define BT_CTLR_SYNC_ISO_STREAMS (CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT)
85 #else /* !CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT */
86 #define BT_CTLR_SYNC_ISO_STREAMS 0
87 #endif /* CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT */
88
89 static int init_reset(void);
90
91 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
92 static isoal_status_t ll_iso_pdu_alloc(struct isoal_pdu_buffer *pdu_buffer);
93 static isoal_status_t ll_iso_pdu_write(struct isoal_pdu_buffer *pdu_buffer,
94 const size_t offset,
95 const uint8_t *sdu_payload,
96 const size_t consume_len);
97 static isoal_status_t ll_iso_pdu_emit(struct node_tx_iso *node_tx,
98 const uint16_t handle);
99 static isoal_status_t ll_iso_pdu_release(struct node_tx_iso *node_tx,
100 const uint16_t handle,
101 const isoal_status_t status);
102 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
103
104 /* Allocate data path pools for RX/TX directions for each stream */
105 #define BT_CTLR_ISO_STREAMS ((2 * (BT_CTLR_CONN_ISO_STREAMS)) + \
106 BT_CTLR_ADV_ISO_STREAMS + \
107 BT_CTLR_SYNC_ISO_STREAMS)
108 #if BT_CTLR_ISO_STREAMS
109 static struct ll_iso_datapath datapath_pool[BT_CTLR_ISO_STREAMS];
110 #endif /* BT_CTLR_ISO_STREAMS */
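/* Illustrative sizing sketch (configuration values assumed, not taken from any
 * particular build): with CONFIG_BT_CTLR_CONN_ISO_STREAMS=2,
 * CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT=1 and CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT=1
 * the pool holds (2 * 2) + 1 + 1 = 6 struct ll_iso_datapath elements: each CIS
 * may need both an input and an output data path, while broadcast streams are
 * unidirectional and need only one each.
 */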
111
112 static void *datapath_free;
113
114 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
115 static void ticker_resume_op_cb(uint32_t status, void *param);
116 static void ticker_resume_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
117 uint32_t remainder, uint16_t lazy, uint8_t force,
118 void *param);
119
120 #define NODE_RX_HEADER_SIZE (offsetof(struct node_rx_pdu, pdu))
121 #define ISO_RX_HEADER_SIZE (offsetof(struct pdu_bis, payload))
122
123 /* Ensure both BIS and CIS PDU headers are of equal size */
124 BUILD_ASSERT(ISO_RX_HEADER_SIZE == offsetof(struct pdu_cis, payload));
125
126 /* ISO LL conformance tests require a PDU size of maximum 251 bytes + header */
127 #define ISO_RX_BUFFER_SIZE (ISO_RX_HEADER_SIZE + \
128 MAX(MAX(LL_BIS_OCTETS_RX_MAX, LL_CIS_OCTETS_RX_MAX), \
129 LL_VND_OCTETS_RX_MIN))
130
131 /* Declare the ISO rx node RXFIFO. This is a composite pool-backed MFIFO for
132 * rx_nodes. The declaration constructs the following data structures:
133 * - mfifo_iso_rx: FIFO with pointers to PDU buffers
134 * - mem_iso_rx: Backing data pool for PDU buffer elements
135 * - mem_link_iso_rx: Pool of memq_link_t elements
136 *
137 * One extra rx buffer is reserved for empty ISO PDU reception.
138 * Two extra links are reserved for use by the ll_iso_rx and ull_iso_rx memq.
139 */
140 static RXFIFO_DEFINE(iso_rx, ((NODE_RX_HEADER_SIZE) + (ISO_RX_BUFFER_SIZE)),
141 (CONFIG_BT_CTLR_ISO_RX_BUFFERS + 1U), 2U);
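/* Illustrative example (Kconfig value assumed): with CONFIG_BT_CTLR_ISO_RX_BUFFERS=8
 * the declaration above provides 8 + 1 = 9 rx node buffers of
 * NODE_RX_HEADER_SIZE + ISO_RX_BUFFER_SIZE octets each, the extra one being the
 * buffer reserved for empty ISO PDU reception, plus the two extra memq_link_t
 * elements reserved for the ll_iso_rx and ull_iso_rx memq instances.
 */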
142
143 static MEMQ_DECLARE(ll_iso_rx);
144 #if defined(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)
145 static MEMQ_DECLARE(ull_iso_rx);
146 static void iso_rx_demux(void *param);
147 #endif /* CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH */
148 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
149
150 #define ISO_TEST_PACKET_COUNTER_SIZE 4U
151
152 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
153 void ll_iso_link_tx_release(void *link);
154 void ll_iso_tx_mem_release(void *node_tx);
155
156 #define NODE_TX_BUFFER_SIZE MROUND(offsetof(struct node_tx_iso, pdu) + \
157 offsetof(struct pdu_iso, payload) + \
158 MAX(LL_BIS_OCTETS_TX_MAX, \
159 LL_CIS_OCTETS_TX_MAX))
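/* Sizing sketch (maximum values assumed for illustration): if LL_BIS_OCTETS_TX_MAX
 * and LL_CIS_OCTETS_TX_MAX are both 251, each buffer spans the node_tx_iso
 * bookkeeping up to its pdu member, the ISO PDU header up to its payload member
 * and 251 payload octets, rounded up to the platform alignment by MROUND().
 */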
160
161 #define ISO_TEST_TX_BUFFER_SIZE 32U
162
163 static struct {
164 void *free;
165 uint8_t pool[NODE_TX_BUFFER_SIZE * BT_CTLR_ISO_TX_PDU_BUFFERS];
166 } mem_iso_tx;
167
168 static struct {
169 void *free;
170 uint8_t pool[sizeof(memq_link_t) * BT_CTLR_ISO_TX_PDU_BUFFERS];
171 } mem_link_iso_tx;
172
173 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
174
175 uint8_t ll_read_iso_tx_sync(uint16_t handle, uint16_t *seq,
176 uint32_t *timestamp, uint32_t *offset)
177 {
178 if (IS_CIS_HANDLE(handle)) {
179 struct ll_iso_datapath *dp = NULL;
180 struct ll_conn_iso_stream *cis;
181
182 cis = ll_conn_iso_stream_get(handle);
183
184 if (cis) {
185 dp = cis->hdr.datapath_in;
186 }
187
188 if (dp &&
189 isoal_tx_get_sync_info(dp->source_hdl, seq,
190 timestamp, offset) == ISOAL_STATUS_OK) {
191 return BT_HCI_ERR_SUCCESS;
192 }
193
194 return BT_HCI_ERR_CMD_DISALLOWED;
195
196 } else if (IS_ADV_ISO_HANDLE(handle)) {
197 const struct lll_adv_iso_stream *adv_stream;
198 uint16_t stream_handle;
199
200 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
201 adv_stream = ull_adv_iso_stream_get(stream_handle);
202 if (!adv_stream || !adv_stream->dp ||
203 isoal_tx_get_sync_info(adv_stream->dp->source_hdl, seq,
204 timestamp, offset) != ISOAL_STATUS_OK) {
205 return BT_HCI_ERR_CMD_DISALLOWED;
206 }
207
208 return BT_HCI_ERR_SUCCESS;
209
210 } else if (IS_SYNC_ISO_HANDLE(handle)) {
211 return BT_HCI_ERR_CMD_DISALLOWED;
212 }
213
214 return BT_HCI_ERR_UNKNOWN_CONN_ID;
215 }
216
217 static inline bool path_is_vendor_specific(uint8_t path_id)
218 {
219 return (path_id >= BT_HCI_DATAPATH_ID_VS &&
220 path_id <= BT_HCI_DATAPATH_ID_VS_END);
221 }
222
223 uint8_t ll_setup_iso_path(uint16_t handle, uint8_t path_dir, uint8_t path_id,
224 uint8_t coding_format, uint16_t company_id,
225 uint16_t vs_codec_id, uint32_t controller_delay,
226 uint8_t codec_config_len, uint8_t *codec_config)
227 {
228 struct lll_sync_iso_stream *sync_stream = NULL;
229 struct lll_adv_iso_stream *adv_stream = NULL;
230 struct ll_conn_iso_stream *cis = NULL;
231 struct ll_iso_datapath *dp;
232 uint32_t stream_sync_delay;
233 uint32_t group_sync_delay;
234 uint8_t flush_timeout;
235 uint16_t iso_interval;
236 uint32_t sdu_interval;
237 uint8_t burst_number;
238 uint8_t max_octets;
239 uint8_t framed;
240 uint8_t role;
241
242 ARG_UNUSED(controller_delay);
243 ARG_UNUSED(codec_config);
244
245 if (IS_ENABLED(CONFIG_BT_CTLR_CONN_ISO) && IS_CIS_HANDLE(handle)) {
246 struct ll_conn_iso_group *cig;
247 struct ll_conn *conn;
248
249 /* If the Host attempts to set a data path with a Connection
250 * Handle that does not exist or that is not for a CIS or a BIS,
251 * the Controller shall return the error code Unknown Connection
252 * Identifier (0x02)
253 */
254 cis = ll_conn_iso_stream_get(handle);
255 if (!cis || !cis->group) {
256 /* CIS does not belong to a CIG */
257 return BT_HCI_ERR_UNKNOWN_CONN_ID;
258 }
259
260 conn = ll_connected_get(cis->lll.acl_handle);
261 if (conn) {
262 /* If we're still waiting for accept/response from
263 * host, path setup is premature and we must return
264 * disallowed status.
265 */
266 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
267 const uint8_t cis_waiting = ull_cp_cc_awaiting_reply(conn);
268
269 if (cis_waiting) {
270 return BT_HCI_ERR_CMD_DISALLOWED;
271 }
272 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
273 }
274
275 if ((path_dir == BT_HCI_DATAPATH_DIR_HOST_TO_CTLR && cis->hdr.datapath_in) ||
276 (path_dir == BT_HCI_DATAPATH_DIR_CTLR_TO_HOST && cis->hdr.datapath_out)) {
277 /* Data path has been set up, can only do setup once */
278 return BT_HCI_ERR_CMD_DISALLOWED;
279 }
280
281 cig = cis->group;
282
283 role = cig->lll.role;
284 iso_interval = cig->iso_interval;
285 group_sync_delay = cig->sync_delay;
286 stream_sync_delay = cis->sync_delay;
287 framed = cis->framed;
288
289 if (path_dir == BT_HCI_DATAPATH_DIR_CTLR_TO_HOST) {
290 /* Create sink for RX data path */
291 burst_number = cis->lll.rx.bn;
292 flush_timeout = cis->lll.rx.ft;
293 max_octets = cis->lll.rx.max_pdu;
294
295 if (role) {
296 /* peripheral */
297 sdu_interval = cig->c_sdu_interval;
298 } else {
299 /* central */
300 sdu_interval = cig->p_sdu_interval;
301 }
302 } else {
303 /* path_dir == BT_HCI_DATAPATH_DIR_HOST_TO_CTLR */
304 burst_number = cis->lll.tx.bn;
305 flush_timeout = cis->lll.tx.ft;
306 max_octets = cis->lll.tx.max_pdu;
307
308 if (role) {
309 /* peripheral */
310 sdu_interval = cig->p_sdu_interval;
311 } else {
312 /* central */
313 sdu_interval = cig->c_sdu_interval;
314 }
315 }
316 #if defined(CONFIG_BT_CTLR_ADV_ISO)
317 } else if (IS_ADV_ISO_HANDLE(handle)) {
318 struct ll_adv_iso_set *adv_iso;
319 struct lll_adv_iso *lll_iso;
320 uint16_t stream_handle;
321
322 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
323 adv_stream = ull_adv_iso_stream_get(stream_handle);
324 if (!adv_stream || adv_stream->dp) {
325 return BT_HCI_ERR_CMD_DISALLOWED;
326 }
327
328 adv_iso = ull_adv_iso_by_stream_get(stream_handle);
329 lll_iso = &adv_iso->lll;
330
331 role = ISOAL_ROLE_BROADCAST_SOURCE;
332 iso_interval = lll_iso->iso_interval;
333 sdu_interval = lll_iso->sdu_interval;
334 burst_number = lll_iso->bn;
335 flush_timeout = 0U; /* Not used for Broadcast ISO */
336 group_sync_delay = ull_iso_big_sync_delay(lll_iso->num_bis, lll_iso->bis_spacing,
337 lll_iso->nse, lll_iso->sub_interval,
338 lll_iso->phy, lll_iso->max_pdu,
339 lll_iso->enc);
340 stream_sync_delay = group_sync_delay - stream_handle * lll_iso->bis_spacing;
341 framed = lll_iso->framing;
342 max_octets = lll_iso->max_pdu;
343 #endif /* CONFIG_BT_CTLR_ADV_ISO */
344
345 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
346 } else if (IS_SYNC_ISO_HANDLE(handle)) {
347 struct ll_sync_iso_set *sync_iso;
348 struct lll_sync_iso *lll_iso;
349 uint16_t stream_handle;
350
351 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
352 sync_stream = ull_sync_iso_stream_get(stream_handle);
353 if (!sync_stream || sync_stream->dp) {
354 return BT_HCI_ERR_CMD_DISALLOWED;
355 }
356
357 sync_iso = ull_sync_iso_by_stream_get(stream_handle);
358 lll_iso = &sync_iso->lll;
359
360 role = ISOAL_ROLE_BROADCAST_SINK;
361 iso_interval = lll_iso->iso_interval;
362 sdu_interval = lll_iso->sdu_interval;
363 burst_number = lll_iso->bn;
364
365 group_sync_delay = ull_iso_big_sync_delay(lll_iso->num_bis, lll_iso->bis_spacing,
366 lll_iso->nse, lll_iso->sub_interval,
367 lll_iso->phy, lll_iso->max_pdu,
368 lll_iso->enc);
369 stream_sync_delay = group_sync_delay - stream_handle * lll_iso->bis_spacing;
370 framed = lll_iso->framing;
371 max_octets = lll_iso->max_pdu;
372 flush_timeout = 0U; /* Not used for Broadcast ISO */
373 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
374
375 } else {
376 return BT_HCI_ERR_UNKNOWN_CONN_ID;
377 }
378
379 if (path_is_vendor_specific(path_id) &&
380 (!IS_ENABLED(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH) ||
381 !ll_data_path_configured(path_dir, path_id))) {
382 /* Data path must be configured prior to setup */
383 return BT_HCI_ERR_CMD_DISALLOWED;
384 }
385
386 /* If Codec_Configuration_Length is non-zero and Codec_ID is set to
387 * transparent air mode, the Controller shall return the error code
388 * Invalid HCI Command Parameters (0x12).
389 */
390 if (codec_config_len &&
391 (vs_codec_id == BT_HCI_CODING_FORMAT_TRANSPARENT)) {
392 return BT_HCI_ERR_INVALID_PARAM;
393 }
394
395 /* Allocate and configure datapath */
396 dp = mem_acquire(&datapath_free);
397 if (!dp) {
398 return BT_HCI_ERR_CMD_DISALLOWED;
399 }
400
401 dp->path_dir = path_dir;
402 dp->path_id = path_id;
403 dp->coding_format = coding_format;
404 dp->company_id = company_id;
405
406 /* TODO dp->sync_delay = controller_delay; ?*/
407
408 if (false) {
409
410 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
411 } else if ((path_dir == BT_HCI_DATAPATH_DIR_CTLR_TO_HOST) &&
412 (cis || sync_stream)) {
413 isoal_sink_handle_t sink_handle;
414 isoal_status_t err;
415
416 if (path_id == BT_HCI_DATAPATH_ID_HCI) {
417 /* Not vendor specific, thus alloc and emit functions
418 * known
419 */
420 err = isoal_sink_create(handle, role, framed,
421 burst_number, flush_timeout,
422 sdu_interval, iso_interval,
423 stream_sync_delay,
424 group_sync_delay,
425 sink_sdu_alloc_hci,
426 sink_sdu_emit_hci,
427 sink_sdu_write_hci,
428 &sink_handle);
429 } else {
430 /* Set up vendor specific data path */
431 isoal_sink_sdu_alloc_cb sdu_alloc;
432 isoal_sink_sdu_emit_cb sdu_emit;
433 isoal_sink_sdu_write_cb sdu_write;
434
435 /* Request vendor sink callbacks for path */
436 if (IS_ENABLED(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH) &&
437 ll_data_path_sink_create(handle, dp, &sdu_alloc,
438 &sdu_emit, &sdu_write)) {
439 err = isoal_sink_create(handle, role, framed,
440 burst_number,
441 flush_timeout,
442 sdu_interval,
443 iso_interval,
444 stream_sync_delay,
445 group_sync_delay,
446 sdu_alloc, sdu_emit,
447 sdu_write,
448 &sink_handle);
449 } else {
450 ull_iso_datapath_release(dp);
451
452 return BT_HCI_ERR_CMD_DISALLOWED;
453 }
454 }
455
456 if (!err) {
457 if (IS_ENABLED(CONFIG_BT_CTLR_CONN_ISO) && cis != NULL) {
458 cis->hdr.datapath_out = dp;
459 }
460
461 if (sync_stream) {
462 sync_stream->dp = dp;
463 }
464
465 dp->sink_hdl = sink_handle;
466 isoal_sink_enable(sink_handle);
467 } else {
468 ull_iso_datapath_release(dp);
469
470 return BT_HCI_ERR_CMD_DISALLOWED;
471 }
472 #else /* !CONFIG_BT_CTLR_SYNC_ISO && !CONFIG_BT_CTLR_CONN_ISO */
473 ARG_UNUSED(sync_stream);
474 #endif /* !CONFIG_BT_CTLR_SYNC_ISO && !CONFIG_BT_CTLR_CONN_ISO */
475
476 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
477 } else if ((path_dir == BT_HCI_DATAPATH_DIR_HOST_TO_CTLR) &&
478 (cis || adv_stream)) {
479 isoal_source_handle_t source_handle;
480 isoal_status_t err;
481
482 /* Create source for TX data path */
483 isoal_source_pdu_alloc_cb pdu_alloc;
484 isoal_source_pdu_write_cb pdu_write;
485 isoal_source_pdu_emit_cb pdu_emit;
486 isoal_source_pdu_release_cb pdu_release;
487
488 if (path_is_vendor_specific(path_id)) {
489 if (!IS_ENABLED(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH) ||
490 !ll_data_path_source_create(handle, dp,
491 &pdu_alloc, &pdu_write,
492 &pdu_emit,
493 &pdu_release)) {
494 ull_iso_datapath_release(dp);
495
496 return BT_HCI_ERR_CMD_DISALLOWED;
497 }
498 } else {
499 /* Set default callbacks when the path is not vendor specific,
500 * or when the vendor specific path is the same as the default.
501 */
502 pdu_alloc = ll_iso_pdu_alloc;
503 pdu_write = ll_iso_pdu_write;
504 pdu_emit = ll_iso_pdu_emit;
505 pdu_release = ll_iso_pdu_release;
506 }
507
508 err = isoal_source_create(handle, role, framed, burst_number,
509 flush_timeout, max_octets,
510 sdu_interval, iso_interval,
511 stream_sync_delay, group_sync_delay,
512 pdu_alloc, pdu_write, pdu_emit,
513 pdu_release, &source_handle);
514
515 if (!err) {
516 if (IS_ENABLED(CONFIG_BT_CTLR_CONN_ISO) && cis != NULL) {
517 cis->hdr.datapath_in = dp;
518 }
519
520 if (IS_ENABLED(CONFIG_BT_CTLR_ADV_ISO) && adv_stream != NULL) {
521 adv_stream->dp = dp;
522 }
523
524 dp->source_hdl = source_handle;
525 isoal_source_enable(source_handle);
526 } else {
527 ull_iso_datapath_release(dp);
528
529 return BT_HCI_ERR_CMD_DISALLOWED;
530 }
531
532 #else /* !CONFIG_BT_CTLR_ADV_ISO && !CONFIG_BT_CTLR_CONN_ISO */
533 ARG_UNUSED(adv_stream);
534 #endif /* !CONFIG_BT_CTLR_ADV_ISO && !CONFIG_BT_CTLR_CONN_ISO */
535
536 } else {
537 return BT_HCI_ERR_CMD_DISALLOWED;
538 }
539
540 return BT_HCI_ERR_SUCCESS;
541 }
542
543 uint8_t ll_remove_iso_path(uint16_t handle, uint8_t path_dir)
544 {
545 /* If the Host issues this command with a Connection_Handle that does
546 * not exist or is not for a CIS or a BIS, the Controller shall return
547 * the error code Unknown Connection Identifier (0x02).
548 */
549 if (false) {
550
551 #if defined(CONFIG_BT_CTLR_CONN_ISO)
552 } else if (IS_CIS_HANDLE(handle)) {
553 struct ll_conn_iso_stream *cis;
554 struct ll_iso_stream_hdr *hdr;
555 struct ll_iso_datapath *dp;
556
557 cis = ll_conn_iso_stream_get(handle);
558 hdr = &cis->hdr;
559
560 if (path_dir & BIT(BT_HCI_DATAPATH_DIR_HOST_TO_CTLR)) {
561 dp = hdr->datapath_in;
562 if (dp) {
563 isoal_source_destroy(dp->source_hdl);
564
565 hdr->datapath_in = NULL;
566 ull_iso_datapath_release(dp);
567 } else {
568 /* Datapath was not previously set up */
569 return BT_HCI_ERR_CMD_DISALLOWED;
570 }
571 }
572
573 if (path_dir & BIT(BT_HCI_DATAPATH_DIR_CTLR_TO_HOST)) {
574 dp = hdr->datapath_out;
575 if (dp) {
576 isoal_sink_destroy(dp->sink_hdl);
577
578 hdr->datapath_out = NULL;
579 ull_iso_datapath_release(dp);
580 } else {
581 /* Datapath was not previously set up */
582 return BT_HCI_ERR_CMD_DISALLOWED;
583 }
584 }
585 #endif /* CONFIG_BT_CTLR_CONN_ISO */
586
587 #if defined(CONFIG_BT_CTLR_ADV_ISO)
588 } else if (IS_ADV_ISO_HANDLE(handle)) {
589 struct lll_adv_iso_stream *adv_stream;
590 struct ll_iso_datapath *dp;
591 uint16_t stream_handle;
592
593 if (!(path_dir & BIT(BT_HCI_DATAPATH_DIR_HOST_TO_CTLR))) {
594 return BT_HCI_ERR_CMD_DISALLOWED;
595 }
596
597 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
598 adv_stream = ull_adv_iso_stream_get(stream_handle);
599 if (!adv_stream) {
600 return BT_HCI_ERR_CMD_DISALLOWED;
601 }
602
603 dp = adv_stream->dp;
604 if (dp) {
605 adv_stream->dp = NULL;
606 isoal_source_destroy(dp->source_hdl);
607 ull_iso_datapath_release(dp);
608 } else {
609 /* Datapath was not previously set up */
610 return BT_HCI_ERR_CMD_DISALLOWED;
611 }
612 #endif /* CONFIG_BT_CTLR_ADV_ISO */
613
614 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
615 } else if (IS_SYNC_ISO_HANDLE(handle)) {
616 struct lll_sync_iso_stream *sync_stream;
617 struct ll_iso_datapath *dp;
618 uint16_t stream_handle;
619
620 if (!(path_dir & BIT(BT_HCI_DATAPATH_DIR_CTLR_TO_HOST))) {
621 return BT_HCI_ERR_CMD_DISALLOWED;
622 }
623
624 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
625 sync_stream = ull_sync_iso_stream_get(stream_handle);
626 if (!sync_stream) {
627 return BT_HCI_ERR_CMD_DISALLOWED;
628 }
629
630 dp = sync_stream->dp;
631 if (dp) {
632 sync_stream->dp = NULL;
633 isoal_sink_destroy(dp->sink_hdl);
634 ull_iso_datapath_release(dp);
635 } else {
636 /* Datapath was not previously set up */
637 return BT_HCI_ERR_CMD_DISALLOWED;
638 }
639 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
640
641 } else {
642 return BT_HCI_ERR_CMD_DISALLOWED;
643 }
644
645 return 0;
646 }
647
648 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
649 /* The sdu_alloc function is called before combining PDUs into an SDU. Here we
650 * store the payload number associated with the first PDU, for the unframed use case.
651 */
652 static isoal_status_t ll_iso_test_sdu_alloc(const struct isoal_sink *sink_ctx,
653 const struct isoal_pdu_rx *valid_pdu,
654 struct isoal_sdu_buffer *sdu_buffer)
655 {
656 uint16_t handle;
657
658 handle = sink_ctx->session.handle;
659
660 if (IS_CIS_HANDLE(handle)) {
661 if (!sink_ctx->session.framed) {
662 struct ll_conn_iso_stream *cis;
663
664 cis = ll_iso_stream_connected_get(sink_ctx->session.handle);
665 LL_ASSERT(cis);
666
667 /* For unframed, SDU counter is the payload number */
668 cis->hdr.test_mode.rx.sdu_counter =
669 (uint32_t)valid_pdu->meta->payload_number;
670 }
671 } else if (IS_SYNC_ISO_HANDLE(handle)) {
672 if (!sink_ctx->session.framed) {
673 struct lll_sync_iso_stream *sync_stream;
674 uint16_t stream_handle;
675
676 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
677 sync_stream = ull_sync_iso_stream_get(stream_handle);
678 LL_ASSERT(sync_stream);
679
680 sync_stream->test_mode->sdu_counter =
681 (uint32_t)valid_pdu->meta->payload_number;
682 }
683 }
684
685 return sink_sdu_alloc_hci(sink_ctx, valid_pdu, sdu_buffer);
686 }
687
688 /* The sdu_emit function is called whenever an SDU is combined and ready to be sent
689 * further in the data path. This injected implementation performs statistics on
690 * the SDU and then discards it.
691 */
692 static isoal_status_t ll_iso_test_sdu_emit(const struct isoal_sink *sink_ctx,
693 const struct isoal_emitted_sdu_frag *sdu_frag,
694 const struct isoal_emitted_sdu *sdu)
695 {
696 struct ll_iso_rx_test_mode *test_mode_rx;
697 isoal_sdu_len_t length;
698 isoal_status_t status;
699 struct net_buf *buf;
700 uint32_t sdu_counter;
701 uint16_t max_sdu;
702 uint16_t handle;
703 uint8_t framed;
704
705 handle = sink_ctx->session.handle;
706 buf = (struct net_buf *)sdu_frag->sdu.contents.dbuf;
707
708 if (IS_CIS_HANDLE(handle)) {
709 struct ll_conn_iso_stream *cis;
710
711 cis = ll_iso_stream_connected_get(sink_ctx->session.handle);
712 LL_ASSERT(cis);
713
714 test_mode_rx = &cis->hdr.test_mode.rx;
715 max_sdu = cis->c_max_sdu;
716 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
717 } else if (IS_SYNC_ISO_HANDLE(handle)) {
718 struct lll_sync_iso_stream *sync_stream;
719 struct ll_sync_iso_set *sync_iso;
720 uint16_t stream_handle;
721
722 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
723 sync_stream = ull_sync_iso_stream_get(stream_handle);
724 LL_ASSERT(sync_stream);
725
726 sync_iso = ull_sync_iso_by_stream_get(stream_handle);
727
728 test_mode_rx = sync_stream->test_mode;
729 max_sdu = sync_iso->lll.max_sdu;
730 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
731 } else {
732 /* Handle is out of range */
733 status = ISOAL_STATUS_ERR_SDU_EMIT;
734 net_buf_unref(buf);
735
736 return status;
737 }
738
739 length = sink_ctx->sdu_production.sdu_written;
740 framed = sink_ctx->session.framed;
741
742 /* In BT_HCI_ISO_TEST_ZERO_SIZE_SDU mode, all SDUs must have length 0 and there is
743 * no sdu_counter field. In the other modes, the first 4 bytes must contain a
744 * packet counter, which is used as SDU counter. The sdu_counter is extracted
745 * regardless of mode as a sanity check, unless the length does not allow it.
746 */
747 if (length >= ISO_TEST_PACKET_COUNTER_SIZE) {
748 sdu_counter = sys_get_le32(buf->data);
749 } else {
750 sdu_counter = 0U;
751 }
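/* Illustrative test SDU layout (counter value assumed): for a variable or
 * maximum size test SDU with sdu_counter 0x00000005, the first
 * ISO_TEST_PACKET_COUNTER_SIZE (4) octets are the little-endian counter
 * 05 00 00 00 followed by (length - 4) payload octets; a zero-size test SDU
 * carries no counter, which is why sdu_counter falls back to 0 above.
 */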
752
753 switch (sdu_frag->sdu.status) {
754 case ISOAL_SDU_STATUS_VALID:
755 if (framed && test_mode_rx->sdu_counter == 0U) {
756 /* BT 5.3, Vol 6, Part B, section 7.2:
757 * When using framed PDUs the expected value of the SDU counter
758 * shall be initialized with the value of the SDU counter of the
759 * first valid received SDU.
760 */
761 test_mode_rx->sdu_counter = sdu_counter;
762 }
763
764 switch (test_mode_rx->payload_type) {
765 case BT_HCI_ISO_TEST_ZERO_SIZE_SDU:
766 if (length == 0) {
767 test_mode_rx->received_cnt++;
768 } else {
769 test_mode_rx->failed_cnt++;
770 }
771 break;
772
773 case BT_HCI_ISO_TEST_VARIABLE_SIZE_SDU:
774 if ((length >= ISO_TEST_PACKET_COUNTER_SIZE) &&
775 (length <= max_sdu) &&
776 (sdu_counter == test_mode_rx->sdu_counter)) {
777 test_mode_rx->received_cnt++;
778 } else {
779 test_mode_rx->failed_cnt++;
780 }
781 break;
782
783 case BT_HCI_ISO_TEST_MAX_SIZE_SDU:
784 if ((length == max_sdu) &&
785 (sdu_counter == test_mode_rx->sdu_counter)) {
786 test_mode_rx->received_cnt++;
787 } else {
788 test_mode_rx->failed_cnt++;
789 }
790 break;
791
792 default:
793 LL_ASSERT(0);
794 return ISOAL_STATUS_ERR_SDU_EMIT;
795 }
796 break;
797
798 case ISOAL_SDU_STATUS_ERRORS:
799 case ISOAL_SDU_STATUS_LOST_DATA:
800 test_mode_rx->missed_cnt++;
801 break;
802 }
803
804 /* In framed mode, we start incrementing the SDU counter once rx_sdu_counter
805 * becomes non-zero (leaving its initial state), or, for zero-based counting,
806 * once zero has been received as the first valid SDU counter.
807 */
808 if (framed && (test_mode_rx->sdu_counter ||
809 (sdu_frag->sdu.status == ISOAL_SDU_STATUS_VALID))) {
810 test_mode_rx->sdu_counter++;
811 }
812
813 status = ISOAL_STATUS_OK;
814 net_buf_unref(buf);
815
816 return status;
817 }
818
819 uint8_t ll_iso_receive_test(uint16_t handle, uint8_t payload_type)
820 {
821 struct ll_iso_rx_test_mode *test_mode_rx;
822 isoal_sink_handle_t sink_handle;
823 struct ll_iso_datapath *dp;
824 uint32_t sdu_interval;
825 isoal_status_t err;
826
827 struct ll_iso_datapath **stream_dp;
828
829 uint32_t stream_sync_delay;
830 uint32_t group_sync_delay;
831 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
832 uint16_t stream_handle;
833 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
834 uint16_t iso_interval;
835 uint8_t framed;
836 uint8_t role;
837 uint8_t ft;
838 uint8_t bn;
839
840 if (IS_CIS_HANDLE(handle)) {
841 struct ll_conn_iso_stream *cis;
842 struct ll_conn_iso_group *cig;
843
844 cis = ll_iso_stream_connected_get(handle);
845 if (!cis) {
846 /* CIS is not connected */
847 return BT_HCI_ERR_UNKNOWN_CONN_ID;
848 }
849
850 if (cis->lll.rx.bn == 0) {
851 /* CIS is not configured for RX */
852 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
853 }
854
855 test_mode_rx = &cis->hdr.test_mode.rx;
856 stream_dp = &cis->hdr.datapath_out;
857 cig = cis->group;
858
859 if (cig->lll.role == BT_HCI_ROLE_PERIPHERAL) {
860 /* peripheral */
861 sdu_interval = cig->c_sdu_interval;
862 } else {
863 /* central */
864 sdu_interval = cig->p_sdu_interval;
865 }
866
867 role = cig->lll.role;
868 framed = cis->framed;
869 bn = cis->lll.rx.bn;
870 ft = cis->lll.rx.ft;
871 iso_interval = cig->iso_interval;
872 stream_sync_delay = cis->sync_delay;
873 group_sync_delay = cig->sync_delay;
874 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
875 } else if (IS_SYNC_ISO_HANDLE(handle)) {
876 /* Get the sync stream from the handle */
877 struct lll_sync_iso_stream *sync_stream;
878 struct ll_sync_iso_set *sync_iso;
879 struct lll_sync_iso *lll_iso;
880
881 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
882 sync_stream = ull_sync_iso_stream_get(stream_handle);
883 if (!sync_stream) {
884 return BT_HCI_ERR_UNKNOWN_CONN_ID;
885 }
886
887 if (sync_stream->dp) {
888 /* Data path already set up */
889 return BT_HCI_ERR_CMD_DISALLOWED;
890 }
891
892 sync_iso = ull_sync_iso_by_stream_get(stream_handle);
893 lll_iso = &sync_iso->lll;
894
895 test_mode_rx = sync_stream->test_mode;
896 stream_dp = &sync_stream->dp;
897
898 /* BT Core v5.4 - Vol 6, Part B, Section 4.4.6.4:
899 * BIG_Sync_Delay = (Num_BIS – 1) × BIS_Spacing
900 * + (NSE – 1) × Sub_Interval + MPT.
901 */
902 group_sync_delay = ull_iso_big_sync_delay(lll_iso->num_bis, lll_iso->bis_spacing,
903 lll_iso->nse, lll_iso->sub_interval,
904 lll_iso->phy, lll_iso->max_pdu,
905 lll_iso->enc);
906 stream_sync_delay = group_sync_delay - stream_handle * lll_iso->bis_spacing;
907
908 role = ISOAL_ROLE_BROADCAST_SINK;
909 framed = lll_iso->framing;
910 bn = lll_iso->bn;
911 ft = 0;
912 sdu_interval = lll_iso->sdu_interval;
913 iso_interval = lll_iso->iso_interval;
914 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
915 } else {
916 /* Handle is out of range */
917 return BT_HCI_ERR_UNKNOWN_CONN_ID;
918 }
919
920 if (*stream_dp) {
921 /* Data path already set up */
922 return BT_HCI_ERR_CMD_DISALLOWED;
923 }
924
925 if (payload_type > BT_HCI_ISO_TEST_MAX_SIZE_SDU) {
926 return BT_HCI_ERR_INVALID_LL_PARAM;
927 }
928
929 /* Allocate and configure test datapath */
930 dp = mem_acquire(&datapath_free);
931 if (!dp) {
932 return BT_HCI_ERR_CMD_DISALLOWED;
933 }
934
935 dp->path_dir = BT_HCI_DATAPATH_DIR_CTLR_TO_HOST;
936 dp->path_id = BT_HCI_DATAPATH_ID_HCI;
937
938 *stream_dp = dp;
939 memset(test_mode_rx, 0, sizeof(struct ll_iso_rx_test_mode));
940
941 err = isoal_sink_create(handle, role, framed, bn, ft,
942 sdu_interval, iso_interval,
943 stream_sync_delay, group_sync_delay,
944 ll_iso_test_sdu_alloc,
945 ll_iso_test_sdu_emit,
946 sink_sdu_write_hci, &sink_handle);
947 if (err) {
948 /* Error creating test sink - clean up sink and
949 * datapath
950 */
951 isoal_sink_destroy(sink_handle);
952 ull_iso_datapath_release(dp);
953 *stream_dp = NULL;
954
955 return BT_HCI_ERR_CMD_DISALLOWED;
956 }
957
958 dp->sink_hdl = sink_handle;
959 isoal_sink_enable(sink_handle);
960
961 /* Enable Receive Test Mode */
962 test_mode_rx->enabled = 1;
963 test_mode_rx->payload_type = payload_type;
964
965 return BT_HCI_ERR_SUCCESS;
966 }
967
968 uint8_t ll_iso_read_test_counters(uint16_t handle, uint32_t *received_cnt,
969 uint32_t *missed_cnt,
970 uint32_t *failed_cnt)
971 {
972 struct ll_iso_rx_test_mode *test_mode_rx;
973
974 *received_cnt = 0U;
975 *missed_cnt = 0U;
976 *failed_cnt = 0U;
977
978 if (IS_CIS_HANDLE(handle)) {
979 struct ll_conn_iso_stream *cis;
980
981 cis = ll_iso_stream_connected_get(handle);
982 if (!cis) {
983 /* CIS is not connected */
984 return BT_HCI_ERR_UNKNOWN_CONN_ID;
985 }
986
987 test_mode_rx = &cis->hdr.test_mode.rx;
988
989 } else if (IS_SYNC_ISO_HANDLE(handle)) {
990 /* Get the sync stream from the handle */
991 struct lll_sync_iso_stream *sync_stream;
992 uint16_t stream_handle;
993
994 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
995 sync_stream = ull_sync_iso_stream_get(stream_handle);
996 if (!sync_stream) {
997 return BT_HCI_ERR_UNKNOWN_CONN_ID;
998 }
999
1000 test_mode_rx = sync_stream->test_mode;
1001
1002 } else {
1003 /* Handle is out of range */
1004 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1005 }
1006
1007 if (!test_mode_rx->enabled) {
1008 /* ISO receive Test is not active */
1009 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
1010 }
1011
1012 /* Return SDU statistics */
1013 *received_cnt = test_mode_rx->received_cnt;
1014 *missed_cnt = test_mode_rx->missed_cnt;
1015 *failed_cnt = test_mode_rx->failed_cnt;
1016
1017 return BT_HCI_ERR_SUCCESS;
1018 }
1019
1020 #if defined(CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY)
1021 uint8_t ll_read_iso_link_quality(uint16_t handle,
1022 uint32_t *tx_unacked_packets,
1023 uint32_t *tx_flushed_packets,
1024 uint32_t *tx_last_subevent_packets,
1025 uint32_t *retransmitted_packets,
1026 uint32_t *crc_error_packets,
1027 uint32_t *rx_unreceived_packets,
1028 uint32_t *duplicate_packets)
1029 {
1030 uint8_t status;
1031
1032 *tx_unacked_packets = 0;
1033 *tx_flushed_packets = 0;
1034 *tx_last_subevent_packets = 0;
1035 *retransmitted_packets = 0;
1036 *crc_error_packets = 0;
1037 *rx_unreceived_packets = 0;
1038 *duplicate_packets = 0;
1039
1040 status = BT_HCI_ERR_SUCCESS;
1041
1042 if (IS_CIS_HANDLE(handle)) {
1043 struct ll_conn_iso_stream *cis;
1044
1045 cis = ll_iso_stream_connected_get(handle);
1046
1047 if (!cis) {
1048 /* CIS is not connected */
1049 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1050 }
1051
1052 *tx_unacked_packets = cis->hdr.link_quality.tx_unacked_packets;
1053 *tx_flushed_packets = cis->hdr.link_quality.tx_flushed_packets;
1054 *tx_last_subevent_packets = cis->hdr.link_quality.tx_last_subevent_packets;
1055 *retransmitted_packets = cis->hdr.link_quality.retransmitted_packets;
1056 *crc_error_packets = cis->hdr.link_quality.crc_error_packets;
1057 *rx_unreceived_packets = cis->hdr.link_quality.rx_unreceived_packets;
1058 *duplicate_packets = cis->hdr.link_quality.duplicate_packets;
1059
1060 } else if (IS_SYNC_ISO_HANDLE(handle)) {
1061 /* FIXME: Implement for sync receiver */
1062 status = BT_HCI_ERR_CMD_DISALLOWED;
1063 } else {
1064 /* Handle is out of range */
1065 status = BT_HCI_ERR_UNKNOWN_CONN_ID;
1066 }
1067
1068 return status;
1069 }
1070 #endif /* CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY */
1071
1072 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
1073
1074 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1075 static isoal_status_t ll_iso_test_pdu_release(struct node_tx_iso *node_tx,
1076 const uint16_t handle,
1077 const isoal_status_t status)
1078 {
1079 /* Release back to memory pool */
1080 if (node_tx->link) {
1081 ll_iso_link_tx_release(node_tx->link);
1082 }
1083 ll_iso_tx_mem_release(node_tx);
1084
1085 return ISOAL_STATUS_OK;
1086 }
1087
1088 #if defined(CONFIG_BT_CTLR_CONN_ISO)
1089 void ll_iso_transmit_test_send_sdu(uint16_t handle, uint32_t ticks_at_expire)
1090 {
1091 isoal_source_handle_t source_handle;
1092 struct isoal_sdu_tx sdu;
1093 isoal_status_t err;
1094 uint8_t tx_buffer[ISO_TEST_TX_BUFFER_SIZE];
1095 uint64_t next_payload_number;
1096 uint16_t remaining_tx;
1097 uint32_t sdu_counter;
1098
1099 if (IS_CIS_HANDLE(handle)) {
1100 struct ll_conn_iso_stream *cis;
1101 struct ll_conn_iso_group *cig;
1102 uint32_t rand_max_sdu;
1103 uint8_t event_offset;
1104 uint8_t max_sdu;
1105 uint8_t rand_8;
1106
1107 cis = ll_iso_stream_connected_get(handle);
1108 LL_ASSERT(cis);
1109
1110 if (!cis->hdr.test_mode.tx.enabled) {
1111 /* Transmit Test Mode not enabled */
1112 return;
1113 }
1114
1115 cig = cis->group;
1116 source_handle = cis->hdr.datapath_in->source_hdl;
1117
1118 max_sdu = IS_PERIPHERAL(cig) ? cis->p_max_sdu : cis->c_max_sdu;
1119
1120 switch (cis->hdr.test_mode.tx.payload_type) {
1121 case BT_HCI_ISO_TEST_ZERO_SIZE_SDU:
1122 remaining_tx = 0;
1123 break;
1124
1125 case BT_HCI_ISO_TEST_VARIABLE_SIZE_SDU:
1126 /* Randomize the length [4..max_sdu] */
1127 lll_rand_get(&rand_8, sizeof(rand_8));
1128 rand_max_sdu = rand_8 * (max_sdu - ISO_TEST_PACKET_COUNTER_SIZE);
1129 remaining_tx = ISO_TEST_PACKET_COUNTER_SIZE + (rand_max_sdu >> 8);
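/* Illustrative arithmetic (numbers assumed): with max_sdu = 100 and
 * rand_8 = 128, rand_max_sdu = 128 * 96 = 12288 and
 * remaining_tx = 4 + (12288 >> 8) = 52, keeping the length within the
 * [4..max_sdu] range mentioned above.
 */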
1130 break;
1131
1132 case BT_HCI_ISO_TEST_MAX_SIZE_SDU:
1133 LL_ASSERT(max_sdu > ISO_TEST_PACKET_COUNTER_SIZE);
1134 remaining_tx = max_sdu;
1135 break;
1136
1137 default:
1138 LL_ASSERT(0);
1139 return;
1140 }
1141
1142 if (remaining_tx > ISO_TEST_TX_BUFFER_SIZE) {
1143 sdu.sdu_state = BT_ISO_START;
1144 } else {
1145 sdu.sdu_state = BT_ISO_SINGLE;
1146 }
1147
1148 /* Configure SDU similarly to one delivered via HCI */
1149 sdu.packet_sn = 0;
1150 sdu.dbuf = tx_buffer;
1151
1152 /* We must ensure sufficient time for ISO-AL to fragment SDU and
1153 * deliver PDUs to the TX queue. By checking ull_ref_get, we
1154 * know if we are within the subevents of an ISO event. If so,
1155 * we can assume that we have enough time to deliver in the next
1156 * ISO event. If we're not active within the ISO event, we don't
1157 * know if there is enough time to deliver in the next event,
1158 * and for safety we set the target to current event + 2.
1159 *
1160 * For FT > 1, we have the opportunity to retransmit in later
1161 * event(s), in which case we have the option to target an
1162 * earlier event (this or next) because being late does not
1163 * instantly flush the payload.
1164 */
1165 event_offset = ull_ref_get(&cig->ull) ? 1 : 2;
1166 if (cis->lll.tx.ft > 1) {
1167 /* FT > 1, target an earlier event */
1168 event_offset -= 1;
1169 }
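/* Illustrative outcomes of the selection above: inside an ISO event with
 * FT = 1 the target is the next event (event_offset = 1); outside an ISO
 * event with FT = 1 it is the event after next (event_offset = 2); with
 * FT > 1 each case targets one event earlier (0 and 1 respectively),
 * relying on the flush timeout to absorb late payloads.
 */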
1170
1171 sdu.grp_ref_point = isoal_get_wrapped_time_us(cig->cig_ref_point,
1172 (event_offset * cig->iso_interval *
1173 ISO_INT_UNIT_US));
1174 sdu.target_event = cis->lll.event_count_prepare + event_offset;
1175 sdu.iso_sdu_length = remaining_tx;
1176
1177 /* Send all SDU fragments */
1178 do {
1179 sdu.cntr_time_stamp = HAL_TICKER_TICKS_TO_US(ticks_at_expire);
1180 sdu.time_stamp = sdu.cntr_time_stamp;
1181 sdu.size = MIN(remaining_tx, ISO_TEST_TX_BUFFER_SIZE);
1182 memset(tx_buffer, 0, sdu.size);
1183
1184 /* If this is the first fragment of a framed SDU, inject the SDU
1185 * counter.
1186 */
1187 if ((sdu.size >= ISO_TEST_PACKET_COUNTER_SIZE) &&
1188 ((sdu.sdu_state == BT_ISO_START) || (sdu.sdu_state == BT_ISO_SINGLE))) {
1189 if (cis->framed) {
1190 sdu_counter = (uint32_t)cis->hdr.test_mode.tx.sdu_counter;
1191 } else {
1192 /* Unframed. Get the next payload counter.
1193 *
1194 * BT 5.3, Vol 6, Part B, Section 7.1:
1195 * When using unframed PDUs, the SDU counter shall be equal
1196 * to the payload counter.
1197 */
1198 isoal_tx_unframed_get_next_payload_number(source_handle,
1199 &sdu,
1200 &next_payload_number);
1201 sdu_counter = (uint32_t)next_payload_number;
1202 }
1203
1204 sys_put_le32(sdu_counter, tx_buffer);
1205 }
1206
1207 /* Send to ISOAL */
1208 err = isoal_tx_sdu_fragment(source_handle, &sdu);
1209 LL_ASSERT(!err);
1210
1211 remaining_tx -= sdu.size;
1212
1213 if (remaining_tx > ISO_TEST_TX_BUFFER_SIZE) {
1214 sdu.sdu_state = BT_ISO_CONT;
1215 } else {
1216 sdu.sdu_state = BT_ISO_END;
1217 }
1218 } while (remaining_tx);
1219
1220 cis->hdr.test_mode.tx.sdu_counter++;
1221
1222 } else if (IS_ADV_ISO_HANDLE(handle)) {
1223 /* FIXME: Implement for broadcaster */
1224 } else {
1225 LL_ASSERT(0);
1226 }
1227 }
1228 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1229
1230 uint8_t ll_iso_transmit_test(uint16_t handle, uint8_t payload_type)
1231 {
1232 isoal_source_handle_t source_handle;
1233 struct ll_iso_datapath *dp;
1234 uint32_t sdu_interval;
1235 isoal_status_t err;
1236 uint8_t status;
1237
1238 status = BT_HCI_ERR_SUCCESS;
1239
1240 if (IS_CIS_HANDLE(handle)) {
1241 struct ll_conn_iso_stream *cis;
1242 struct ll_conn_iso_group *cig;
1243
1244 cis = ll_iso_stream_connected_get(handle);
1245 if (!cis) {
1246 /* CIS is not connected */
1247 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1248 }
1249
1250 if (cis->lll.tx.bn == 0U) {
1251 /* CIS is not configured for TX */
1252 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
1253 }
1254
1255 if (cis->hdr.datapath_in) {
1256 /* Data path already set up */
1257 return BT_HCI_ERR_CMD_DISALLOWED;
1258 }
1259
1260 if (payload_type > BT_HCI_ISO_TEST_MAX_SIZE_SDU) {
1261 return BT_HCI_ERR_INVALID_LL_PARAM;
1262 }
1263
1264 /* Allocate and configure test datapath */
1265 dp = mem_acquire(&datapath_free);
1266 if (!dp) {
1267 return BT_HCI_ERR_CMD_DISALLOWED;
1268 }
1269
1270 dp->path_dir = BT_HCI_DATAPATH_DIR_HOST_TO_CTLR;
1271 dp->path_id = BT_HCI_DATAPATH_ID_HCI;
1272
1273 cis->hdr.datapath_in = dp;
1274 cig = cis->group;
1275
1276 sdu_interval = IS_PERIPHERAL(cig) ? cig->p_sdu_interval : cig->c_sdu_interval;
1277
1278 /* Setup the test source */
1279 err = isoal_source_create(handle, cig->lll.role, cis->framed,
1280 cis->lll.tx.bn, cis->lll.tx.ft,
1281 cis->lll.tx.max_pdu, sdu_interval,
1282 cig->iso_interval, cis->sync_delay,
1283 cig->sync_delay, ll_iso_pdu_alloc,
1284 ll_iso_pdu_write, ll_iso_pdu_emit,
1285 ll_iso_test_pdu_release,
1286 &source_handle);
1287
1288 if (err) {
1289 /* Error creating test source - cleanup source and datapath */
1290 isoal_source_destroy(source_handle);
1291 ull_iso_datapath_release(dp);
1292 cis->hdr.datapath_in = NULL;
1293
1294 return BT_HCI_ERR_CMD_DISALLOWED;
1295 }
1296
1297 dp->source_hdl = source_handle;
1298 isoal_source_enable(source_handle);
1299
1300 /* Enable Transmit Test Mode */
1301 cis->hdr.test_mode.tx.enabled = 1;
1302 cis->hdr.test_mode.tx.payload_type = payload_type;
1303
1304 } else if (IS_ADV_ISO_HANDLE(handle)) {
1305 struct lll_adv_iso_stream *stream;
1306 uint16_t stream_handle;
1307
1308 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
1309 stream = ull_adv_iso_stream_get(stream_handle);
1310 if (!stream) {
1311 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1312 }
1313
1314 /* FIXME: Implement use of common header in stream to enable code sharing
1315 * between CIS and BIS for test commands (and other places).
1316 */
1317 status = BT_HCI_ERR_CMD_DISALLOWED;
1318 } else {
1319 /* Handle is out of range */
1320 status = BT_HCI_ERR_UNKNOWN_CONN_ID;
1321 }
1322
1323 return status;
1324 }
1325 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1326
1327 uint8_t ll_iso_test_end(uint16_t handle, uint32_t *received_cnt,
1328 uint32_t *missed_cnt, uint32_t *failed_cnt)
1329 {
1330 *received_cnt = 0U;
1331 *missed_cnt = 0U;
1332 *failed_cnt = 0U;
1333
1334 if (IS_CIS_HANDLE(handle)) {
1335 struct ll_conn_iso_stream *cis;
1336
1337 cis = ll_iso_stream_connected_get(handle);
1338 if (!cis) {
1339 /* CIS is not connected */
1340 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1341 }
1342
1343 if (!cis->hdr.test_mode.rx.enabled && !cis->hdr.test_mode.tx.enabled) {
1344 /* Test Mode is not active */
1345 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
1346 }
1347
1348 if (cis->hdr.test_mode.rx.enabled) {
1349 isoal_sink_destroy(cis->hdr.datapath_out->sink_hdl);
1350 ull_iso_datapath_release(cis->hdr.datapath_out);
1351 cis->hdr.datapath_out = NULL;
1352
1353 /* Return SDU statistics */
1354 *received_cnt = cis->hdr.test_mode.rx.received_cnt;
1355 *missed_cnt = cis->hdr.test_mode.rx.missed_cnt;
1356 *failed_cnt = cis->hdr.test_mode.rx.failed_cnt;
1357 }
1358
1359 if (cis->hdr.test_mode.tx.enabled) {
1360 /* Tear down source and datapath */
1361 isoal_source_destroy(cis->hdr.datapath_in->source_hdl);
1362 ull_iso_datapath_release(cis->hdr.datapath_in);
1363 cis->hdr.datapath_in = NULL;
1364 }
1365
1366 /* Disable Test Mode */
1367 (void)memset(&cis->hdr.test_mode, 0U, sizeof(cis->hdr.test_mode));
1368
1369 } else if (IS_ADV_ISO_HANDLE(handle)) {
1370 /* FIXME: Implement for broadcaster */
1371 return BT_HCI_ERR_CMD_DISALLOWED;
1372
1373 } else if (IS_SYNC_ISO_HANDLE(handle)) {
1374 struct lll_sync_iso_stream *sync_stream;
1375 uint16_t stream_handle;
1376
1377 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
1378 sync_stream = ull_sync_iso_stream_get(stream_handle);
1379 if (!sync_stream) {
1380 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1381 }
1382
1383 if (!sync_stream->test_mode->enabled || !sync_stream->dp) {
1384 /* Test Mode is not active */
1385 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
1386 }
1387
1388 isoal_sink_destroy(sync_stream->dp->sink_hdl);
1389 ull_iso_datapath_release(sync_stream->dp);
1390 sync_stream->dp = NULL;
1391
1392 /* Return SDU statistics */
1393 *received_cnt = sync_stream->test_mode->received_cnt;
1394 *missed_cnt = sync_stream->test_mode->missed_cnt;
1395 *failed_cnt = sync_stream->test_mode->failed_cnt;
1396
1397 (void)memset(sync_stream->test_mode, 0U, sizeof(*sync_stream->test_mode));
1398
1399 } else {
1400 /* Handle is out of range */
1401 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1402 }
1403
1404 return BT_HCI_ERR_SUCCESS;
1405 }
1406
1407 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1408 void *ll_iso_tx_mem_acquire(void)
1409 {
1410 return mem_acquire(&mem_iso_tx.free);
1411 }
1412
1413 void ll_iso_tx_mem_release(void *node_tx)
1414 {
1415 mem_release(node_tx, &mem_iso_tx.free);
1416 }
1417
1418 int ll_iso_tx_mem_enqueue(uint16_t handle, void *node_tx, void *link)
1419 {
1420 if (IS_ENABLED(CONFIG_BT_CTLR_CONN_ISO) &&
1421 IS_CIS_HANDLE(handle)) {
1422 struct ll_conn_iso_stream *cis;
1423
1424 cis = ll_conn_iso_stream_get(handle);
1425 memq_enqueue(link, node_tx, &cis->lll.memq_tx.tail);
1426
1427 } else if (IS_ENABLED(CONFIG_BT_CTLR_ADV_ISO) &&
1428 IS_ADV_ISO_HANDLE(handle)) {
1429 struct lll_adv_iso_stream *stream;
1430 uint16_t stream_handle;
1431
1432 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
1433 stream = ull_adv_iso_stream_get(stream_handle);
1434 memq_enqueue(link, node_tx, &stream->memq_tx.tail);
1435
1436 } else {
1437 return -EINVAL;
1438 }
1439
1440 return 0;
1441 }
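/* Sketch of the typical TX flow through the helpers above (no additional API
 * implied): ISOAL calls ll_iso_pdu_alloc(), which reserves a node_tx_iso via
 * ll_iso_tx_mem_acquire(); the PDU is filled via ll_iso_pdu_write(); then
 * ll_iso_pdu_emit() acquires a memq link and hands the node to
 * ll_iso_tx_mem_enqueue() for the CIS or BIS queue. On ack or flush the node
 * and link return to the pools through ll_iso_tx_mem_release() and
 * ll_iso_link_tx_release().
 */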
1442 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1443
1444 int ull_iso_init(void)
1445 {
1446 int err;
1447
1448 err = init_reset();
1449 if (err) {
1450 return err;
1451 }
1452
1453 return 0;
1454 }
1455
1456 int ull_iso_reset(void)
1457 {
1458 int err;
1459
1460 err = init_reset();
1461 if (err) {
1462 return err;
1463 }
1464
1465 return 0;
1466 }
1467
1468 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1469 void ull_iso_lll_ack_enqueue(uint16_t handle, struct node_tx_iso *node_tx)
1470 {
1471 if (IS_ENABLED(CONFIG_BT_CTLR_CONN_ISO) && IS_CIS_HANDLE(handle)) {
1472 struct ll_conn_iso_stream *cis;
1473 struct ll_iso_datapath *dp;
1474
1475 cis = ll_conn_iso_stream_get(handle);
1476 dp = cis->hdr.datapath_in;
1477
1478 if (dp) {
1479 isoal_tx_pdu_release(dp->source_hdl, node_tx);
1480 } else {
1481 #if defined(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)
1482 /* Possible race with Data Path remove - handle release in vendor
1483 * function.
1484 */
1485 ll_data_path_tx_pdu_release(handle, node_tx);
1486 #else
1487 /* FIXME: ll_tx_ack_put is not LLL callable as it is
1488 * used by ACL connections in ULL context to dispatch
1489 * ack.
1490 */
1491 ll_tx_ack_put(handle, (void *)node_tx);
1492 ll_rx_sched();
1493 #endif /* CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH */
1494 }
1495 } else if (IS_ENABLED(CONFIG_BT_CTLR_ADV_ISO) && IS_ADV_ISO_HANDLE(handle)) {
1496 /* Process as TX ack. TODO: Can be unified with CIS and use
1497 * ISOAL.
1498 */
1499 /* FIXME: ll_tx_ack_put is not LLL callable as it is
1500 * used by ACL connections in ULL context to dispatch
1501 * ack.
1502 */
1503 ll_tx_ack_put(handle, (void *)node_tx);
1504 ll_rx_sched();
1505 } else {
1506 LL_ASSERT(0);
1507 }
1508 }
1509
1510 void ull_iso_lll_event_prepare(uint16_t handle, uint64_t event_count)
1511 {
1512 if (IS_CIS_HANDLE(handle)) {
1513 struct ll_iso_datapath *dp = NULL;
1514 struct ll_conn_iso_stream *cis;
1515
1516 cis = ll_iso_stream_connected_get(handle);
1517
1518 if (cis) {
1519 dp = cis->hdr.datapath_in;
1520 }
1521
1522 if (dp) {
1523 isoal_tx_event_prepare(dp->source_hdl, event_count);
1524 }
1525 } else if (IS_ADV_ISO_HANDLE(handle)) {
1526 struct ll_iso_datapath *dp = NULL;
1527 struct lll_adv_iso_stream *stream;
1528 uint16_t stream_handle;
1529
1530 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
1531 stream = ull_adv_iso_stream_get(stream_handle);
1532
1533 if (stream) {
1534 dp = stream->dp;
1535 }
1536
1537 if (dp) {
1538 isoal_tx_event_prepare(dp->source_hdl, event_count);
1539 }
1540 } else {
1541 LL_ASSERT(0);
1542 }
1543 }
1544 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1545
1546 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_SYNC_ISO)
1547 uint32_t ull_iso_big_sync_delay(uint8_t num_bis, uint32_t bis_spacing, uint8_t nse,
1548 uint32_t sub_interval, uint8_t phy, uint8_t max_pdu, bool enc)
1549 {
1550 /* BT Core v5.4 - Vol 6, Part B, Section 4.4.6.4:
1551 * BIG_Sync_Delay = (Num_BIS – 1) × BIS_Spacing + (NSE – 1) × Sub_Interval + MPT.
1552 */
1553 return (num_bis - 1) * bis_spacing + (nse - 1) * sub_interval +
1554 BYTES2US(PDU_OVERHEAD_SIZE(phy) + max_pdu + (enc ? 4 : 0), phy);
1555 }
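/* Worked example of the formula above (illustrative numbers only): with
 * Num_BIS = 2, BIS_Spacing = 3000 us, NSE = 3, Sub_Interval = 1500 us and an
 * MPT of 500 us for the chosen PHY, Max_PDU and encryption,
 * BIG_Sync_Delay = (2 - 1) * 3000 + (3 - 1) * 1500 + 500 = 6500 us.
 */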
1556 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_SYNC_ISO */
1557
1558 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1559 void *ull_iso_pdu_rx_alloc_peek(uint8_t count)
1560 {
1561 if (count > MFIFO_AVAIL_COUNT_GET(iso_rx)) {
1562 return NULL;
1563 }
1564
1565 return MFIFO_DEQUEUE_PEEK(iso_rx);
1566 }
1567
1568 void *ull_iso_pdu_rx_alloc(void)
1569 {
1570 return MFIFO_DEQUEUE(iso_rx);
1571 }
1572
1573 #if defined(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)
1574 void ull_iso_rx_put(memq_link_t *link, void *rx)
1575 {
1576 /* Enqueue the Rx object */
1577 memq_enqueue(link, rx, &memq_ull_iso_rx.tail);
1578 }
1579
1580 void ull_iso_rx_sched(void)
1581 {
1582 static memq_link_t link;
1583 static struct mayfly mfy = {0, 0, &link, NULL, iso_rx_demux};
1584
1585 /* Kick the ULL (using the mayfly, tailchain it) */
1586 mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1, &mfy);
1587 }
1588
1589 #if defined(CONFIG_BT_CTLR_CONN_ISO)
1590 static void iso_rx_cig_ref_point_update(struct ll_conn_iso_group *cig,
1591 const struct ll_conn_iso_stream *cis,
1592 const struct node_rx_iso_meta *meta)
1593 {
1594 uint32_t cig_sync_delay;
1595 uint32_t cis_sync_delay;
1596 uint64_t event_count;
1597 uint8_t burst_number;
1598 uint8_t role;
1599
1600 role = cig->lll.role;
1601 cig_sync_delay = cig->sync_delay;
1602 cis_sync_delay = cis->sync_delay;
1603 burst_number = cis->lll.rx.bn;
1604 event_count = cis->lll.event_count_prepare;
1605
1606 if (role) {
1607 /* Peripheral */
1608
1609 /* Check if this is the first payload received for this cis in
1610 * this event
1611 */
1612 if (meta->payload_number == (burst_number * event_count)) {
1613 /* Update the CIG reference point based on the CIS
1614 * anchor point
1615 */
1616 cig->cig_ref_point = isoal_get_wrapped_time_us(meta->timestamp,
1617 cis_sync_delay - cig_sync_delay);
1618 }
1619 }
1620 }
1621 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1622
1623 static void iso_rx_demux(void *param)
1624 {
1625 #if defined(CONFIG_BT_CTLR_CONN_ISO) || \
1626 defined(CONFIG_BT_CTLR_SYNC_ISO)
1627 struct ll_iso_datapath *dp;
1628 #endif /* CONFIG_BT_CTLR_CONN_ISO || CONFIG_BT_CTLR_SYNC_ISO */
1629 struct node_rx_pdu *rx_pdu;
1630 struct node_rx_hdr *rx;
1631 memq_link_t *link;
1632 uint16_t handle;
1633
1634 do {
1635 link = memq_peek(memq_ull_iso_rx.head, memq_ull_iso_rx.tail,
1636 (void **)&rx);
1637 if (link) {
1638 /* Demux Rx objects */
1639 switch (rx->type) {
1640 case NODE_RX_TYPE_RELEASE:
1641 (void)memq_dequeue(memq_ull_iso_rx.tail,
1642 &memq_ull_iso_rx.head, NULL);
1643 ll_iso_rx_put(link, rx);
1644 ll_rx_sched();
1645 break;
1646
1647 case NODE_RX_TYPE_ISO_PDU:
1648 /* Remove from receive-queue; ULL has received this now */
1649 (void)memq_dequeue(memq_ull_iso_rx.tail, &memq_ull_iso_rx.head,
1650 NULL);
1651
1652 rx_pdu = (struct node_rx_pdu *)rx;
1653 handle = rx_pdu->hdr.handle;
1654 dp = NULL;
1655
1656 if (false) {
1657 #if defined(CONFIG_BT_CTLR_CONN_ISO)
1658 } else if (IS_CIS_HANDLE(handle)) {
1659 struct ll_conn_iso_stream *cis;
1660 struct ll_conn_iso_group *cig;
1661
1662 cis = ll_conn_iso_stream_get(handle);
1663 cig = cis->group;
1664 dp = cis->hdr.datapath_out;
1665
1666 iso_rx_cig_ref_point_update(cig, cis,
1667 &rx_pdu->hdr.rx_iso_meta);
1668 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1669 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
1670 } else if (IS_SYNC_ISO_HANDLE(handle)) {
1671 struct lll_sync_iso_stream *sync_stream;
1672 uint16_t stream_handle;
1673
1674 stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(handle);
1675 sync_stream = ull_sync_iso_stream_get(stream_handle);
1676 dp = sync_stream ? sync_stream->dp : NULL;
1677 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1678 }
1679
1680 #if defined(CONFIG_BT_CTLR_CONN_ISO) || defined(CONFIG_BT_CTLR_SYNC_ISO)
1681 if (dp && dp->path_id != BT_HCI_DATAPATH_ID_HCI) {
1682 /* If vendor specific datapath pass to ISO AL here,
1683 * in case of HCI destination it will be passed in
1684 * HCI context.
1685 */
1686 struct isoal_pdu_rx pckt_meta = {
1687 .meta = &rx_pdu->rx_iso_meta,
1688 .pdu = (struct pdu_iso *)&rx_pdu->pdu[0]
1689 };
1690
1691 /* Pass the ISO PDU through ISO-AL */
1692 const isoal_status_t err =
1693 isoal_rx_pdu_recombine(dp->sink_hdl, &pckt_meta);
1694
1695 LL_ASSERT(err == ISOAL_STATUS_OK); /* TODO handle err */
1696 }
1697 #endif /* CONFIG_BT_CTLR_CONN_ISO || CONFIG_BT_CTLR_SYNC_ISO */
1698
1699 /* Let ISO PDU start its long journey upwards */
1700 ll_iso_rx_put(link, rx);
1701 ll_rx_sched();
1702 break;
1703
1704 default:
1705 LL_ASSERT(0);
1706 break;
1707 }
1708 }
1709 } while (link);
1710 }
1711 #endif /* CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH */
1712
1713 void ll_iso_rx_put(memq_link_t *link, void *rx)
1714 {
1715 /* Enqueue the Rx object */
1716 memq_enqueue(link, rx, &memq_ll_iso_rx.tail);
1717 }
1718
1719 void *ll_iso_rx_get(void)
1720 {
1721 struct node_rx_hdr *rx;
1722 memq_link_t *link;
1723
1724 link = memq_peek(memq_ll_iso_rx.head, memq_ll_iso_rx.tail, (void **)&rx);
1725 while (link) {
1726 /* Do not send up buffers to Host thread that are
1727 * marked for release
1728 */
1729 if (rx->type == NODE_RX_TYPE_RELEASE) {
1730 (void)memq_dequeue(memq_ll_iso_rx.tail,
1731 &memq_ll_iso_rx.head, NULL);
1732 mem_release(link, &mem_link_iso_rx.free);
1733 mem_release(rx, &mem_pool_iso_rx.free);
1734 RXFIFO_ALLOC(iso_rx, 1);
1735
1736 link = memq_peek(memq_ll_iso_rx.head, memq_ll_iso_rx.tail, (void **)&rx);
1737 continue;
1738 }
1739 return rx;
1740 }
1741
1742 return NULL;
1743 }
1744
1745 void ll_iso_rx_dequeue(void)
1746 {
1747 struct node_rx_hdr *rx = NULL;
1748 memq_link_t *link;
1749
1750 link = memq_dequeue(memq_ll_iso_rx.tail, &memq_ll_iso_rx.head,
1751 (void **)&rx);
1752 LL_ASSERT(link);
1753
1754 mem_release(link, &mem_link_iso_rx.free);
1755
1756 /* Handle object specific clean up */
1757 switch (rx->type) {
1758 case NODE_RX_TYPE_ISO_PDU:
1759 break;
1760 default:
1761 LL_ASSERT(0);
1762 break;
1763 }
1764 }
1765
1766 void ll_iso_rx_mem_release(void **node_rx)
1767 {
1768 struct node_rx_hdr *rx;
1769
1770 rx = *node_rx;
1771 while (rx) {
1772 struct node_rx_hdr *rx_free;
1773
1774 rx_free = rx;
1775 rx = rx->next;
1776
1777 switch (rx_free->type) {
1778 case NODE_RX_TYPE_ISO_PDU:
1779 mem_release(rx_free, &mem_pool_iso_rx.free);
1780 break;
1781 default:
1782 /* Ignore other types as node may have been initialized due to
1783 * race with HCI reset.
1784 */
1785 break;
1786 }
1787 }
1788
1789 *node_rx = rx;
1790
1791 RXFIFO_ALLOC(iso_rx, UINT8_MAX);
1792 }
1793 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
1794
1795 struct ll_iso_datapath *ull_iso_datapath_alloc(void)
1796 {
1797 return mem_acquire(&datapath_free);
1798 }
1799
1800 void ull_iso_datapath_release(struct ll_iso_datapath *dp)
1801 {
1802 mem_release(dp, &datapath_free);
1803 }
1804
1805 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1806 void ll_iso_link_tx_release(void *link)
1807 {
1808 mem_release(link, &mem_link_iso_tx.free);
1809 }
1810
1811 /**
1812 * Allocate a PDU from the LL and store the details in the given buffer. Allocation
1813 * is not expected to fail as there must always be sufficient PDU buffers. Any
1814 * failure will trigger the assert.
1815 * @param[in] pdu_buffer Buffer to store PDU details in
1816 * @return Error status of operation
1817 */
1818 static isoal_status_t ll_iso_pdu_alloc(struct isoal_pdu_buffer *pdu_buffer)
1819 {
1820 struct node_tx_iso *node_tx;
1821
1822 node_tx = ll_iso_tx_mem_acquire();
1823 if (!node_tx) {
1824 LOG_ERR("Tx Buffer Overflow");
1825 /* TODO: Report overflow to HCI and remove assert
1826 * data_buf_overflow(evt, BT_OVERFLOW_LINK_ISO)
1827 */
1828 LL_ASSERT(0);
1829 return ISOAL_STATUS_ERR_PDU_ALLOC;
1830 }
1831
1832 node_tx->link = NULL;
1833
1834 /* node_tx handle will be required to emit the PDU later */
1835 pdu_buffer->handle = (void *)node_tx;
1836 pdu_buffer->pdu = (void *)node_tx->pdu;
1837
1838 /* Use TX buffer size as the limit here. Actual size will be decided in
1839 * the ISOAL based on the minimum of the buffer size and the respective
1840 * Max_PDU_C_To_P or Max_PDU_P_To_C.
1841 */
1842 pdu_buffer->size = MAX(LL_BIS_OCTETS_TX_MAX, LL_CIS_OCTETS_TX_MAX);
1843
1844 return ISOAL_STATUS_OK;
1845 }

/**
 * Write the given SDU payload to the target PDU buffer at the given offset.
 * @param[in,out] pdu_buffer Target PDU buffer
 * @param[in] pdu_offset Offset / current write position within PDU
 * @param[in] sdu_payload Location of source data
 * @param[in] consume_len Length of data to copy
 * @return Error status of write operation
 */
static isoal_status_t ll_iso_pdu_write(struct isoal_pdu_buffer *pdu_buffer,
				       const size_t pdu_offset,
				       const uint8_t *sdu_payload,
				       const size_t consume_len)
{
	ARG_UNUSED(pdu_offset);
	ARG_UNUSED(consume_len);

	LL_ASSERT(pdu_buffer);
	LL_ASSERT(pdu_buffer->pdu);
	LL_ASSERT(sdu_payload);

	if ((pdu_offset + consume_len) > pdu_buffer->size) {
		/* Exceeded PDU buffer */
		return ISOAL_STATUS_ERR_UNSPECIFIED;
	}

	/* Copy source to destination at given offset */
	memcpy(&pdu_buffer->pdu->payload[pdu_offset], sdu_payload, consume_len);

	return ISOAL_STATUS_OK;
}

/**
 * Emit the encoded node to the transmission queue
 * @param node_tx TX node to enqueue
 * @param handle CIS/BIS handle
 * @return Error status of enqueue operation
 */
static isoal_status_t ll_iso_pdu_emit(struct node_tx_iso *node_tx,
				      const uint16_t handle)
{
	memq_link_t *link;

	link = mem_acquire(&mem_link_iso_tx.free);
	LL_ASSERT(link);

	if (ll_iso_tx_mem_enqueue(handle, node_tx, link)) {
		return ISOAL_STATUS_ERR_PDU_EMIT;
	}

	return ISOAL_STATUS_OK;
}

#if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
/**
 * Release the given payload back to the memory pool.
 * @param node_tx TX node to release or forward
 * @param handle CIS/BIS handle
 * @param status Reason for release
 * @return Error status of release operation
 */
static isoal_status_t ll_iso_pdu_release(struct node_tx_iso *node_tx,
					 const uint16_t handle,
					 const isoal_status_t status)
{
	if (status == ISOAL_STATUS_OK) {
		/* Process as TX ack, we are in LLL execution context here.
		 * status == ISOAL_STATUS_OK when an ISO PDU has been acked.
		 *
		 * Call Path:
		 * ull_iso_lll_ack_enqueue() --> isoal_tx_pdu_release() -->
		 * pdu_release() == ll_iso_pdu_release() (this function).
		 */
		/* FIXME: ll_tx_ack_put is not LLL callable as it is used by
		 * ACL connections in ULL context to dispatch ack.
		 */
		ll_tx_ack_put(handle, (void *)node_tx);
		ll_rx_sched();
	} else {
		/* Release back to memory pool, we are in Thread context
		 * Callers:
		 * isoal_source_deallocate() with ISOAL_STATUS_ERR_PDU_EMIT
		 * isoal_tx_pdu_emit with status != ISOAL_STATUS_OK
		 */
		if (node_tx->link) {
			ll_iso_link_tx_release(node_tx->link);
		}
		ll_iso_tx_mem_release(node_tx);
	}

	return ISOAL_STATUS_OK;
}
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */

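/**
 * Common initialization used by the ULL ISO init and reset paths: sets up the
 * ISO RX/TX memory pools and queues, the datapath pool and the ISO Adaptation
 * Layer.
 * @return Zero on success
 */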
static int init_reset(void)
{
#if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
	memq_link_t *link;

	RXFIFO_INIT(iso_rx);

	/* Acquire a link to initialize ull rx memq */
	link = mem_acquire(&mem_link_iso_rx.free);
	LL_ASSERT(link);

#if defined(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)
	/* Initialize ull rx memq */
	MEMQ_INIT(ull_iso_rx, link);
#endif /* CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH */

	/* Acquire a link to initialize ll_iso_rx memq */
	link = mem_acquire(&mem_link_iso_rx.free);
	LL_ASSERT(link);

	/* Initialize ll_iso_rx memq */
	MEMQ_INIT(ll_iso_rx, link);

	RXFIFO_ALLOC(iso_rx, UINT8_MAX);
#endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
	/* Initialize tx pool. */
	mem_init(mem_iso_tx.pool, NODE_TX_BUFFER_SIZE, BT_CTLR_ISO_TX_PDU_BUFFERS,
		 &mem_iso_tx.free);

	/* Initialize tx link pool. */
	mem_init(mem_link_iso_tx.pool, sizeof(memq_link_t), BT_CTLR_ISO_TX_PDU_BUFFERS,
		 &mem_link_iso_tx.free);
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */

#if BT_CTLR_ISO_STREAMS
	/* Initialize ISO Datapath pool */
	mem_init(datapath_pool, sizeof(struct ll_iso_datapath),
		 sizeof(datapath_pool) / sizeof(struct ll_iso_datapath), &datapath_free);
#endif /* BT_CTLR_ISO_STREAMS */

	/* Initialize ISO Adaptation Layer */
	isoal_init();

	return 0;
}

#if defined(CONFIG_BT_CTLR_CONN_ISO) || defined(CONFIG_BT_CTLR_SYNC_ISO)
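/**
 * Start a single-shot ticker that resumes a pre-empted CIS/BIS event in LLL.
 * The resume point is offset back from resume_timeout to allow for event
 * start overhead and, for the Peripheral role, the radio RX ready delay.
 * @param[in] resume_event   LLL prepare/resume event to kick on ticker expiry
 * @param[in] group_handle   CIG/BIG handle used to derive the ticker id
 * @param[in] stream_handle  CIS/BIS stream handle
 * @param[in] role           Device role (Peripheral adds radio RX ready delay)
 * @param[in] ticks_anchor   Anchor ticks for the ticker start
 * @param[in] resume_timeout Time from anchor to intended resume, in microseconds
 */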
void ull_iso_resume_ticker_start(struct lll_event *resume_event,
				 uint16_t group_handle,
				 uint16_t stream_handle,
				 uint8_t role,
				 uint32_t ticks_anchor,
				 uint32_t resume_timeout)
{
	uint32_t resume_delay_us;
	int32_t resume_offset_us;
	uint8_t ticker_id = 0;
	uint32_t ret;

	resume_delay_us = EVENT_OVERHEAD_START_US;
	resume_delay_us += EVENT_TICKER_RES_MARGIN_US;

	if (0) {
#if defined(CONFIG_BT_CTLR_CONN_ISO)
	} else if (IS_CIS_HANDLE(stream_handle)) {
		ticker_id = TICKER_ID_CONN_ISO_RESUME_BASE + group_handle;
#endif /* CONFIG_BT_CTLR_CONN_ISO */
#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	} else if (IS_SYNC_ISO_HANDLE(stream_handle)) {
		ticker_id = TICKER_ID_SCAN_SYNC_ISO_RESUME_BASE + group_handle;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
	} else {
		LL_ASSERT(0);
	}

	if (role == BT_HCI_ROLE_PERIPHERAL) {
		/* Add peripheral specific delay */
		if (0) {
#if defined(CONFIG_BT_CTLR_PHY)
		} else {
			uint8_t phy = 0;

			if (0) {
#if defined(CONFIG_BT_CTLR_CONN_ISO)
			} else if (IS_CIS_HANDLE(stream_handle)) {
				struct ll_conn_iso_stream *cis;
				struct ll_conn *conn;

				cis = ll_conn_iso_stream_get(stream_handle);

				conn = ll_conn_get(cis->lll.acl_handle);
				LL_ASSERT(conn != NULL);

				phy = conn->lll.phy_rx;
#endif /* CONFIG_BT_CTLR_CONN_ISO */
#if defined(CONFIG_BT_CTLR_SYNC_ISO)
			} else if (IS_SYNC_ISO_HANDLE(stream_handle)) {
				struct ll_sync_iso_set *sync_iso;
				uint16_t stream_idx;

				stream_idx = LL_BIS_SYNC_IDX_FROM_HANDLE(stream_handle);
				sync_iso = ull_sync_iso_by_stream_get(stream_idx);
				phy = sync_iso->lll.phy;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
			} else {
				LL_ASSERT(0);
			}

			resume_delay_us +=
				lll_radio_rx_ready_delay_get(phy, PHY_FLAGS_S8);
#else
		} else {
			resume_delay_us += lll_radio_rx_ready_delay_get(0, 0);
#endif /* CONFIG_BT_CTLR_PHY */
		}
	}

	resume_offset_us = (int32_t)(resume_timeout - resume_delay_us);
	LL_ASSERT(resume_offset_us >= 0);

	/* Setup resume timeout as single-shot */
	ret = ticker_start(TICKER_INSTANCE_ID_CTLR,
			   TICKER_USER_ID_LLL,
			   ticker_id,
			   ticks_anchor,
			   HAL_TICKER_US_TO_TICKS(resume_offset_us),
			   TICKER_NULL_PERIOD,
			   TICKER_NULL_REMAINDER,
			   TICKER_NULL_LAZY,
			   TICKER_NULL_SLOT,
			   ticker_resume_cb, resume_event,
			   ticker_resume_op_cb, NULL);

	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}

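/**
 * Ticker operation callback for the resume ticker start; asserts that the
 * start request completed successfully.
 */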
static void ticker_resume_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
}

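/**
 * Ticker expiry callback for the resume ticker; fills in the prepare timing
 * parameters and enqueues a mayfly to run lll_resume() in the LLL context.
 */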
static void ticker_resume_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			     uint32_t remainder, uint16_t lazy, uint8_t force,
			     void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_resume};
	struct lll_event *resume_event;
	uint32_t ret;

	ARG_UNUSED(ticks_drift);
	LL_ASSERT(lazy == 0);

	resume_event = param;

	/* Append timing parameters */
	resume_event->prepare_param.ticks_at_expire = ticks_at_expire;
	resume_event->prepare_param.remainder = remainder;
	resume_event->prepare_param.lazy = 0;
	resume_event->prepare_param.force = force;
	mfy.param = resume_event;

	/* Kick LLL resume */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);

	LL_ASSERT(!ret);
}
#endif /* CONFIG_BT_CTLR_CONN_ISO || CONFIG_BT_CTLR_SYNC_ISO */
