1 /*
2  * Copyright (c) 2017-2021 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <stddef.h>
8 #include <stdbool.h>
9 #include <errno.h>
10 
11 #include <zephyr/kernel.h>
12 #include <soc.h>
13 #include <zephyr/device.h>
14 #include <zephyr/drivers/entropy.h>
15 #include <zephyr/bluetooth/hci_types.h>
16 
17 #include "hal/cpu.h"
18 #include "hal/ecb.h"
19 #include "hal/ccm.h"
20 #include "hal/cntr.h"
21 #include "hal/ticker.h"
22 
23 #include "util/util.h"
24 #include "util/mem.h"
25 #include "util/mfifo.h"
26 #include "util/memq.h"
27 #include "util/mayfly.h"
28 #include "util/dbuf.h"
29 
30 #include "ticker/ticker.h"
31 
32 #include "pdu_df.h"
33 #include "lll/pdu_vendor.h"
34 #include "pdu.h"
35 
36 #include "lll.h"
37 #include "lll/lll_vendor.h"
38 #include "lll/lll_adv_types.h"
39 #include "lll_adv.h"
40 #include "lll/lll_adv_pdu.h"
41 #include "lll_chan.h"
42 #include "lll_scan.h"
43 #include "lll/lll_df_types.h"
44 #include "lll_sync.h"
45 #include "lll_sync_iso.h"
46 #include "lll_iso_tx.h"
47 #include "lll_conn.h"
48 #include "lll_conn_iso.h"
49 #include "lll_df.h"
50 
51 #include "ull_adv_types.h"
52 #include "ull_scan_types.h"
53 #include "ull_sync_types.h"
54 #include "ll_sw/ull_tx_queue.h"
55 #include "ull_conn_types.h"
56 #include "ull_filter.h"
57 #include "ull_df_types.h"
58 #include "ull_df_internal.h"
59 
60 #if defined(CONFIG_BT_CTLR_USER_EXT)
61 #include "ull_vendor.h"
62 #endif /* CONFIG_BT_CTLR_USER_EXT */
63 
64 #include "isoal.h"
65 #include "ll_feat_internal.h"
66 #include "ull_internal.h"
67 #include "ull_chan_internal.h"
68 #include "ull_iso_internal.h"
69 #include "ull_adv_internal.h"
70 #include "ull_scan_internal.h"
71 #include "ull_sync_internal.h"
72 #include "ull_sync_iso_internal.h"
73 #include "ull_central_internal.h"
74 #include "ull_iso_types.h"
75 #include "ull_conn_internal.h"
76 #include "ull_conn_iso_types.h"
77 #include "ull_central_iso_internal.h"
78 #include "ull_llcp_internal.h"
79 #include "ull_llcp.h"
80 
81 #include "ull_conn_iso_internal.h"
82 #include "ull_peripheral_iso_internal.h"
83 
84 #include "ll.h"
85 #include "ll_feat.h"
86 #include "ll_test.h"
87 #include "ll_settings.h"
88 
89 #include "hal/debug.h"
90 
91 #if defined(CONFIG_BT_BROADCASTER)
92 #define BT_ADV_TICKER_NODES ((TICKER_ID_ADV_LAST) - (TICKER_ID_ADV_STOP) + 1)
93 #if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
94 #define BT_ADV_AUX_TICKER_NODES ((TICKER_ID_ADV_AUX_LAST) - \
95 				 (TICKER_ID_ADV_AUX_BASE) + 1)
96 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
97 #define BT_ADV_SYNC_TICKER_NODES ((TICKER_ID_ADV_SYNC_LAST) - \
98 				  (TICKER_ID_ADV_SYNC_BASE) + 1)
99 #if defined(CONFIG_BT_CTLR_ADV_ISO)
100 #define BT_ADV_ISO_TICKER_NODES ((TICKER_ID_ADV_ISO_LAST) - \
101 				  (TICKER_ID_ADV_ISO_BASE) + 1)
102 #else /* !CONFIG_BT_CTLR_ADV_ISO */
103 #define BT_ADV_ISO_TICKER_NODES 0
104 #endif /* !CONFIG_BT_CTLR_ADV_ISO */
105 #else /* !CONFIG_BT_CTLR_ADV_PERIODIC */
106 #define BT_ADV_SYNC_TICKER_NODES 0
107 #define BT_ADV_ISO_TICKER_NODES 0
108 #endif /* !CONFIG_BT_CTLR_ADV_PERIODIC */
109 #else /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
110 #define BT_ADV_AUX_TICKER_NODES 0
111 #define BT_ADV_SYNC_TICKER_NODES 0
112 #define BT_ADV_ISO_TICKER_NODES 0
113 #endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
114 #else /* !CONFIG_BT_BROADCASTER */
115 #define BT_ADV_TICKER_NODES 0
116 #define BT_ADV_AUX_TICKER_NODES 0
117 #define BT_ADV_SYNC_TICKER_NODES 0
118 #define BT_ADV_ISO_TICKER_NODES 0
119 #endif /* !CONFIG_BT_BROADCASTER */
120 
121 #if defined(CONFIG_BT_OBSERVER)
122 #define BT_SCAN_TICKER_NODES ((TICKER_ID_SCAN_LAST) - (TICKER_ID_SCAN_STOP) + 1)
123 #if defined(CONFIG_BT_CTLR_ADV_EXT)
124 #if defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
125 #define BT_SCAN_AUX_TICKER_NODES 1
126 #else /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
127 #define BT_SCAN_AUX_TICKER_NODES ((TICKER_ID_SCAN_AUX_LAST) - \
128 				  (TICKER_ID_SCAN_AUX_BASE) + 1)
129 #endif /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
130 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
131 #define BT_SCAN_SYNC_TICKER_NODES ((TICKER_ID_SCAN_SYNC_LAST) - \
132 				   (TICKER_ID_SCAN_SYNC_BASE) + 1)
133 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
134 #define BT_SCAN_SYNC_ISO_TICKER_NODES ((TICKER_ID_SCAN_SYNC_ISO_LAST) - \
135 				       (TICKER_ID_SCAN_SYNC_ISO_BASE) + 1 + \
136 				       (TICKER_ID_SCAN_SYNC_ISO_RESUME_LAST) - \
137 				       (TICKER_ID_SCAN_SYNC_ISO_RESUME_BASE) + 1)
138 #else /* !CONFIG_BT_CTLR_SYNC_ISO */
139 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
140 #endif /* !CONFIG_BT_CTLR_SYNC_ISO */
141 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
142 #define BT_SCAN_SYNC_TICKER_NODES 0
143 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
144 #endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
145 #else /* !CONFIG_BT_CTLR_ADV_EXT */
146 #define BT_SCAN_AUX_TICKER_NODES 0
147 #define BT_SCAN_SYNC_TICKER_NODES 0
148 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
149 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
150 #else
151 #define BT_SCAN_TICKER_NODES 0
152 #define BT_SCAN_AUX_TICKER_NODES 0
153 #define BT_SCAN_SYNC_TICKER_NODES 0
154 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
155 #endif
156 
157 #if defined(CONFIG_BT_CONN)
158 #define BT_CONN_TICKER_NODES ((TICKER_ID_CONN_LAST) - (TICKER_ID_CONN_BASE) + 1)
159 #else
160 #define BT_CONN_TICKER_NODES 0
161 #endif
162 
163 #if defined(CONFIG_BT_CTLR_CONN_ISO)
164 #define BT_CIG_TICKER_NODES ((TICKER_ID_CONN_ISO_LAST) - \
165 			     (TICKER_ID_CONN_ISO_BASE) + 1 + \
166 			     (TICKER_ID_CONN_ISO_RESUME_LAST) - \
167 			     (TICKER_ID_CONN_ISO_RESUME_BASE) + 1)
168 
169 #else
170 #define BT_CIG_TICKER_NODES 0
171 #endif
172 
173 #if defined(CONFIG_BT_CTLR_USER_EXT)
174 #define USER_TICKER_NODES         CONFIG_BT_CTLR_USER_TICKER_ID_RANGE
175 #else
176 #define USER_TICKER_NODES         0
177 #endif
178 
179 
180 #if defined(CONFIG_BT_CTLR_COEX_TICKER)
181 #define COEX_TICKER_NODES             1
182 					/* No. of tickers reserved for coex drivers */
183 #else
184 #define COEX_TICKER_NODES             0
185 #endif
186 
187 
188 #if defined(CONFIG_SOC_FLASH_NRF_RADIO_SYNC_TICKER)
189 #define FLASH_TICKER_NODES             2 /* No. of tickers reserved for flash
190 					  * driver
191 					  */
192 #define TICKER_USER_ULL_HIGH_FLASH_OPS 1 /* No. of additional ticker ULL_HIGH
193 					  * context operations
194 					  */
195 #define TICKER_USER_THREAD_FLASH_OPS   1 /* No. of additional ticker thread
196 					  * context operations
197 					  */
198 #else
199 #define FLASH_TICKER_NODES             0
200 #define TICKER_USER_ULL_HIGH_FLASH_OPS 0
201 #define TICKER_USER_THREAD_FLASH_OPS   0
202 #endif
203 
204 /* Define ticker nodes */
205 /* NOTE: FLASH_TICKER_NODES shall be placed after the Link Layer's list of
206  *       ticker id allocations; refer to ll_timeslice_ticker_id_get on how the
207  *       ticker id used by the flash driver is returned.
208  */
209 #define TICKER_NODES              (TICKER_ID_ULL_BASE + \
210 				   BT_ADV_TICKER_NODES + \
211 				   BT_ADV_AUX_TICKER_NODES + \
212 				   BT_ADV_SYNC_TICKER_NODES + \
213 				   BT_ADV_ISO_TICKER_NODES + \
214 				   BT_SCAN_TICKER_NODES + \
215 				   BT_SCAN_AUX_TICKER_NODES + \
216 				   BT_SCAN_SYNC_TICKER_NODES + \
217 				   BT_SCAN_SYNC_ISO_TICKER_NODES + \
218 				   BT_CONN_TICKER_NODES + \
219 				   BT_CIG_TICKER_NODES + \
220 				   USER_TICKER_NODES + \
221 				   FLASH_TICKER_NODES + \
222 				   COEX_TICKER_NODES)
223 
224 /* Ticker implementation supports up to 255 ticker node count value */
225 BUILD_ASSERT(TICKER_NODES <= UINT8_MAX);
226 
227 /* When both central and peripheral are supported, one Rx node each will be
228  * needed by connectable advertising and by the initiator to generate the
229  * connection complete event, hence conditionally set the count.
230  */
231 #if defined(CONFIG_BT_MAX_CONN)
232 #if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_PERIPHERAL)
233 #define BT_CTLR_MAX_CONNECTABLE (1U + MIN(((CONFIG_BT_MAX_CONN) - 1U), \
234 					  (BT_CTLR_ADV_SET)))
235 #else
236 #define BT_CTLR_MAX_CONNECTABLE MAX(1U, (BT_CTLR_ADV_SET))
237 #endif
238 #define BT_CTLR_MAX_CONN        CONFIG_BT_MAX_CONN
239 #else
240 #define BT_CTLR_MAX_CONNECTABLE 0
241 #define BT_CTLR_MAX_CONN        0
242 #endif
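
/* Illustrative expansion of the count above (hypothetical Kconfig values, not
 * taken from any particular build): with CONFIG_BT_MAX_CONN = 3 and
 * BT_CTLR_ADV_SET = 1, a build supporting both central and peripheral roles
 * would evaluate to
 *   BT_CTLR_MAX_CONNECTABLE = 1U + MIN((3U - 1U), 1) = 2
 * i.e. one Rx node for the initiator plus one per connectable advertising set,
 * capped by the remaining connection instances.
 */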
243 
244 #if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_OBSERVER)
245 #if defined(CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX)
246 /* Note: Need node for PDU and CTE sample */
247 #if defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
248 #define BT_CTLR_ADV_EXT_RX_CNT  (MIN(CONFIG_BT_CTLR_SCAN_AUX_CHAIN_COUNT, \
249 				     CONFIG_BT_PER_ADV_SYNC_MAX) * \
250 				 CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX * 2)
251 #else /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
252 #define BT_CTLR_ADV_EXT_RX_CNT  (CONFIG_BT_CTLR_SCAN_AUX_SET * \
253 				 CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX * 2)
254 #endif /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
255 #else /* !CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX */
256 #define BT_CTLR_ADV_EXT_RX_CNT  1
257 #endif /* !CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX */
258 #else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
259 #define BT_CTLR_ADV_EXT_RX_CNT  0
260 #endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
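
/* Illustrative expansion of the count above (hypothetical Kconfig values):
 * without scan aux chains, CONFIG_BT_CTLR_SCAN_AUX_SET = 1 and
 * CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX = 8 would evaluate to
 *   BT_CTLR_ADV_EXT_RX_CNT = 1 * 8 * 2 = 16
 * the factor of 2 reserving one node for the PDU and one for the CTE samples
 * of each expected IQ report.
 */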
261 
262 #if !defined(TICKER_USER_LLL_VENDOR_OPS)
263 #define TICKER_USER_LLL_VENDOR_OPS 0
264 #endif /* TICKER_USER_LLL_VENDOR_OPS */
265 
266 #if !defined(TICKER_USER_ULL_HIGH_VENDOR_OPS)
267 #define TICKER_USER_ULL_HIGH_VENDOR_OPS 0
268 #endif /* TICKER_USER_ULL_HIGH_VENDOR_OPS */
269 
270 #if !defined(TICKER_USER_ULL_LOW_VENDOR_OPS)
271 #define TICKER_USER_ULL_LOW_VENDOR_OPS 0
272 #endif /* TICKER_USER_ULL_LOW_VENDOR_OPS */
273 
274 #if !defined(TICKER_USER_THREAD_VENDOR_OPS)
275 #define TICKER_USER_THREAD_VENDOR_OPS 0
276 #endif /* TICKER_USER_THREAD_VENDOR_OPS */
277 
278 /* Define ticker user operations */
279 #if defined(CONFIG_BT_CTLR_LOW_LAT) && \
280 	(CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
281 /* NOTE: When ticker job is disabled inside radio events then all advertising,
282  *       scanning, and peripheral latency cancel ticker operations will be deferred,
283  *       requiring increased ticker thread context operation queue count.
284  */
285 #define TICKER_USER_THREAD_OPS   (BT_CTLR_ADV_SET + BT_CTLR_SCAN_SET + \
286 				  BT_CTLR_MAX_CONN + \
287 				  TICKER_USER_THREAD_VENDOR_OPS + \
288 				  TICKER_USER_THREAD_FLASH_OPS + \
289 				  1)
290 #else /* !CONFIG_BT_CTLR_LOW_LAT */
291 /* NOTE: As ticker job is not disabled inside radio events, no need for extra
292  *       thread operations queue element for flash driver.
293  */
294 #define TICKER_USER_THREAD_OPS   (1 + TICKER_USER_THREAD_VENDOR_OPS + 1)
295 #endif /* !CONFIG_BT_CTLR_LOW_LAT */
296 
297 #define TICKER_USER_ULL_LOW_OPS  (1 + TICKER_USER_ULL_LOW_VENDOR_OPS + 1)
298 
299 /* NOTE: Extended Advertising needs one extra ticker operation being enqueued
300  *       for scheduling the auxiliary PDU reception while there can already
301  *       be three other operations being enqueued.
302  *
303  *       This value also covers the case where an initiator with 1M and Coded
304  *       PHY scan windows is stopping the two scan tickers, stopping one scan
305  *       stop ticker and starting one new ticker to establish an ACL connection.
306  */
307 #if defined(CONFIG_BT_CTLR_ADV_EXT)
308 #define TICKER_USER_ULL_HIGH_OPS (4 + TICKER_USER_ULL_HIGH_VENDOR_OPS + \
309 				  TICKER_USER_ULL_HIGH_FLASH_OPS + 1)
310 #else /* !CONFIG_BT_CTLR_ADV_EXT */
311 #define TICKER_USER_ULL_HIGH_OPS (3 + TICKER_USER_ULL_HIGH_VENDOR_OPS + \
312 				  TICKER_USER_ULL_HIGH_FLASH_OPS + 1)
313 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
314 
315 #define TICKER_USER_LLL_OPS      (3 + TICKER_USER_LLL_VENDOR_OPS + 1)
316 
317 #define TICKER_USER_OPS           (TICKER_USER_LLL_OPS + \
318 				   TICKER_USER_ULL_HIGH_OPS + \
319 				   TICKER_USER_ULL_LOW_OPS + \
320 				   TICKER_USER_THREAD_OPS)
321 
322 /* Memory for ticker nodes/instances */
323 static uint8_t MALIGN(4) ticker_nodes[TICKER_NODES][TICKER_NODE_T_SIZE];
324 
325 /* Memory for users/contexts operating on ticker module */
326 static uint8_t MALIGN(4) ticker_users[MAYFLY_CALLER_COUNT][TICKER_USER_T_SIZE];
327 
328 /* Memory for user/context simultaneous API operations */
329 static uint8_t MALIGN(4) ticker_user_ops[TICKER_USER_OPS][TICKER_USER_OP_T_SIZE];
330 
331 /* Semaphore to wake up the thread on ticker API callback */
332 static struct k_sem sem_ticker_api_cb;
333 
334 /* Semaphore to wake up the thread on Rx-ed objects */
335 static struct k_sem *sem_recv;
336 
337 /* Declare prepare-event FIFO: mfifo_prep.
338  * Queue of struct node_rx_event_done
339  */
340 static MFIFO_DEFINE(prep, sizeof(struct lll_event), EVENT_PIPELINE_MAX);
341 
342 /* Declare done-event RXFIFO. This is a composite pool-backed MFIFO for rx_nodes.
343  * The declaration constructs the following data structures:
344  * - mfifo_done:    FIFO with pointers to struct node_rx_event_done
345  * - mem_done:      Backing data pool for struct node_rx_event_done elements
346  * - mem_link_done: Pool of memq_link_t elements
347  *
348  * Queue of pointers to struct node_rx_event_done.
349  * The actual backing behind these pointers is mem_done.
350  *
351  * When there are radio events with time reservations lower than the preemption
352  * timeout of 1.5 ms, the pipeline has to account for the maximum radio events
353  * that can be enqueued during the preempt timeout duration. All these enqueued
354  * events could be aborted in case of late scheduling, needing as many done
355  * event buffers.
356  *
357  * During continuous scanning, there can be 1 active radio event, 1 scan resume
358  * and 1 new scan prepare. If there is a peripheral prepare in addition, and all
359  * of these abort due to late scheduling, then 4 done buffers are needed.
360  *
361  * If there are additional peripheral prepares enqueued, which are spaced apart
362  * by their time reservations, these are not yet late and hence no additional
363  * done buffers are needed.
364  *
365  * If Extended Scanning is supported, then an additional auxiliary scan event's
366  * prepare could be enqueued in the pipeline during the preemption duration.
367  *
368  * If Extended Scanning with Coded PHY is supported, then an additional 1 resume
369  * prepare could be enqueued in the pipeline during the preemption duration.
370  */
371 #if !defined(VENDOR_EVENT_DONE_MAX)
372 #if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_OBSERVER)
373 #if defined(CONFIG_BT_CTLR_PHY_CODED)
374 #define EVENT_DONE_MAX 6
375 #else /* !CONFIG_BT_CTLR_PHY_CODED */
376 #define EVENT_DONE_MAX 5
377 #endif /* !CONFIG_BT_CTLR_PHY_CODED */
378 #else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
379 #define EVENT_DONE_MAX 4
380 #endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
381 #else
382 #define EVENT_DONE_MAX VENDOR_EVENT_DONE_MAX
383 #endif
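
/* Summary of the values above, derived from the pipeline note preceding this
 * block: 1 active radio event + 1 scan resume + 1 new scan prepare +
 * 1 peripheral prepare = 4; plus 1 auxiliary scan prepare with Extended
 * Scanning = 5; plus 1 resume prepare with Coded PHY = 6.
 */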
384 
385 /* Maximum time allowed for completing synchronous LLL disabling via
386  * ull_disable.
387  */
388 #define ULL_DISABLE_TIMEOUT K_MSEC(1000)
389 
390 static RXFIFO_DEFINE(done, sizeof(struct node_rx_event_done),
391 		     EVENT_DONE_MAX, 0U);
392 
393 /* Minimum number of node rx for ULL to LL/HCI thread per connection.
394  * Increasing this by a multiple of the maximum simultaneous connection count
395  * will permit simultaneous parallel PHY Update or Connection Update procedures
396  * amongst active connections.
397  * A minimum of 2 node rx are reserved because:
398  *   Central and peripheral always use two new nodes for handling completion
399  *   notification, one for PHY Update complete and another for Data Length
400  *   Update complete.
401  */
402 #if defined(CONFIG_BT_CTLR_DATA_LENGTH) && defined(CONFIG_BT_CTLR_PHY)
403 #define LL_PDU_RX_CNT (2 * (CONFIG_BT_CTLR_LLCP_CONN))
404 #elif defined(CONFIG_BT_CONN)
405 #define LL_PDU_RX_CNT (CONFIG_BT_CTLR_LLCP_CONN)
406 #else
407 #define LL_PDU_RX_CNT 0
408 #endif
409 
410 /* No. of node rx for LLL to ULL.
411  * Reserve 3, 1 for adv data, 1 for scan response and 1 for empty PDU reception.
412  */
413 #define PDU_RX_CNT    (3 + BT_CTLR_ADV_EXT_RX_CNT + CONFIG_BT_CTLR_RX_BUFFERS)
414 
415 /* Part sum of LLL to ULL and ULL to LL/HCI thread node rx count.
416  * Will be used below in allocating node rx pool.
417  */
418 #define RX_CNT        (PDU_RX_CNT + LL_PDU_RX_CNT)
419 
420 static MFIFO_DEFINE(pdu_rx_free, sizeof(void *), PDU_RX_CNT);
421 
422 #if defined(CONFIG_BT_RX_USER_PDU_LEN)
423 #define PDU_RX_USER_PDU_OCTETS_MAX (CONFIG_BT_RX_USER_PDU_LEN)
424 #else
425 #define PDU_RX_USER_PDU_OCTETS_MAX 0
426 #endif
427 
428 #define PDU_ADV_SIZE  MAX(PDU_AC_LL_SIZE_MAX, \
429 			  (PDU_AC_LL_HEADER_SIZE + LL_EXT_OCTETS_RX_MAX))
430 
431 #define PDU_DATA_SIZE (PDU_DC_LL_HEADER_SIZE + LL_LENGTH_OCTETS_RX_MAX)
432 
433 #define PDU_CTRL_SIZE (PDU_DC_LL_HEADER_SIZE + PDU_DC_CTRL_RX_SIZE_MAX)
434 
435 #define NODE_RX_HEADER_SIZE (offsetof(struct node_rx_pdu, pdu))
436 
437 #define PDU_RX_NODE_POOL_ELEMENT_SIZE MROUND(NODE_RX_HEADER_SIZE + \
438 					     MAX(MAX(PDU_ADV_SIZE, \
439 						     MAX(PDU_DATA_SIZE, \
440 							 PDU_CTRL_SIZE)), \
441 						 PDU_RX_USER_PDU_OCTETS_MAX))
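
/* Illustrative sizing of the element above (assumed values, and assuming
 * MROUND() rounds up to a multiple of 4): if the largest of the PDU sizes
 * were 45 octets and NODE_RX_HEADER_SIZE were 12 octets, then
 *   PDU_RX_NODE_POOL_ELEMENT_SIZE = MROUND(12 + 45) = 60
 */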
442 
443 #if defined(CONFIG_BT_CTLR_ADV_ISO_SET)
444 #define BT_CTLR_ADV_ISO_SET CONFIG_BT_CTLR_ADV_ISO_SET
445 #else
446 #define BT_CTLR_ADV_ISO_SET 0
447 #endif
448 
449 #if defined(CONFIG_BT_PER_ADV_SYNC_MAX)
450 #define BT_CTLR_SCAN_SYNC_SET CONFIG_BT_PER_ADV_SYNC_MAX
451 #else
452 #define BT_CTLR_SCAN_SYNC_SET 0
453 #endif
454 
455 #if defined(CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET)
456 #define BT_CTLR_SCAN_SYNC_ISO_SET CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET
457 #else
458 #define BT_CTLR_SCAN_SYNC_ISO_SET 0
459 #endif
460 
461 #define PDU_RX_POOL_SIZE (PDU_RX_NODE_POOL_ELEMENT_SIZE * \
462 			  (RX_CNT + BT_CTLR_MAX_CONNECTABLE + \
463 			   BT_CTLR_ADV_SET + BT_CTLR_SCAN_SYNC_SET))
464 
465 /* Macros for encoding number of completed packets.
466  *
467  * If the pointer is numerically below 0x100, the pointer is treated as either
468  * data or control PDU.
469  *
470  * NOTE: For any architecture which would map RAM below address 0x100, this will
471  * not work.
472  */
473 #define IS_NODE_TX_PTR(_p) ((uint32_t)(_p) & ~0xFFUL)
474 #define IS_NODE_TX_DATA(_p) ((uint32_t)(_p) == 0x01UL)
475 #define IS_NODE_TX_CTRL(_p) ((uint32_t)(_p) == 0x02UL)
476 #define NODE_TX_DATA_SET(_p) ((_p) = (void *)0x01UL)
477 #define NODE_TX_CTRL_SET(_p) ((_p) = (void *)0x02UL)
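
/* Illustrative use of the marker macros above (a sketch only, not taken from
 * the call sites in this file):
 *
 *   void *node;
 *
 *   NODE_TX_DATA_SET(node);   // node now holds the value 0x01
 *   IS_NODE_TX_PTR(node);     // false: below 0x100, not a real node pointer
 *   IS_NODE_TX_DATA(node);    // true: marker for a data PDU
 *   IS_NODE_TX_CTRL(node);    // false
 */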
478 
479 /* Macros for encoding number of ISO SDU fragments in the enqueued TX node
480  * pointer. This is needed to ensure only a single release of the node and link
481  * in tx_cmplt_get, even when called several times. At all times, the number of
482  * fragments must be available for HCI complete-counting.
483  *
484  * If the pointer is numerically below 0x100, the pointer is treated as a one
485  * byte fragments count.
486  *
487  * NOTE: For any architecture which would map RAM below address 0x100, this will
488  * not work.
489  */
490 #define NODE_TX_FRAGMENTS_GET(_p) ((uint32_t)(_p) & 0xFFUL)
491 #define NODE_TX_FRAGMENTS_SET(_p, _cmplt) ((_p) = (void *)(uint32_t)(_cmplt))
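
/* Illustrative use of the fragment-count encoding above (a sketch only): an
 * ISO SDU fragmented into 3 PDUs could be recorded and later recovered as
 *
 *   NODE_TX_FRAGMENTS_SET(node, 3U);   // node now holds the value 0x03
 *   NODE_TX_FRAGMENTS_GET(node);       // evaluates to 3
 */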
492 
493 static struct {
494 	void *free;
495 	uint8_t pool[PDU_RX_POOL_SIZE];
496 } mem_pdu_rx;
497 
498 /* NOTE: Two memq_link structures are reserved in the case of periodic sync,
499  * one each for the sync established and sync lost events respectively.
500  * Whereas, in comparison, a connection uses the incoming Rx-ed CONNECT_IND
501  * PDU to piggy-back generation of the connection complete event, and hence
502  * only one is reserved for the generation of the disconnection event (which
503  * can happen due to supervision timeout and other reasons that don't have an
504  * incoming Rx-ed PDU).
505  */
506 #define LINK_RX_POOL_SIZE                                                      \
507 	(sizeof(memq_link_t) *                                                 \
508 	 (RX_CNT + 2 + BT_CTLR_MAX_CONN + BT_CTLR_ADV_SET +                    \
509 	  (BT_CTLR_ADV_ISO_SET * 2) + (BT_CTLR_SCAN_SYNC_SET * 2) +            \
510 	  (BT_CTLR_SCAN_SYNC_ISO_SET * 2) +                                    \
511 	  (IQ_REPORT_CNT)))
512 static struct {
513 	uint16_t quota_pdu; /* Number of un-utilized buffers */
514 
515 	void *free;
516 	uint8_t pool[LINK_RX_POOL_SIZE];
517 } mem_link_rx;
518 
519 static MEMQ_DECLARE(ull_rx);
520 static MEMQ_DECLARE(ll_rx);
521 
522 #if defined(CONFIG_BT_CTLR_ISO) || \
523 	defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER) || \
524 	defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
525 #define ULL_TIME_WRAPPING_POINT_US	(HAL_TICKER_TICKS_TO_US_64BIT(HAL_TICKER_CNTR_MASK))
526 #define ULL_TIME_SPAN_FULL_US		(ULL_TIME_WRAPPING_POINT_US + 1)
527 #endif /* CONFIG_BT_CTLR_ISO ||
528 	* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER ||
529 	* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER
530 	*/
531 
532 #if defined(CONFIG_BT_CONN)
533 static MFIFO_DEFINE(ll_pdu_rx_free, sizeof(void *), LL_PDU_RX_CNT);
534 
535 static void *mark_update;
536 #endif /* CONFIG_BT_CONN */
537 
538 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
539 #if defined(CONFIG_BT_CONN)
540 #define BT_CTLR_TX_BUFFERS (CONFIG_BT_BUF_ACL_TX_COUNT + LLCP_TX_CTRL_BUF_COUNT)
541 #else
542 #define BT_CTLR_TX_BUFFERS 0
543 #endif /* CONFIG_BT_CONN */
544 
545 static MFIFO_DEFINE(tx_ack, sizeof(struct lll_tx),
546 		    BT_CTLR_TX_BUFFERS + BT_CTLR_ISO_TX_PDU_BUFFERS);
547 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
548 
549 static void *mark_disable;
550 
551 static inline int init_reset(void);
552 static void perform_lll_reset(void *param);
553 static inline void *mark_set(void **m, void *param);
554 static inline void *mark_unset(void **m, void *param);
555 static inline void *mark_get(void *m);
556 static void rx_replenish_all(void);
557 #if defined(CONFIG_BT_CONN) || \
558 	(defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
559 	defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
560 	defined(CONFIG_BT_CTLR_ADV_ISO)
561 static void rx_release_replenish(struct node_rx_hdr *rx);
562 static void rx_link_dequeue_release_quota_inc(memq_link_t *link);
563 #endif /* CONFIG_BT_CONN ||
564 	* (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT) ||
565 	* CONFIG_BT_CTLR_ADV_PERIODIC ||
566 	* CONFIG_BT_CTLR_ADV_ISO
567 	*/
568 static void rx_demux(void *param);
569 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
570 static void rx_demux_yield(void);
571 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
572 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
573 static uint8_t tx_cmplt_get(uint16_t *handle, uint8_t *first, uint8_t last);
574 static inline void rx_demux_conn_tx_ack(uint8_t ack_last, uint16_t handle,
575 					memq_link_t *link,
576 					struct node_tx *node_tx);
577 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
578 static inline void rx_demux_rx(memq_link_t *link, struct node_rx_hdr *rx_hdr);
579 static inline void rx_demux_event_done(memq_link_t *link,
580 				       struct node_rx_event_done *done);
581 static void ll_rx_link_quota_inc(void);
582 static void ll_rx_link_quota_dec(void);
583 static void disabled_cb(void *param);
584 
585 int ll_init(struct k_sem *sem_rx)
586 {
587 	static bool mayfly_initialized;
588 	int err;
589 
590 	/* Store the semaphore to be used to wake up the Thread context */
591 	sem_recv = sem_rx;
592 
593 	/* Initialize counter */
594 	/* TODO: Bind and use counter driver? */
595 	cntr_init();
596 
597 	/* Initialize mayfly. It shall be done only once due to the mayfly design.
598 	 *
599 	 * On init, the mayfly memq head and tail are assigned a link instance
600 	 * that is used during the enqueue operation. A new link provided by
601 	 * enqueue is added as the tail and will be used in a future enqueue. On
602 	 * dequeue, the link that was used for storage of the job is released
603 	 * and stored in the job it was related to. The job may store the
604 	 * initial link. If mayfly is re-initialized but the job objects are
605 	 * not, there is a risk that an enqueued job will point to the same link
606 	 * as is in the memq just after re-initialization. After an enqueue
607 	 * operation with that link, head and tail still point to the same link
608 	 * object, so the memq is considered empty.
609 	 */
610 	if (!mayfly_initialized) {
611 		mayfly_init();
612 		mayfly_initialized = true;
613 	}
614 
615 
616 	/* Initialize Ticker */
617 	ticker_users[MAYFLY_CALL_ID_0][0] = TICKER_USER_LLL_OPS;
618 	ticker_users[MAYFLY_CALL_ID_1][0] = TICKER_USER_ULL_HIGH_OPS;
619 	ticker_users[MAYFLY_CALL_ID_2][0] = TICKER_USER_ULL_LOW_OPS;
620 	ticker_users[MAYFLY_CALL_ID_PROGRAM][0] = TICKER_USER_THREAD_OPS;
621 
622 	err = ticker_init(TICKER_INSTANCE_ID_CTLR,
623 			  TICKER_NODES, &ticker_nodes[0],
624 			  MAYFLY_CALLER_COUNT, &ticker_users[0],
625 			  TICKER_USER_OPS, &ticker_user_ops[0],
626 			  hal_ticker_instance0_caller_id_get,
627 			  hal_ticker_instance0_sched,
628 			  hal_ticker_instance0_trigger_set);
629 	LL_ASSERT(!err);
630 
631 	/* Initialize semaphore for ticker API blocking wait */
632 	k_sem_init(&sem_ticker_api_cb, 0, 1);
633 
634 	/* Initialize LLL */
635 	err = lll_init();
636 	if (err) {
637 		return err;
638 	}
639 
640 	/* Initialize ULL internals */
641 	/* TODO: globals? */
642 
643 	/* Common to init and reset */
644 	err = init_reset();
645 	if (err) {
646 		return err;
647 	}
648 
649 #if defined(CONFIG_BT_BROADCASTER)
650 	err = lll_adv_init();
651 	if (err) {
652 		return err;
653 	}
654 
655 	err = ull_adv_init();
656 	if (err) {
657 		return err;
658 	}
659 #endif /* CONFIG_BT_BROADCASTER */
660 
661 #if defined(CONFIG_BT_OBSERVER)
662 	err = lll_scan_init();
663 	if (err) {
664 		return err;
665 	}
666 
667 	err = ull_scan_init();
668 	if (err) {
669 		return err;
670 	}
671 #endif /* CONFIG_BT_OBSERVER */
672 
673 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
674 	err = lll_sync_init();
675 	if (err) {
676 		return err;
677 	}
678 
679 	err = ull_sync_init();
680 	if (err) {
681 		return err;
682 	}
683 
684 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
685 	err = ull_sync_iso_init();
686 	if (err) {
687 		return err;
688 	}
689 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
690 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
691 
692 #if defined(CONFIG_BT_CONN)
693 	err = lll_conn_init();
694 	if (err) {
695 		return err;
696 	}
697 
698 	err = ull_conn_init();
699 	if (err) {
700 		return err;
701 	}
702 #endif /* CONFIG_BT_CONN */
703 
704 #if defined(CONFIG_BT_CTLR_DF)
705 	err = ull_df_init();
706 	if (err) {
707 		return err;
708 	}
709 #endif
710 
711 #if defined(CONFIG_BT_CTLR_ISO)
712 	err = ull_iso_init();
713 	if (err) {
714 		return err;
715 	}
716 #endif /* CONFIG_BT_CTLR_ISO */
717 
718 #if defined(CONFIG_BT_CTLR_CONN_ISO)
719 	err = ull_conn_iso_init();
720 	if (err) {
721 		return err;
722 	}
723 #endif /* CONFIG_BT_CTLR_CONN_ISO */
724 
725 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
726 	err = ull_peripheral_iso_init();
727 	if (err) {
728 		return err;
729 	}
730 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
731 
732 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
733 	err = ull_central_iso_init();
734 	if (err) {
735 		return err;
736 	}
737 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
738 
739 #if defined(CONFIG_BT_CTLR_ADV_ISO)
740 	err = ull_adv_iso_init();
741 	if (err) {
742 		return err;
743 	}
744 #endif /* CONFIG_BT_CTLR_ADV_ISO */
745 
746 #if defined(CONFIG_BT_CTLR_DF)
747 	err = lll_df_init();
748 	if (err) {
749 		return err;
750 	}
751 #endif
752 
753 #if defined(CONFIG_BT_CTLR_USER_EXT)
754 	err = ull_user_init();
755 	if (err) {
756 		return err;
757 	}
758 #endif /* CONFIG_BT_CTLR_USER_EXT */
759 
760 	/* reset filter accept list, resolving list and initialize RPA timeout */
761 	if (IS_ENABLED(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)) {
762 		ull_filter_reset(true);
763 	}
764 
765 #if defined(CONFIG_BT_CTLR_TEST)
766 	err = mem_ut();
767 	if (err) {
768 		return err;
769 	}
770 
771 	err = ecb_ut();
772 	if (err) {
773 		return err;
774 	}
775 
776 #if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
777 	lll_chan_sel_2_ut();
778 #endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */
779 #endif /* CONFIG_BT_CTLR_TEST */
780 
781 	return  0;
782 }
783 
784 int ll_deinit(void)
785 {
786 	int err;
787 
788 	ll_reset();
789 
790 	err = lll_deinit();
791 	if (err) {
792 		return err;
793 	}
794 
795 	err = ticker_deinit(TICKER_INSTANCE_ID_CTLR);
796 
797 	return err;
798 }
799 
800 void ll_reset(void)
801 {
802 	int err;
803 
804 	/* Note: The sequence of reset control flow is as follows:
805 	 * - Reset ULL context, i.e. stop ULL scheduling, abort LLL events etc.
806 	 * - Reset LLL context, i.e. post LLL event abort, let LLL cleanup its
807 	 *   variables, if any.
808 	 * - Reset ULL static variables (which otherwise are mem-zeroed in case
809 	 *   of power-on reset wherein the architecture startup mem-zeroes .bss
810 	 *   sections).
811 	 * - Initialize ULL context variable, similar to on-power-up.
812 	 */
813 
814 #if defined(CONFIG_BT_BROADCASTER)
815 #if defined(CONFIG_BT_CTLR_ADV_ISO)
816 	/* Reset adv iso sets */
817 	err = ull_adv_iso_reset();
818 	LL_ASSERT(!err);
819 #endif /* CONFIG_BT_CTLR_ADV_ISO */
820 
821 	/* Reset adv state */
822 	err = ull_adv_reset();
823 	LL_ASSERT(!err);
824 #endif /* CONFIG_BT_BROADCASTER */
825 
826 #if defined(CONFIG_BT_OBSERVER)
827 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
828 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
829 	/* Reset sync iso sets */
830 	err = ull_sync_iso_reset();
831 	LL_ASSERT(!err);
832 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
833 
834 	/* Reset periodic sync sets */
835 	err = ull_sync_reset();
836 	LL_ASSERT(!err);
837 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
838 
839 	/* Reset scan state */
840 	err = ull_scan_reset();
841 	LL_ASSERT(!err);
842 #endif /* CONFIG_BT_OBSERVER */
843 
844 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
845 	err = ull_peripheral_iso_reset();
846 	LL_ASSERT(!err);
847 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
848 
849 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
850 	err = ull_central_iso_reset();
851 	LL_ASSERT(!err);
852 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
853 
854 #if defined(CONFIG_BT_CTLR_CONN_ISO)
855 	err = ull_conn_iso_reset();
856 	LL_ASSERT(!err);
857 #endif /* CONFIG_BT_CTLR_CONN_ISO */
858 
859 #if defined(CONFIG_BT_CTLR_ISO)
860 	err = ull_iso_reset();
861 	LL_ASSERT(!err);
862 #endif /* CONFIG_BT_CTLR_ISO */
863 
864 #if defined(CONFIG_BT_CONN)
865 	/* Reset conn role */
866 	err = ull_conn_reset();
867 	LL_ASSERT(!err);
868 
869 	MFIFO_INIT(tx_ack);
870 #endif /* CONFIG_BT_CONN */
871 
872 	/* reset filter accept list and resolving list */
873 	if (IS_ENABLED(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)) {
874 		ull_filter_reset(false);
875 	}
876 
877 	/* Re-initialize ULL internals */
878 
879 	/* Re-initialize the prep mfifo */
880 	MFIFO_INIT(prep);
881 
882 	/* Re-initialize the free rx mfifo */
883 	MFIFO_INIT(pdu_rx_free);
884 
885 #if defined(CONFIG_BT_CONN)
886 	/* Re-initialize the free ll rx mfifo */
887 	MFIFO_INIT(ll_pdu_rx_free);
888 #endif /* CONFIG_BT_CONN */
889 
890 	/* Reset LLL via mayfly */
891 	{
892 		static memq_link_t link;
893 		static struct mayfly mfy = {0, 0, &link, NULL,
894 					    perform_lll_reset};
895 		uint32_t retval;
896 
897 		/* NOTE: If Zero Latency Interrupt is used, then LLL context
898 		 *       will be the highest priority IRQ in the system, hence
899 		 *       mayfly_enqueue will be done running the callee inline
900 		 *       (vector to the callee function) in this function. Else
901 		 *       we use a semaphore to wait for perform_lll_reset to
902 		 *       complete.
903 		 */
904 
905 #if !defined(CONFIG_BT_CTLR_ZLI)
906 		struct k_sem sem;
907 
908 		k_sem_init(&sem, 0, 1);
909 		mfy.param = &sem;
910 #endif /* !CONFIG_BT_CTLR_ZLI */
911 
912 		retval = mayfly_enqueue(TICKER_USER_ID_THREAD,
913 					TICKER_USER_ID_LLL, 0, &mfy);
914 		LL_ASSERT(!retval);
915 
916 #if !defined(CONFIG_BT_CTLR_ZLI)
917 		/* LLL reset must complete before returning - wait for
918 		 * reset completion in LLL mayfly thread
919 		 */
920 		k_sem_take(&sem, K_FOREVER);
921 #endif /* !CONFIG_BT_CTLR_ZLI */
922 	}
923 
924 #if defined(CONFIG_BT_BROADCASTER)
925 	/* Finalize after adv state LLL context reset */
926 	err = ull_adv_reset_finalize();
927 	LL_ASSERT(!err);
928 #endif /* CONFIG_BT_BROADCASTER */
929 
930 	/* Reset/End DTM Tx or Rx commands */
931 	if (IS_ENABLED(CONFIG_BT_CTLR_DTM)) {
932 		uint16_t num_rx;
933 
934 		(void)ll_test_end(&num_rx);
935 		ARG_UNUSED(num_rx);
936 	}
937 
938 	/* Common to init and reset */
939 	err = init_reset();
940 	LL_ASSERT(!err);
941 
942 #if defined(CONFIG_BT_CTLR_DF)
943 	/* Direction Finding has to be reset after ull init_reset call because
944 	 *  it uses mem_link_rx for node_rx_iq_report. The mem_link_rx is reset
945 	 *  in common ull init_reset.
946 	 */
947 	err = ull_df_reset();
948 	LL_ASSERT(!err);
949 #endif
950 
951 #if defined(CONFIG_BT_CTLR_SET_HOST_FEATURE)
952 	ll_feat_reset();
953 #endif /* CONFIG_BT_CTLR_SET_HOST_FEATURE */
954 
955 	/* clear static random address */
956 	(void)ll_addr_set(1U, NULL);
957 }
958 
959 /**
960  * @brief Peek the next node_rx to send up to Host
961  * @details Tightly coupled with prio_recv_thread()
962  *   Execution context: Controller thread
963  *
964  * @param[out] node_rx   Pointer to rx node at head of queue
965  * @param[out] handle    Connection handle
966  * @return Number of completed Tx nodes
967  */
968 uint8_t ll_rx_get(void **node_rx, uint16_t *handle)
969 {
970 	struct node_rx_pdu *rx;
971 	uint8_t cmplt = 0U;
972 	memq_link_t *link;
973 
974 #if defined(CONFIG_BT_CONN) || \
975 	(defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
976 	defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
977 	defined(CONFIG_BT_CTLR_ADV_ISO)
978 ll_rx_get_again:
979 #endif /* CONFIG_BT_CONN ||
980 	* (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT) ||
981 	* CONFIG_BT_CTLR_ADV_PERIODIC ||
982 	* CONFIG_BT_CTLR_ADV_ISO
983 	*/
984 
985 	*node_rx = NULL;
986 
987 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
988 	/* Save the tx_ack FIFO's last index to avoid the value changing if there are
989 	 * no Rx PDUs and we are pre-empted before calling `tx_cmplt_get()`; pre-emption
990 	 * could advance the first index beyond the ack_last value recorded in the
991 	 * node_rx enqueued by the `ll_rx_put()` call when we are in the `else` clause below.
992 	 */
993 	uint8_t tx_ack_last = mfifo_fifo_tx_ack.l;
994 
995 	/* Ensure that the value is fetched before call to memq_peek, i.e. compiler shall not
996 	 * reorder memory write before above read.
997 	 */
998 	cpu_dmb();
999 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
1000 
1001 	link = memq_peek(memq_ll_rx.head, memq_ll_rx.tail, (void **)&rx);
1002 	if (link) {
1003 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
1004 		cmplt = tx_cmplt_get(handle, &mfifo_fifo_tx_ack.f, rx->hdr.ack_last);
1005 		if (!cmplt) {
1006 			uint8_t f, cmplt_prev, cmplt_curr;
1007 			uint16_t h;
1008 
1009 			cmplt_curr = 0U;
1010 			f = mfifo_fifo_tx_ack.f;
1011 			do {
1012 				cmplt_prev = cmplt_curr;
1013 				cmplt_curr = tx_cmplt_get(&h, &f,
1014 							  mfifo_fifo_tx_ack.l);
1015 			} while ((cmplt_prev != 0U) ||
1016 				 (cmplt_prev != cmplt_curr));
1017 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
1018 
1019 			if (0) {
1020 #if defined(CONFIG_BT_CONN) || \
1021 	(defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT))
1022 			/* Do not send up buffers to Host thread that are
1023 			 * marked for release
1024 			 */
1025 			} else if (rx->hdr.type == NODE_RX_TYPE_RELEASE) {
1026 				rx_link_dequeue_release_quota_inc(link);
1027 				rx_release_replenish((struct node_rx_hdr *)rx);
1028 
1029 				goto ll_rx_get_again;
1030 #endif /* CONFIG_BT_CONN ||
1031 	* (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT)
1032 	*/
1033 
1034 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1035 			} else if (rx->hdr.type == NODE_RX_TYPE_IQ_SAMPLE_REPORT_LLL_RELEASE) {
1036 				const uint8_t report_cnt = 1U;
1037 
1038 				(void)memq_dequeue(memq_ll_rx.tail, &memq_ll_rx.head, NULL);
1039 				ll_rx_link_release(link);
1040 				ull_iq_report_link_inc_quota(report_cnt);
1041 				ull_df_iq_report_mem_release(rx);
1042 				ull_df_rx_iq_report_alloc(report_cnt);
1043 
1044 				goto ll_rx_get_again;
1045 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1046 
1047 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1048 			} else if (rx->hdr.type == NODE_RX_TYPE_SYNC_CHM_COMPLETE) {
1049 				rx_link_dequeue_release_quota_inc(link);
1050 
1051 				/* Remove Channel Map Update Indication from
1052 				 * ACAD.
1053 				 */
1054 				ull_adv_sync_chm_complete(rx);
1055 
1056 				rx_release_replenish((struct node_rx_hdr *)rx);
1057 
1058 				goto ll_rx_get_again;
1059 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1060 
1061 #if defined(CONFIG_BT_CTLR_ADV_ISO)
1062 			} else if (rx->hdr.type == NODE_RX_TYPE_BIG_CHM_COMPLETE) {
1063 				rx_link_dequeue_release_quota_inc(link);
1064 
1065 				/* Update Channel Map in BIGInfo present in
1066 				 * Periodic Advertising PDU.
1067 				 */
1068 				ull_adv_iso_chm_complete(rx);
1069 
1070 				rx_release_replenish((struct node_rx_hdr *)rx);
1071 
1072 				goto ll_rx_get_again;
1073 #endif /* CONFIG_BT_CTLR_ADV_ISO */
1074 			}
1075 
1076 			*node_rx = rx;
1077 
1078 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
1079 		}
1080 	} else {
1081 		/* Use the saved ack last value, before call was done to memq_peek, to ensure we
1082 		 * do not advance the first index `f` beyond the value that was the last index `l`
1083 		 * when memq_peek was called.
1084 		 */
1085 		cmplt = tx_cmplt_get(handle, &mfifo_fifo_tx_ack.f, tx_ack_last);
1086 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
1087 	}
1088 
1089 	return cmplt;
1090 }
1091 
1092 /**
1093  * @brief Commit the dequeue from memq_ll_rx, where ll_rx_get() did the peek
1094  * @details Execution context: Controller thread
1095  */
1096 void ll_rx_dequeue(void)
1097 {
1098 	struct node_rx_pdu *rx = NULL;
1099 	memq_link_t *link;
1100 
1101 	link = memq_dequeue(memq_ll_rx.tail, &memq_ll_rx.head,
1102 			    (void **)&rx);
1103 	LL_ASSERT(link);
1104 
1105 	ll_rx_link_release(link);
1106 
1107 	/* handle object specific clean up */
1108 	switch (rx->hdr.type) {
1109 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1110 #if defined(CONFIG_BT_OBSERVER)
1111 	case NODE_RX_TYPE_EXT_1M_REPORT:
1112 	case NODE_RX_TYPE_EXT_2M_REPORT:
1113 	case NODE_RX_TYPE_EXT_CODED_REPORT:
1114 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1115 	case NODE_RX_TYPE_SYNC_REPORT:
1116 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1117 	{
1118 		struct node_rx_pdu *rx_curr;
1119 		struct pdu_adv *adv;
1120 		uint8_t loop = PDU_RX_POOL_SIZE / PDU_RX_NODE_POOL_ELEMENT_SIZE;
1121 
1122 		adv = (struct pdu_adv *)rx->pdu;
1123 		if (adv->type != PDU_ADV_TYPE_EXT_IND) {
1124 			break;
1125 		}
1126 
1127 		rx_curr = rx->rx_ftr.extra;
1128 		while (rx_curr) {
1129 			memq_link_t *link_free;
1130 
1131 			LL_ASSERT(loop);
1132 			loop--;
1133 
1134 			link_free = rx_curr->hdr.link;
1135 			rx_curr = rx_curr->rx_ftr.extra;
1136 
1137 			ll_rx_link_release(link_free);
1138 		}
1139 	}
1140 	break;
1141 
1142 	case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
1143 	{
1144 		ull_scan_term_dequeue(rx->hdr.handle);
1145 	}
1146 	break;
1147 #endif /* CONFIG_BT_OBSERVER */
1148 
1149 #if defined(CONFIG_BT_BROADCASTER)
1150 	case NODE_RX_TYPE_EXT_ADV_TERMINATE:
1151 	{
1152 		struct ll_adv_set *adv;
1153 		struct lll_adv_aux *lll_aux;
1154 
1155 		adv = ull_adv_set_get(rx->hdr.handle);
1156 		LL_ASSERT(adv);
1157 
1158 		lll_aux = adv->lll.aux;
1159 		if (lll_aux) {
1160 			struct ll_adv_aux_set *aux;
1161 
1162 			aux = HDR_LLL2ULL(lll_aux);
1163 
1164 			aux->is_started = 0U;
1165 		}
1166 
1167 #if defined(CONFIG_BT_PERIPHERAL)
1168 		struct lll_conn *lll_conn = adv->lll.conn;
1169 
1170 		if (!lll_conn) {
1171 			adv->is_enabled = 0U;
1172 
1173 			break;
1174 		}
1175 
1176 		LL_ASSERT(!lll_conn->link_tx_free);
1177 
1178 		memq_link_t *memq_link = memq_deinit(&lll_conn->memq_tx.head,
1179 						     &lll_conn->memq_tx.tail);
1180 		LL_ASSERT(memq_link);
1181 
1182 		lll_conn->link_tx_free = memq_link;
1183 
1184 		struct ll_conn *conn = HDR_LLL2ULL(lll_conn);
1185 
1186 		ll_conn_release(conn);
1187 		adv->lll.conn = NULL;
1188 
1189 		ll_rx_release(adv->node_rx_cc_free);
1190 		adv->node_rx_cc_free = NULL;
1191 
1192 		ll_rx_link_release(adv->link_cc_free);
1193 		adv->link_cc_free = NULL;
1194 #endif /* CONFIG_BT_PERIPHERAL */
1195 
1196 		adv->is_enabled = 0U;
1197 	}
1198 	break;
1199 #endif /* CONFIG_BT_BROADCASTER */
1200 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1201 
1202 #if defined(CONFIG_BT_CONN)
1203 	case NODE_RX_TYPE_CONNECTION:
1204 	{
1205 		struct node_rx_cc *cc = (void *)rx->pdu;
1206 		struct node_rx_ftr *ftr = &(rx->rx_ftr);
1207 
1208 		if (0) {
1209 
1210 #if defined(CONFIG_BT_PERIPHERAL)
1211 		} else if ((cc->status == BT_HCI_ERR_ADV_TIMEOUT) || cc->role) {
1212 			struct ll_adv_set *adv;
1213 			struct lll_adv *lll;
1214 
1215 			/* Get reference to ULL context */
1216 			lll = ftr->param;
1217 			adv = HDR_LLL2ULL(lll);
1218 
1219 			if (cc->status == BT_HCI_ERR_ADV_TIMEOUT) {
1220 				struct lll_conn *conn_lll;
1221 				struct ll_conn *conn;
1222 				memq_link_t *memq_link;
1223 
1224 				conn_lll = lll->conn;
1225 				LL_ASSERT(conn_lll);
1226 				lll->conn = NULL;
1227 
1228 				LL_ASSERT(!conn_lll->link_tx_free);
1229 				memq_link = memq_deinit(&conn_lll->memq_tx.head,
1230 							&conn_lll->memq_tx.tail);
1231 				LL_ASSERT(memq_link);
1232 				conn_lll->link_tx_free = memq_link;
1233 
1234 				conn = HDR_LLL2ULL(conn_lll);
1235 				ll_conn_release(conn);
1236 			} else {
1237 				/* Release un-utilized node rx */
1238 				if (adv->node_rx_cc_free) {
1239 					void *rx_free;
1240 
1241 					rx_free = adv->node_rx_cc_free;
1242 					adv->node_rx_cc_free = NULL;
1243 
1244 					ll_rx_release(rx_free);
1245 				}
1246 			}
1247 
1248 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1249 			if (lll->aux) {
1250 				struct ll_adv_aux_set *aux;
1251 
1252 				aux = HDR_LLL2ULL(lll->aux);
1253 				aux->is_started = 0U;
1254 			}
1255 
1256 			/* If Extended Advertising Commands are used, reset
1257 			 * is_enabled when the advertising set terminated event
1258 			 * is dequeued. Otherwise, when legacy advertising
1259 			 * commands are used, reset is_enabled here.
1260 			 */
1261 			if (!lll->node_rx_adv_term) {
1262 				adv->is_enabled = 0U;
1263 			}
1264 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1265 			adv->is_enabled = 0U;
1266 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1267 
1268 #else /* !CONFIG_BT_PERIPHERAL */
1269 			ARG_UNUSED(cc);
1270 #endif /* !CONFIG_BT_PERIPHERAL */
1271 
1272 #if defined(CONFIG_BT_CENTRAL)
1273 		} else {
1274 			struct ll_scan_set *scan = HDR_LLL2ULL(ftr->param);
1275 
1276 #if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_CTLR_PHY_CODED)
1277 			struct ll_scan_set *scan_other =
1278 				ull_scan_is_enabled_get(SCAN_HANDLE_PHY_CODED);
1279 
1280 			if (scan_other) {
1281 				if (scan_other == scan) {
1282 					scan_other = ull_scan_is_enabled_get(SCAN_HANDLE_1M);
1283 				}
1284 
1285 				if (scan_other) {
1286 					scan_other->lll.conn = NULL;
1287 					scan_other->is_enabled = 0U;
1288 				}
1289 			}
1290 #endif /* CONFIG_BT_CTLR_ADV_EXT && CONFIG_BT_CTLR_PHY_CODED */
1291 
1292 			scan->lll.conn = NULL;
1293 			scan->is_enabled = 0U;
1294 #else /* !CONFIG_BT_CENTRAL */
1295 		} else {
1296 			LL_ASSERT(0);
1297 #endif /* !CONFIG_BT_CENTRAL */
1298 		}
1299 
1300 		if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY)) {
1301 			uint8_t bm;
1302 
1303 			/* FIXME: use the correct adv and scan set to get
1304 			 * enabled status bitmask
1305 			 */
1306 			bm = (IS_ENABLED(CONFIG_BT_OBSERVER)?(ull_scan_is_enabled(0) << 1):0) |
1307 			     (IS_ENABLED(CONFIG_BT_BROADCASTER)?ull_adv_is_enabled(0):0);
1308 
1309 			if (!bm) {
1310 				ull_filter_adv_scan_state_cb(0);
1311 			}
1312 		}
1313 	}
1314 	break;
1315 
1316 	case NODE_RX_TYPE_TERMINATE:
1317 	case NODE_RX_TYPE_DC_PDU:
1318 #endif /* CONFIG_BT_CONN */
1319 
1320 #if defined(CONFIG_BT_CTLR_ADV_ISO)
1321 	case NODE_RX_TYPE_BIG_COMPLETE:
1322 	case NODE_RX_TYPE_BIG_TERMINATE:
1323 #endif /* CONFIG_BT_CTLR_ADV_ISO */
1324 
1325 #if defined(CONFIG_BT_OBSERVER)
1326 	case NODE_RX_TYPE_REPORT:
1327 
1328 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1329 		/* fall through */
1330 	case NODE_RX_TYPE_SYNC:
1331 	case NODE_RX_TYPE_SYNC_LOST:
1332 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
1333 	/* fall through */
1334 	case NODE_RX_TYPE_SYNC_TRANSFER_RECEIVED:
1335 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
1336 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
1337 		/* fall through */
1338 	case NODE_RX_TYPE_SYNC_ISO:
1339 	case NODE_RX_TYPE_SYNC_ISO_LOST:
1340 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1341 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1342 #endif /* CONFIG_BT_OBSERVER */
1343 
1344 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
1345 	case NODE_RX_TYPE_SCAN_REQ:
1346 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
1347 
1348 #if defined(CONFIG_BT_CONN)
1349 	case NODE_RX_TYPE_CONN_UPDATE:
1350 	case NODE_RX_TYPE_ENC_REFRESH:
1351 
1352 #if defined(CONFIG_BT_CTLR_LE_PING)
1353 	case NODE_RX_TYPE_APTO:
1354 #endif /* CONFIG_BT_CTLR_LE_PING */
1355 
1356 	case NODE_RX_TYPE_CHAN_SEL_ALGO:
1357 
1358 #if defined(CONFIG_BT_CTLR_PHY)
1359 	case NODE_RX_TYPE_PHY_UPDATE:
1360 #endif /* CONFIG_BT_CTLR_PHY */
1361 
1362 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
1363 	case NODE_RX_TYPE_RSSI:
1364 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
1365 #endif /* CONFIG_BT_CONN */
1366 
1367 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
1368 	case NODE_RX_TYPE_PROFILE:
1369 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
1370 
1371 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
1372 	case NODE_RX_TYPE_ADV_INDICATION:
1373 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
1374 
1375 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
1376 	case NODE_RX_TYPE_SCAN_INDICATION:
1377 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
1378 
1379 #if defined(CONFIG_BT_HCI_MESH_EXT)
1380 	case NODE_RX_TYPE_MESH_ADV_CPLT:
1381 	case NODE_RX_TYPE_MESH_REPORT:
1382 #endif /* CONFIG_BT_HCI_MESH_EXT */
1383 
1384 #if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
1385 	case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
1386 		__fallthrough;
1387 #endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */
1388 
1389 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
1390 	case NODE_RX_TYPE_CIS_REQUEST:
1391 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
1392 
1393 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
1394 	case NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE:
1395 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
1396 
1397 #if defined(CONFIG_BT_CTLR_CONN_ISO)
1398 	case NODE_RX_TYPE_CIS_ESTABLISHED:
1399 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1400 
1401 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1402 	case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
1403 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1404 
1405 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
1406 	case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
1407 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
1408 
1409 #if defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
1410 	case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
1411 #endif /* CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */
1412 	case NODE_RX_TYPE_PATH_LOSS:
1413 
1414 	/* Ensure that at least one 'case' statement is present for this
1415 	 * code block.
1416 	 */
1417 	case NODE_RX_TYPE_NONE:
1418 		LL_ASSERT(rx->hdr.type != NODE_RX_TYPE_NONE);
1419 		break;
1420 
1421 	default:
1422 		LL_ASSERT(0);
1423 		break;
1424 	}
1425 
1426 	/* FIXME: clean up when porting Mesh Ext. */
1427 	if (0) {
1428 #if defined(CONFIG_BT_HCI_MESH_EXT)
1429 	} else if (rx->hdr.type == NODE_RX_TYPE_MESH_ADV_CPLT) {
1430 		struct ll_adv_set *adv;
1431 		struct ll_scan_set *scan;
1432 
1433 		adv = ull_adv_is_enabled_get(0);
1434 		LL_ASSERT(adv);
1435 		adv->is_enabled = 0U;
1436 
1437 		scan = ull_scan_is_enabled_get(0);
1438 		LL_ASSERT(scan);
1439 
1440 		scan->is_enabled = 0U;
1441 
1442 		ll_adv_scan_state_cb(0);
1443 #endif /* CONFIG_BT_HCI_MESH_EXT */
1444 	}
1445 }
1446 
1447 void ll_rx_mem_release(void **node_rx)
1448 {
1449 	struct node_rx_pdu *rx;
1450 
1451 	rx = *node_rx;
1452 	while (rx) {
1453 		struct node_rx_pdu *rx_free;
1454 
1455 		rx_free = rx;
1456 		rx = rx->hdr.next;
1457 
1458 		switch (rx_free->hdr.type) {
1459 #if defined(CONFIG_BT_BROADCASTER)
1460 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1461 		case NODE_RX_TYPE_EXT_ADV_TERMINATE:
1462 			ll_rx_release(rx_free);
1463 			break;
1464 
1465 #if defined(CONFIG_BT_CTLR_ADV_ISO)
1466 		case NODE_RX_TYPE_BIG_COMPLETE:
1467 			/* Nothing to release */
1468 			break;
1469 
1470 		case NODE_RX_TYPE_BIG_TERMINATE:
1471 		{
1472 			struct ll_adv_iso_set *adv_iso = rx_free->rx_ftr.param;
1473 
1474 			ull_adv_iso_stream_release(adv_iso);
1475 		}
1476 		break;
1477 #endif /* CONFIG_BT_CTLR_ADV_ISO */
1478 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1479 #endif /* CONFIG_BT_BROADCASTER */
1480 
1481 #if defined(CONFIG_BT_OBSERVER)
1482 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1483 		case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
1484 		{
1485 			ll_rx_release(rx_free);
1486 		}
1487 		break;
1488 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1489 #endif /* CONFIG_BT_OBSERVER */
1490 
1491 #if defined(CONFIG_BT_CONN)
1492 		case NODE_RX_TYPE_CONNECTION:
1493 		{
1494 			struct node_rx_cc *cc =
1495 				(void *)rx_free->pdu;
1496 
1497 			if (0) {
1498 
1499 #if defined(CONFIG_BT_PERIPHERAL)
1500 			} else if (cc->status == BT_HCI_ERR_ADV_TIMEOUT) {
1501 				ll_rx_release(rx_free);
1502 
1503 				break;
1504 #endif /* CONFIG_BT_PERIPHERAL */
1505 
1506 #if defined(CONFIG_BT_CENTRAL)
1507 			} else if (cc->status == BT_HCI_ERR_UNKNOWN_CONN_ID) {
1508 				ull_central_cleanup(rx_free);
1509 
1510 #if defined(CONFIG_BT_CTLR_PRIVACY)
1511 #if defined(CONFIG_BT_BROADCASTER)
1512 				if (!ull_adv_is_enabled_get(0))
1513 #endif /* CONFIG_BT_BROADCASTER */
1514 				{
1515 					ull_filter_adv_scan_state_cb(0);
1516 				}
1517 #endif /* CONFIG_BT_CTLR_PRIVACY */
1518 				break;
1519 #endif /* CONFIG_BT_CENTRAL */
1520 
1521 			} else {
1522 				LL_ASSERT(!cc->status);
1523 			}
1524 		}
1525 
1526 		__fallthrough;
1527 		case NODE_RX_TYPE_DC_PDU:
1528 #endif /* CONFIG_BT_CONN */
1529 
1530 #if defined(CONFIG_BT_OBSERVER)
1531 		case NODE_RX_TYPE_REPORT:
1532 
1533 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1534 			__fallthrough;
1535 		case NODE_RX_TYPE_EXT_1M_REPORT:
1536 		case NODE_RX_TYPE_EXT_2M_REPORT:
1537 		case NODE_RX_TYPE_EXT_CODED_REPORT:
1538 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1539 		case NODE_RX_TYPE_SYNC_REPORT:
1540 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1541 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1542 #endif /* CONFIG_BT_OBSERVER */
1543 
1544 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
1545 		case NODE_RX_TYPE_SCAN_REQ:
1546 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
1547 
1548 #if defined(CONFIG_BT_CONN)
1549 		case NODE_RX_TYPE_CONN_UPDATE:
1550 		case NODE_RX_TYPE_ENC_REFRESH:
1551 
1552 #if defined(CONFIG_BT_CTLR_LE_PING)
1553 		case NODE_RX_TYPE_APTO:
1554 #endif /* CONFIG_BT_CTLR_LE_PING */
1555 
1556 		case NODE_RX_TYPE_CHAN_SEL_ALGO:
1557 
1558 #if defined(CONFIG_BT_CTLR_PHY)
1559 		case NODE_RX_TYPE_PHY_UPDATE:
1560 #endif /* CONFIG_BT_CTLR_PHY */
1561 
1562 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
1563 		case NODE_RX_TYPE_RSSI:
1564 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
1565 #endif /* CONFIG_BT_CONN */
1566 
1567 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
1568 		case NODE_RX_TYPE_PROFILE:
1569 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
1570 
1571 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
1572 		case NODE_RX_TYPE_ADV_INDICATION:
1573 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
1574 
1575 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
1576 		case NODE_RX_TYPE_SCAN_INDICATION:
1577 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
1578 
1579 #if defined(CONFIG_BT_HCI_MESH_EXT)
1580 		case NODE_RX_TYPE_MESH_ADV_CPLT:
1581 		case NODE_RX_TYPE_MESH_REPORT:
1582 #endif /* CONFIG_BT_HCI_MESH_EXT */
1583 
1584 #if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
1585 		case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
1586 #endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */
1587 
1588 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
1589 		case NODE_RX_TYPE_CIS_REQUEST:
1590 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
1591 
1592 #if defined(CONFIG_BT_CTLR_CONN_ISO)
1593 		case NODE_RX_TYPE_CIS_ESTABLISHED:
1594 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1595 
1596 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
1597 		case NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE:
1598 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
1599 
1600 #if defined(CONFIG_BT_CTLR_ISO)
1601 		case NODE_RX_TYPE_ISO_PDU:
1602 #endif
1603 		case NODE_RX_TYPE_PATH_LOSS:
1604 
1605 		/* Ensure that at least one 'case' statement is present for this
1606 		 * code block.
1607 		 */
1608 		case NODE_RX_TYPE_NONE:
1609 			LL_ASSERT(rx_free->hdr.type != NODE_RX_TYPE_NONE);
1610 			ll_rx_link_quota_inc();
1611 			ll_rx_release(rx_free);
1612 			break;
1613 
1614 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1615 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
1616 		case NODE_RX_TYPE_SYNC_TRANSFER_RECEIVED:
1617 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
1618 		case NODE_RX_TYPE_SYNC:
1619 		{
1620 			struct node_rx_sync *se =
1621 				(void *)rx_free->pdu;
1622 			uint8_t status = se->status;
1623 
1624 			/* Below status codes use node_rx_sync_estab, hence
1625 			 * release the node_rx memory and release sync context
1626 			 * if sync establishment failed.
1627 			 */
1628 			if ((status == BT_HCI_ERR_SUCCESS) ||
1629 			    (status == BT_HCI_ERR_UNSUPP_REMOTE_FEATURE) ||
1630 			    (status == BT_HCI_ERR_CONN_FAIL_TO_ESTAB)) {
1631 				struct ll_sync_set *sync;
1632 
1633 				/* pick the sync context before node_rx
1634 				 * release.
1635 				 */
1636 				sync = (void *)rx_free->rx_ftr.param;
1637 
1638 				ll_rx_release(rx_free);
1639 
1640 				ull_sync_setup_reset(sync);
1641 
1642 				if (status != BT_HCI_ERR_SUCCESS) {
1643 					memq_link_t *link_sync_lost;
1644 
1645 					link_sync_lost =
1646 						sync->node_rx_lost.rx.hdr.link;
1647 					ll_rx_link_release(link_sync_lost);
1648 
1649 					ull_sync_release(sync);
1650 				}
1651 
1652 				break;
1653 			} else {
1654 				LL_ASSERT(status == BT_HCI_ERR_OP_CANCELLED_BY_HOST);
1655 
1656 				/* Fall through and release sync context */
1657 			}
1658 		}
1659 		/* Pass through */
1660 
1661 		case NODE_RX_TYPE_SYNC_LOST:
1662 		{
1663 			struct ll_sync_set *sync =
1664 				(void *)rx_free->rx_ftr.param;
1665 
1666 			ull_sync_release(sync);
1667 		}
1668 		break;
1669 
1670 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
1671 		case NODE_RX_TYPE_SYNC_ISO:
1672 		{
1673 			struct node_rx_sync_iso *se =
1674 				(void *)rx_free->pdu;
1675 
1676 			if (!se->status) {
1677 				ll_rx_release(rx_free);
1678 
1679 				break;
1680 			}
1681 		}
1682 		/* Pass through */
1683 
1684 		case NODE_RX_TYPE_SYNC_ISO_LOST:
1685 		{
1686 			struct ll_sync_iso_set *sync_iso =
1687 				(void *)rx_free->rx_ftr.param;
1688 
1689 			ull_sync_iso_stream_release(sync_iso);
1690 		}
1691 		break;
1692 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1693 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1694 
1695 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX) || \
1696 	defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
1697 		case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
1698 		case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
1699 		case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
1700 		{
1701 			const uint8_t report_cnt = 1U;
1702 
1703 			ull_iq_report_link_inc_quota(report_cnt);
1704 			ull_df_iq_report_mem_release(rx_free);
1705 			ull_df_rx_iq_report_alloc(report_cnt);
1706 		}
1707 		break;
1708 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX || CONFIG_BT_CTLR_DF_CONN_CTE_RX || CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */
1709 
1710 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_CONN_ISO)
1711 		case NODE_RX_TYPE_TERMINATE:
1712 		{
1713 			if (IS_ACL_HANDLE(rx_free->hdr.handle)) {
1714 				struct ll_conn *conn;
1715 				memq_link_t *link;
1716 
1717 				conn = ll_conn_get(rx_free->hdr.handle);
1718 				LL_ASSERT(conn != NULL);
1719 
1720 				LL_ASSERT(!conn->lll.link_tx_free);
1721 				link = memq_deinit(&conn->lll.memq_tx.head,
1722 						&conn->lll.memq_tx.tail);
1723 				LL_ASSERT(link);
1724 				conn->lll.link_tx_free = link;
1725 
1726 				ll_conn_release(conn);
1727 			} else if (IS_CIS_HANDLE(rx_free->hdr.handle)) {
1728 				ll_rx_link_quota_inc();
1729 				ll_rx_release(rx_free);
1730 			}
1731 		}
1732 		break;
1733 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_CONN_ISO */
1734 
1735 		case NODE_RX_TYPE_EVENT_DONE:
1736 		default:
1737 			LL_ASSERT(0);
1738 			break;
1739 		}
1740 	}
1741 
1742 	*node_rx = rx;
1743 
1744 	rx_replenish_all();
1745 }
1746 
1747 static void ll_rx_link_quota_update(int8_t delta)
1748 {
1749 	LL_ASSERT(delta <= 0 || mem_link_rx.quota_pdu < RX_CNT);
1750 	mem_link_rx.quota_pdu += delta;
1751 }
1752 
1753 static void ll_rx_link_quota_inc(void)
1754 {
1755 	ll_rx_link_quota_update(1);
1756 }
1757 
1758 static void ll_rx_link_quota_dec(void)
1759 {
1760 	ll_rx_link_quota_update(-1);
1761 }
1762 
1763 void *ll_rx_link_alloc(void)
1764 {
1765 	return mem_acquire(&mem_link_rx.free);
1766 }
1767 
1768 void ll_rx_link_release(memq_link_t *link)
1769 {
1770 	mem_release(link, &mem_link_rx.free);
1771 }
1772 
1773 void *ll_rx_alloc(void)
1774 {
1775 	return mem_acquire(&mem_pdu_rx.free);
1776 }
1777 
1778 void ll_rx_release(void *node_rx)
1779 {
1780 	mem_release(node_rx, &mem_pdu_rx.free);
1781 }
1782 
1783 void ll_rx_put(memq_link_t *link, void *rx)
1784 {
1785 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
1786 	struct node_rx_hdr *rx_hdr = rx;
1787 
1788 	/* Serialize Tx ack with Rx enqueue by storing reference to
1789 	 * last element index in Tx ack FIFO.
1790 	 */
1791 	rx_hdr->ack_last = mfifo_fifo_tx_ack.l;
1792 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
1793 
1794 	/* Enqueue the Rx object */
1795 	memq_enqueue(link, rx, &memq_ll_rx.tail);
1796 }
1797 
1798 /**
1799  * @brief Permit another loop in the controller thread (prio_recv_thread)
1800  * @details Execution context: ULL mayfly
1801  */
1802 void ll_rx_sched(void)
1803 {
1804 	/* sem_recv references the same semaphore (sem_prio_recv)
1805 	 * in prio_recv_thread
1806 	 */
1807 	k_sem_give(sem_recv);
1808 }
1809 
1810 void ll_rx_put_sched(memq_link_t *link, void *rx)
1811 {
1812 	ll_rx_put(link, rx);
1813 	ll_rx_sched();
1814 }
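
/* A minimal usage sketch (hypothetical caller context): once ULL has a
 * completed node whose hdr.link was taken from the rx link pool, it can
 * hand the node to the LL/HCI thread and wake that thread in one call:
 *
 *   ll_rx_put_sched(rx->hdr.link, rx);
 *
 * which is equivalent to ll_rx_put() followed by ll_rx_sched().
 */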
1815 
1816 #if defined(CONFIG_BT_CONN)
1817 void *ll_pdu_rx_alloc_peek(uint8_t count)
1818 {
1819 	if (count > MFIFO_AVAIL_COUNT_GET(ll_pdu_rx_free)) {
1820 		return NULL;
1821 	}
1822 
1823 	return MFIFO_DEQUEUE_PEEK(ll_pdu_rx_free);
1824 }
1825 
1826 void *ll_pdu_rx_alloc(void)
1827 {
1828 	return MFIFO_DEQUEUE(ll_pdu_rx_free);
1829 }
1830 #endif /* CONFIG_BT_CONN */
1831 
1832 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
1833 void ll_tx_ack_put(uint16_t handle, struct node_tx *node_tx)
1834 {
1835 	struct lll_tx *tx;
1836 	uint8_t idx;
1837 
1838 	idx = MFIFO_ENQUEUE_GET(tx_ack, (void **)&tx);
1839 	LL_ASSERT(tx);
1840 
1841 	tx->handle = handle;
1842 	tx->node = node_tx;
1843 
1844 	MFIFO_ENQUEUE(tx_ack, idx);
1845 }
1846 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
1847 
1848 void ll_timeslice_ticker_id_get(uint8_t * const instance_index,
1849 				uint8_t * const ticker_id)
1850 {
1851 	*instance_index = TICKER_INSTANCE_ID_CTLR;
1852 	*ticker_id = (TICKER_NODES - FLASH_TICKER_NODES - COEX_TICKER_NODES);
1853 }
1854 
1855 void ll_coex_ticker_id_get(uint8_t * const instance_index,
1856 				uint8_t * const ticker_id)
1857 {
1858 	*instance_index = TICKER_INSTANCE_ID_CTLR;
1859 	*ticker_id = (TICKER_NODES - COEX_TICKER_NODES);
1860 }
1861 
1862 void ll_radio_state_abort(void)
1863 {
1864 	static memq_link_t link;
1865 	static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
1866 	uint32_t ret;
1867 
1868 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0,
1869 			     &mfy);
1870 	LL_ASSERT(!ret);
1871 }
1872 
1873 uint32_t ll_radio_state_is_idle(void)
1874 {
1875 	return lll_radio_is_idle();
1876 }
1877 
1878 void ull_ticker_status_give(uint32_t status, void *param)
1879 {
1880 	*((uint32_t volatile *)param) = status;
1881 
1882 	k_sem_give(&sem_ticker_api_cb);
1883 }
1884 
1885 /**
1886  * @brief Take the ticker API semaphore (if applicable) and wait for the
1887  *        operation to complete.
1888  *
1889  * Waits for ticker operation to complete by taking ticker API semaphore,
1890  * unless the operation was executed inline due to same-priority caller/
1891  * callee id.
1892  *
1893  * In case of asynchronous ticker operation (caller priority !=
1894  * callee priority), the function grabs the semaphore and waits for
1895  * ull_ticker_status_give, which assigns the ret_cb variable and releases
1896  * the semaphore.
1897  *
1898  * In case of synchronous ticker operation, the result is already known at
1899  * entry, and semaphore is only taken if ret_cb has been updated. This is done
1900  * to balance take/give counts. If *ret_cb is still TICKER_STATUS_BUSY, but
1901  * ret is not, the ticker operation has failed early, and no callback will be
1902  * invoked. In this case the semaphore shall not be taken.
1903  *
1904  * @param ret    Return value from ticker API call:
1905  *               TICKER_STATUS_BUSY:    Ticker operation is queued
1906  *               TICKER_STATUS_SUCCESS: Operation completed OK
1907  *               TICKER_STATUS_FAILURE: Operation failed
1908  *
1909  * @param ret_cb Pointer to user data passed to ticker operation
1910  *               callback, which holds the operation result. Value
1911  *               upon entry:
1912  *               TICKER_STATUS_BUSY:    Ticker has not yet called CB
1913  *               TICKER_STATUS_SUCCESS: Operation completed OK via CB
1914  *               TICKER_STATUS_FAILURE: Operation failed via CB
1915  *
1916  *               NOTE: For correct operation, *ret_cb must be initialized
1917  *               to TICKER_STATUS_BUSY before initiating the ticker API call.
1918  *
1919  * @return uint32_t Returns result of completed ticker operation
1920  */
1921 uint32_t ull_ticker_status_take(uint32_t ret, uint32_t volatile *ret_cb)
1922 {
1923 	if ((ret == TICKER_STATUS_BUSY) || (*ret_cb != TICKER_STATUS_BUSY)) {
1924 		/* Operation is either pending or was completed via callback
1925 		 * prior to this function call. Take the semaphore and wait,
1926 		 * or take it to balance take/give counting.
1927 		 */
1928 		k_sem_take(&sem_ticker_api_cb, K_FOREVER);
1929 		return *ret_cb;
1930 	}
1931 
1932 	return ret;
1933 }
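
/* Calling convention sketch (the same pattern is used by
 * ull_ticker_stop_with_mark() below):
 *
 *   uint32_t volatile ret_cb = TICKER_STATUS_BUSY;
 *   uint32_t ret;
 *
 *   ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
 *                     ticker_handle, ull_ticker_status_give,
 *                     (void *)&ret_cb);
 *   ret = ull_ticker_status_take(ret, &ret_cb);
 *
 * After the take, ret holds the final ticker operation status, whether the
 * operation completed synchronously or via ull_ticker_status_give().
 */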
1934 
1935 void *ull_disable_mark(void *param)
1936 {
1937 	return mark_set(&mark_disable, param);
1938 }
1939 
1940 void *ull_disable_unmark(void *param)
1941 {
1942 	return mark_unset(&mark_disable, param);
1943 }
1944 
1945 void *ull_disable_mark_get(void)
1946 {
1947 	return mark_get(mark_disable);
1948 }
1949 
1950 /**
1951  * @brief Stops a specified ticker using the ull_disable_(un)mark functions.
1952  *
1953  * @param ticker_handle The handle of the ticker.
1954  * @param param         The object to mark.
1955  * @param lll_disable   Optional object when calling @ref ull_disable
1956  *
1957  * @return 0 on success, else a negative errno value.
1958  */
1959 int ull_ticker_stop_with_mark(uint8_t ticker_handle, void *param,
1960 			      void *lll_disable)
1961 {
1962 	uint32_t volatile ret_cb;
1963 	uint32_t ret;
1964 	void *mark;
1965 	int err;
1966 
1967 	mark = ull_disable_mark(param);
1968 	if (mark != param) {
1969 		return -ENOLCK;
1970 	}
1971 
1972 	ret_cb = TICKER_STATUS_BUSY;
1973 	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
1974 			  ticker_handle, ull_ticker_status_give,
1975 			  (void *)&ret_cb);
1976 	ret = ull_ticker_status_take(ret, &ret_cb);
1977 	if (ret) {
1978 		mark = ull_disable_unmark(param);
1979 		if (mark != param) {
1980 			return -ENOLCK;
1981 		}
1982 
1983 		return -EALREADY;
1984 	}
1985 
1986 	err = ull_disable(lll_disable);
1987 
1988 	mark = ull_disable_unmark(param);
1989 	if (mark != param) {
1990 		return -ENOLCK;
1991 	}
1992 
1993 	if (err && (err != -EALREADY)) {
1994 		return err;
1995 	}
1996 
1997 	return 0;
1998 }
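
/* Illustrative (hypothetical) caller: a role tear-down path could stop its
 * ticker while marking the owning context against concurrent access, e.g.
 *
 *   err = ull_ticker_stop_with_mark(ticker_id, role_ctx, &role_ctx->lll);
 *
 * where ticker_id, role_ctx and its lll member stand in for the role's
 * actual ticker handle and ULL/LLL objects.
 */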
1999 
2000 #if defined(CONFIG_BT_CONN)
2001 void *ull_update_mark(void *param)
2002 {
2003 	return mark_set(&mark_update, param);
2004 }
2005 
2006 void *ull_update_unmark(void *param)
2007 {
2008 	return mark_unset(&mark_update, param);
2009 }
2010 
2011 void *ull_update_mark_get(void)
2012 {
2013 	return mark_get(mark_update);
2014 }
2015 #endif /* CONFIG_BT_CONN */
2016 
2017 int ull_disable(void *lll)
2018 {
2019 	static memq_link_t link;
2020 	static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
2021 	struct ull_hdr *hdr;
2022 	struct k_sem sem;
2023 	uint32_t ret;
2024 
2025 	hdr = HDR_LLL2ULL(lll);
2026 	if (!ull_ref_get(hdr)) {
2027 		return -EALREADY;
2028 	}
2029 	cpu_dmb(); /* Ensure synchronized data access */
2030 
2031 	k_sem_init(&sem, 0, 1);
2032 
2033 	hdr->disabled_param = &sem;
2034 	hdr->disabled_cb = disabled_cb;
2035 
2036 	cpu_dmb(); /* Ensure synchronized data access */
2037 
2038 	/* ULL_HIGH can run after we have called `ull_ref_get` and it can
2039 	 * decrement the ref count. Hence, handle this race condition by
2040 	 * ensuring that `disabled_cb` has been set while the ref count is still
2041 	 * set.
2042 	 * There is no need to call `lll_disable` and take the semaphore
2043 	 * thereafter if the reference count is zero.
2044 	 * If the `sem` is given after the reference count was decremented, we
2045 	 * do not care.
2046 	 */
2047 	if (!ull_ref_get(hdr)) {
2048 		return -EALREADY;
2049 	}
2050 
2051 	mfy.param = lll;
2052 	ret = mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_LLL, 0,
2053 			     &mfy);
2054 	LL_ASSERT(!ret);
2055 
2056 	return k_sem_take(&sem, ULL_DISABLE_TIMEOUT);
2057 }
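
/* Typical call pattern (see ull_ticker_stop_with_mark() above), where
 * -EALREADY simply means the event was not active:
 *
 *   err = ull_disable(lll);
 *   if (err && (err != -EALREADY)) {
 *           return err;
 *   }
 */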
2058 
2059 void *ull_pdu_rx_alloc_peek(uint8_t count)
2060 {
2061 	if (count > MFIFO_AVAIL_COUNT_GET(pdu_rx_free)) {
2062 		return NULL;
2063 	}
2064 
2065 	return MFIFO_DEQUEUE_PEEK(pdu_rx_free);
2066 }
2067 
2068 void *ull_pdu_rx_alloc_peek_iter(uint8_t *idx)
2069 {
2070 	return *(void **)MFIFO_DEQUEUE_ITER_GET(pdu_rx_free, idx);
2071 }
2072 
2073 void *ull_pdu_rx_alloc(void)
2074 {
2075 	return MFIFO_DEQUEUE(pdu_rx_free);
2076 }
2077 
2078 void ull_rx_put(memq_link_t *link, void *rx)
2079 {
2080 #if defined(CONFIG_BT_CONN)
2081 	struct node_rx_hdr *rx_hdr = rx;
2082 
2083 	/* Serialize Tx ack with Rx enqueue by storing reference to
2084 	 * last element index in Tx ack FIFO.
2085 	 */
2086 	rx_hdr->ack_last = ull_conn_ack_last_idx_get();
2087 #endif /* CONFIG_BT_CONN */
2088 
2089 	/* Enqueue the Rx object */
2090 	memq_enqueue(link, rx, &memq_ull_rx.tail);
2091 }
2092 
2093 void ull_rx_sched(void)
2094 {
2095 	static memq_link_t link;
2096 	static struct mayfly mfy = {0, 0, &link, NULL, rx_demux};
2097 
2098 	/* Kick the ULL (using the mayfly, tailchain it) */
2099 	mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1, &mfy);
2100 }
2101 
2102 void ull_rx_put_sched(memq_link_t *link, void *rx)
2103 {
2104 	ull_rx_put(link, rx);
2105 	ull_rx_sched();
2106 }
2107 
2108 struct lll_event *ull_prepare_enqueue(lll_is_abort_cb_t is_abort_cb,
2109 				      lll_abort_cb_t abort_cb,
2110 				      struct lll_prepare_param *prepare_param,
2111 				      lll_prepare_cb_t prepare_cb,
2112 				      uint8_t is_resume)
2113 {
2114 	struct lll_event *e;
2115 	uint8_t idx;
2116 
2117 	idx = MFIFO_ENQUEUE_GET(prep, (void **)&e);
2118 	if (!e) {
2119 		return NULL;
2120 	}
2121 
2122 	memcpy(&e->prepare_param, prepare_param, sizeof(e->prepare_param));
2123 	e->prepare_cb = prepare_cb;
2124 	e->is_abort_cb = is_abort_cb;
2125 	e->abort_cb = abort_cb;
2126 	e->is_resume = is_resume;
2127 	e->is_aborted = 0U;
2128 
2129 	MFIFO_ENQUEUE(prep, idx);
2130 
2131 	return e;
2132 }
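
/* Sketch only, with hypothetical callbacks and prepare parameters (in the
 * controller this is reached from the LLL prepare path):
 *
 *   struct lll_event *e;
 *
 *   e = ull_prepare_enqueue(my_is_abort_cb, my_abort_cb, &prepare_param,
 *                           my_prepare_cb, 0U);
 *   LL_ASSERT(e);
 *
 * A NULL return means the prepare pipeline (EVENT_PIPELINE_MAX entries) is
 * full.
 */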
2133 
2134 void *ull_prepare_dequeue_get(void)
2135 {
2136 	return MFIFO_DEQUEUE_GET(prep);
2137 }
2138 
2139 void *ull_prepare_dequeue_iter(uint8_t *idx)
2140 {
2141 	return MFIFO_DEQUEUE_ITER_GET(prep, idx);
2142 }
2143 
2144 void ull_prepare_dequeue(uint8_t caller_id)
2145 {
2146 	void *param_normal_head = NULL;
2147 	void *param_normal_next = NULL;
2148 	void *param_resume_head = NULL;
2149 	void *param_resume_next = NULL;
2150 	struct lll_event *next;
2151 	uint8_t loop;
2152 
2153 	/* Development assertion check to ensure the below loop processing
2154 	 * has a limit.
2155 	 *
2156 	 * Only 2 scanners and 1 advertiser (directed adv) get enqueued back:
2157 	 *
2158 	 * Already in queue max 7 (EVENT_PIPELINE_MAX):
2159 	 *  - 2 continuous scan prepare in queue (1M and Coded PHY)
2160 	 *  - 2 continuous scan resume in queue (1M and Coded PHY)
2161 	 *  - 1 directed adv prepare
2162 	 *  - 1 directed adv resume
2163 	 *  - 1 any other role with time reservation
2164 	 *
2165 	 * The loop removes the duplicates (scan and advertiser) with is_aborted
2166 	 * flag set in 7 iterations:
2167 	 *  - 1 scan prepare (1M)
2168 	 *  - 1 scan prepare (Coded PHY)
2169 	 *  - 1 directed adv prepare
2170 	 *
2171 	 * and has enqueued the following in these 7 iterations:
2172 	 *  - 1 scan resume (1M)
2173 	 *  - 1 scan resume (Coded PHY)
2174 	 *  - 1 directed adv resume
2175 	 *
2176 	 * Hence, it should be (EVENT_PIPELINE_MAX + 3U) iterations max.
2177 	 */
2178 	loop = (EVENT_PIPELINE_MAX + 3U);
2179 
2180 	next = ull_prepare_dequeue_get();
2181 	while (next) {
2182 		void *param = next->prepare_param.param;
2183 		uint8_t is_aborted = next->is_aborted;
2184 		uint8_t is_resume = next->is_resume;
2185 
2186 		/* Assert if we exceed iterations processing the prepare queue
2187 		 */
2188 		LL_ASSERT(loop);
2189 		loop--;
2190 
2191 		/* Let LLL invoke the `prepare` interface if radio not in active
2192 		 * use. Otherwise, enqueue at end of the prepare pipeline queue.
2193 		 */
2194 		if (!is_aborted) {
2195 			static memq_link_t link;
2196 			static struct mayfly mfy = {0, 0, &link, NULL,
2197 						    lll_resume};
2198 			uint32_t ret;
2199 
2200 			mfy.param = next;
2201 			ret = mayfly_enqueue(caller_id, TICKER_USER_ID_LLL, 0,
2202 					     &mfy);
2203 			LL_ASSERT(!ret);
2204 		}
2205 
2206 		MFIFO_DEQUEUE(prep);
2207 
2208 		/* Check for any more prepare elements in the queue */
2209 		next = ull_prepare_dequeue_get();
2210 		if (!next) {
2211 			break;
2212 		}
2213 
2214 		/* A valid prepare element has its `prepare` invoked or was
2215 		 * enqueued back into prepare pipeline.
2216 		 */
2217 		if (!is_aborted) {
2218 			/* If the prepare element was not a resume event, it
2219 			 * either used the radio or was enqueued back into the
2220 			 * prepare pipeline with a preempt timeout being set.
2221 			 *
2222 			 * Remember the first encountered and the next element
2223 			 * in the prepare pipeline so that we do not infinitely
2224 			 * loop through the resume events in prepare pipeline.
2225 			 */
2226 			if (!is_resume) {
2227 				if (!param_normal_head) {
2228 					param_normal_head = param;
2229 				} else if (!param_normal_next) {
2230 					param_normal_next = param;
2231 				}
2232 			} else {
2233 				if (!param_resume_head) {
2234 					param_resume_head = param;
2235 				} else if (!param_resume_next) {
2236 					param_resume_next = param;
2237 				}
2238 			}
2239 
2240 			/* Stop traversing the prepare pipeline when we reach
2241 			 * back to the first or next event where we
2242 			 * initially started processing the prepare pipeline.
2243 			 */
2244 			if (!next->is_aborted &&
2245 			    ((!next->is_resume &&
2246 			      ((next->prepare_param.param ==
2247 				param_normal_head) ||
2248 			       (next->prepare_param.param ==
2249 				param_normal_next))) ||
2250 			     (next->is_resume &&
2251 			      !param_normal_next &&
2252 			      ((next->prepare_param.param ==
2253 				param_resume_head) ||
2254 			       (next->prepare_param.param ==
2255 				param_resume_next))))) {
2256 				break;
2257 			}
2258 		}
2259 	}
2260 }
2261 
2262 struct event_done_extra *ull_event_done_extra_get(void)
2263 {
2264 	struct node_rx_event_done *evdone;
2265 
2266 	evdone = MFIFO_DEQUEUE_PEEK(done);
2267 	if (!evdone) {
2268 		return NULL;
2269 	}
2270 
2271 	return &evdone->extra;
2272 }
2273 
2274 struct event_done_extra *ull_done_extra_type_set(uint8_t type)
2275 {
2276 	struct event_done_extra *extra;
2277 
2278 	extra = ull_event_done_extra_get();
2279 	if (!extra) {
2280 		return NULL;
2281 	}
2282 
2283 	extra->type = type;
2284 
2285 	return extra;
2286 }
2287 
2288 void *ull_event_done(void *param)
2289 {
2290 	struct node_rx_event_done *evdone;
2291 	memq_link_t *link;
2292 
2293 	/* Obtain new node that signals "Done of an RX-event".
2294 	 * Obtain this by dequeuing from the global 'mfifo_done' queue.
2295 	 * Note that 'mfifo_done' is a queue of pointers, not of
2296 	 * struct node_rx_event_done
2297 	 */
2298 	evdone = MFIFO_DEQUEUE(done);
2299 	if (!evdone) {
2300 		/* Not fatal if we cannot obtain a node, though
2301 		 * we will lose the packets in the software stack.
2302 		 * If this happens during a Connection Update, it could cause LSTO.
2303 		 */
2304 		return NULL;
2305 	}
2306 
2307 	link = evdone->hdr.link;
2308 	evdone->hdr.link = NULL;
2309 
2310 	evdone->hdr.type = NODE_RX_TYPE_EVENT_DONE;
2311 	evdone->param = param;
2312 
2313 	ull_rx_put_sched(link, evdone);
2314 
2315 	return evdone;
2316 }
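
/* Producer-side sketch (hypothetical role code): at the end of a radio
 * event the role's done handling passes its ULL context so that
 * rx_demux_event_done() can decrement the prepare reference and run the
 * role specific done processing:
 *
 *   (void)ull_event_done(&role_ctx->ull);
 *
 * role_ctx and its ull member are placeholders; a NULL return indicates
 * that the 'done' FIFO was exhausted.
 */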
2317 
2318 #if defined(CONFIG_BT_PERIPHERAL) || defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
2319 /**
2320  * @brief Extract timing from completed event
2321  *
2322  * @param[in]  done               Done event containing fresh timing information
2323  * @param[out] ticks_drift_plus   Positive part of drift uncertainty window
2324  * @param[out] ticks_drift_minus  Negative part of drift uncertainty window
2325  */
2326 void ull_drift_ticks_get(struct node_rx_event_done *done,
2327 			 uint32_t *ticks_drift_plus,
2328 			 uint32_t *ticks_drift_minus)
2329 {
2330 	uint32_t start_to_address_expected_us;
2331 	uint32_t start_to_address_actual_us;
2332 	uint32_t window_widening_event_us;
2333 	uint32_t preamble_to_addr_us;
2334 
2335 	start_to_address_actual_us =
2336 		done->extra.drift.start_to_address_actual_us;
2337 	window_widening_event_us =
2338 		done->extra.drift.window_widening_event_us;
2339 	preamble_to_addr_us =
2340 		done->extra.drift.preamble_to_addr_us;
2341 
2342 	start_to_address_expected_us = EVENT_JITTER_US +
2343 				       EVENT_TICKER_RES_MARGIN_US +
2344 				       window_widening_event_us +
2345 				       preamble_to_addr_us;
2346 
2347 	if (start_to_address_actual_us <= start_to_address_expected_us) {
2348 		*ticks_drift_plus =
2349 			HAL_TICKER_US_TO_TICKS(window_widening_event_us);
2350 		*ticks_drift_minus =
2351 			HAL_TICKER_US_TO_TICKS((start_to_address_expected_us -
2352 					       start_to_address_actual_us));
2353 	} else {
2354 		*ticks_drift_plus =
2355 			HAL_TICKER_US_TO_TICKS(start_to_address_actual_us);
2356 		*ticks_drift_minus =
2357 			HAL_TICKER_US_TO_TICKS(EVENT_JITTER_US +
2358 					       EVENT_TICKER_RES_MARGIN_US +
2359 					       preamble_to_addr_us);
2360 	}
2361 }
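
/* Worked example with illustrative numbers (not the real Kconfig values):
 * assume EVENT_JITTER_US + EVENT_TICKER_RES_MARGIN_US = 20 us,
 * window_widening_event_us = 30 us and preamble_to_addr_us = 40 us, giving
 * start_to_address_expected_us = 90 us. If the access address was actually
 * received 70 us after the event start, then:
 *   *ticks_drift_plus  = HAL_TICKER_US_TO_TICKS(30)   (the window widening)
 *   *ticks_drift_minus = HAL_TICKER_US_TO_TICKS(20)   (90 - 70)
 */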
2362 #endif /* CONFIG_BT_PERIPHERAL || CONFIG_BT_CTLR_SYNC_PERIODIC */
2363 
2364 static inline int init_reset(void)
2365 {
2366 	memq_link_t *link;
2367 
2368 	/* Initialize and allocate done pool */
2369 	RXFIFO_INIT_ALLOC(done);
2370 
2371 	/* Initialize rx pool. */
2372 	mem_init(mem_pdu_rx.pool, (PDU_RX_NODE_POOL_ELEMENT_SIZE),
2373 		 sizeof(mem_pdu_rx.pool) / (PDU_RX_NODE_POOL_ELEMENT_SIZE),
2374 		 &mem_pdu_rx.free);
2375 
2376 	/* Initialize rx link pool. */
2377 	mem_init(mem_link_rx.pool, sizeof(memq_link_t),
2378 		 sizeof(mem_link_rx.pool) / sizeof(memq_link_t),
2379 		 &mem_link_rx.free);
2380 
2381 	/* Acquire a link to initialize ull rx memq */
2382 	link = mem_acquire(&mem_link_rx.free);
2383 	LL_ASSERT(link);
2384 
2385 	/* Initialize ull rx memq */
2386 	MEMQ_INIT(ull_rx, link);
2387 
2388 	/* Acquire a link to initialize ll rx memq */
2389 	link = mem_acquire(&mem_link_rx.free);
2390 	LL_ASSERT(link);
2391 
2392 	/* Initialize ll rx memq */
2393 	MEMQ_INIT(ll_rx, link);
2394 
2395 	/* Allocate rx free buffers */
2396 	mem_link_rx.quota_pdu = RX_CNT;
2397 	rx_replenish_all();
2398 
2399 #if (defined(CONFIG_BT_BROADCASTER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
2400 	defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
2401 	defined(CONFIG_BT_CTLR_SYNC_PERIODIC) || \
2402 	defined(CONFIG_BT_CONN)
2403 	/* Initialize channel map */
2404 	ull_chan_reset();
2405 #endif /* (CONFIG_BT_BROADCASTER && CONFIG_BT_CTLR_ADV_EXT) ||
2406 	* CONFIG_BT_CTLR_ADV_PERIODIC ||
2407 	* CONFIG_BT_CTLR_SYNC_PERIODIC ||
2408 	* CONFIG_BT_CONN
2409 	*/
2410 
2411 	return 0;
2412 }
2413 
2414 static void perform_lll_reset(void *param)
2415 {
2416 	int err;
2417 
2418 	/* Reset LLL */
2419 	err = lll_reset();
2420 	LL_ASSERT(!err);
2421 
2422 #if defined(CONFIG_BT_BROADCASTER)
2423 	/* Reset adv state */
2424 	err = lll_adv_reset();
2425 	LL_ASSERT(!err);
2426 #endif /* CONFIG_BT_BROADCASTER */
2427 
2428 #if defined(CONFIG_BT_OBSERVER)
2429 	/* Reset scan state */
2430 	err = lll_scan_reset();
2431 	LL_ASSERT(!err);
2432 #endif /* CONFIG_BT_OBSERVER */
2433 
2434 #if defined(CONFIG_BT_CONN)
2435 	/* Reset conn role */
2436 	err = lll_conn_reset();
2437 	LL_ASSERT(!err);
2438 #endif /* CONFIG_BT_CONN */
2439 
2440 #if defined(CONFIG_BT_CTLR_DF)
2441 	err = lll_df_reset();
2442 	LL_ASSERT(!err);
2443 #endif /* CONFIG_BT_CTLR_DF */
2444 
2445 #if !defined(CONFIG_BT_CTLR_ZLI)
2446 	k_sem_give(param);
2447 #endif /* !CONFIG_BT_CTLR_ZLI */
2448 }
2449 
2450 static inline void *mark_set(void **m, void *param)
2451 {
2452 	if (!*m) {
2453 		*m = param;
2454 	}
2455 
2456 	return *m;
2457 }
2458 
2459 static inline void *mark_unset(void **m, void *param)
2460 {
2461 	if (*m && *m == param) {
2462 		*m = NULL;
2463 
2464 		return param;
2465 	}
2466 
2467 	return NULL;
2468 }
2469 
2470 static inline void *mark_get(void *m)
2471 {
2472 	return m;
2473 }
2474 
2475 static void rx_replenish(uint8_t max)
2476 {
2477 	uint8_t idx;
2478 
2479 	if (max > mem_link_rx.quota_pdu) {
2480 		max = mem_link_rx.quota_pdu;
2481 	}
2482 
2483 	while (max && MFIFO_ENQUEUE_IDX_GET(pdu_rx_free, &idx)) {
2484 		memq_link_t *link;
2485 		struct node_rx_hdr *rx;
2486 
2487 		link = mem_acquire(&mem_link_rx.free);
2488 		if (!link) {
2489 			return;
2490 		}
2491 
2492 		rx = mem_acquire(&mem_pdu_rx.free);
2493 		if (!rx) {
2494 			ll_rx_link_release(link);
2495 			return;
2496 		}
2497 
2498 		rx->link = link;
2499 
2500 		MFIFO_BY_IDX_ENQUEUE(pdu_rx_free, idx, rx);
2501 
2502 		ll_rx_link_quota_dec();
2503 
2504 		max--;
2505 	}
2506 
2507 #if defined(CONFIG_BT_CONN)
2508 	if (!max) {
2509 		return;
2510 	}
2511 
2512 	/* Replenish the ULL to LL/HCI free Rx PDU queue after LLL to ULL free
2513 	 * Rx PDU queue has been filled.
2514 	 */
2515 	while (mem_link_rx.quota_pdu &&
2516 	       MFIFO_ENQUEUE_IDX_GET(ll_pdu_rx_free, &idx)) {
2517 		memq_link_t *link;
2518 		struct node_rx_hdr *rx;
2519 
2520 		link = mem_acquire(&mem_link_rx.free);
2521 		if (!link) {
2522 			return;
2523 		}
2524 
2525 		rx = mem_acquire(&mem_pdu_rx.free);
2526 		if (!rx) {
2527 			ll_rx_link_release(link);
2528 			return;
2529 		}
2530 
2531 		link->mem = NULL;
2532 		rx->link = link;
2533 
2534 		MFIFO_BY_IDX_ENQUEUE(ll_pdu_rx_free, idx, rx);
2535 
2536 		ll_rx_link_quota_dec();
2537 	}
2538 #endif /* CONFIG_BT_CONN */
2539 }
2540 
2541 static void rx_replenish_all(void)
2542 {
2543 	rx_replenish(UINT8_MAX);
2544 }
2545 
2546 #if defined(CONFIG_BT_CONN) || \
2547 	(defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
2548 	defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
2549 	defined(CONFIG_BT_CTLR_ADV_ISO)
2550 
2551 static void rx_replenish_one(void)
2552 {
2553 	rx_replenish(1U);
2554 }
2555 
2556 static void rx_release_replenish(struct node_rx_hdr *rx)
2557 {
2558 	ll_rx_release(rx);
2559 	rx_replenish_one();
2560 }
2561 
2562 static void rx_link_dequeue_release_quota_inc(memq_link_t *link)
2563 {
2564 	(void)memq_dequeue(memq_ll_rx.tail,
2565 			   &memq_ll_rx.head, NULL);
2566 	ll_rx_link_release(link);
2567 	ll_rx_link_quota_inc();
2568 }
2569 #endif /* CONFIG_BT_CONN ||
2570 	* (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT) ||
2571 	* CONFIG_BT_CTLR_ADV_PERIODIC ||
2572 	* CONFIG_BT_CTLR_ADV_ISO
2573 	*/
2574 
2575 static void rx_demux(void *param)
2576 {
2577 	memq_link_t *link;
2578 
2579 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2580 	do {
2581 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2582 		struct node_rx_hdr *rx;
2583 #if defined(CONFIG_BT_CONN)
2584 		struct node_tx *tx;
2585 		memq_link_t *link_tx;
2586 		uint8_t ack_last;
2587 		uint16_t handle; /* Handle to Ack TX */
2588 
2589 		/* Save the ack_last, Tx Ack FIFO's last index to avoid the value being changed if
2590 		 * there were no Rx PDUs and we were pre-empted before calling
2591 		 * `rx_demux_conn_tx_ack()` in the `else` clause.
2592 		 */
2593 		link_tx = ull_conn_ack_peek(&ack_last, &handle, &tx);
2594 
2595 		/* Ensure that the value is fetched before call to memq_peek, i.e. compiler shall
2596 		 * not reorder memory write before above read.
2597 		 */
2598 		cpu_dmb();
2599 #endif /* CONFIG_BT_CONN */
2600 
2601 		link = memq_peek(memq_ull_rx.head, memq_ull_rx.tail, (void **)&rx);
2602 		if (link) {
2603 			LL_ASSERT(rx);
2604 
2605 #if defined(CONFIG_BT_CONN)
2606 			link_tx = ull_conn_ack_by_last_peek(rx->ack_last, &handle, &tx);
2607 			if (link_tx) {
2608 				rx_demux_conn_tx_ack(rx->ack_last, handle, link_tx, tx);
2609 			} else
2610 #endif /* CONFIG_BT_CONN */
2611 			{
2612 				rx_demux_rx(link, rx);
2613 			}
2614 
2615 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2616 			rx_demux_yield();
2617 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2618 
2619 #if defined(CONFIG_BT_CONN)
2620 		} else if (link_tx) {
2621 			rx_demux_conn_tx_ack(ack_last, handle, link_tx, tx);
2622 
2623 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2624 			rx_demux_yield();
2625 #else /* !CONFIG_BT_CTLR_LOW_LAT_ULL */
2626 			link = link_tx;
2627 #endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL */
2628 #endif /* CONFIG_BT_CONN */
2629 		}
2630 
2631 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2632 	} while (link);
2633 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2634 }
2635 
2636 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2637 static void rx_demux_yield(void)
2638 {
2639 	static memq_link_t link;
2640 	static struct mayfly mfy = {0, 0, &link, NULL, rx_demux};
2641 	struct node_rx_hdr *rx;
2642 	memq_link_t *link_peek;
2643 
2644 	link_peek = memq_peek(memq_ull_rx.head, memq_ull_rx.tail, (void **)&rx);
2645 	if (!link_peek) {
2646 #if defined(CONFIG_BT_CONN)
2647 		struct node_tx *node_tx;
2648 		uint8_t ack_last;
2649 		uint16_t handle;
2650 
2651 		link_peek = ull_conn_ack_peek(&ack_last, &handle, &node_tx);
2652 		if (!link_peek) {
2653 			return;
2654 		}
2655 #else /* !CONFIG_BT_CONN */
2656 		return;
2657 #endif /* !CONFIG_BT_CONN */
2658 	}
2659 
2660 	/* Kick the ULL (using the mayfly, tailchain it) */
2661 	mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_HIGH, 1,
2662 		       &mfy);
2663 }
2664 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2665 
2666 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
2667 static uint8_t tx_cmplt_get(uint16_t *handle, uint8_t *first, uint8_t last)
2668 {
2669 	struct lll_tx *tx;
2670 	uint8_t cmplt;
2671 	uint8_t next;
2672 
2673 	next = *first;
2674 	tx = mfifo_dequeue_iter_get(mfifo_fifo_tx_ack.m, mfifo_tx_ack.s,
2675 				    mfifo_tx_ack.n, mfifo_fifo_tx_ack.f, last,
2676 				    &next);
2677 	if (!tx) {
2678 		return 0;
2679 	}
2680 
2681 	*handle = tx->handle;
2682 	cmplt = 0U;
2683 	do {
2684 		if (false) {
2685 #if defined(CONFIG_BT_CTLR_ADV_ISO) || \
2686 	defined(CONFIG_BT_CTLR_CONN_ISO)
2687 		} else if (IS_CIS_HANDLE(tx->handle) ||
2688 			   IS_ADV_ISO_HANDLE(tx->handle)) {
2689 			struct node_tx_iso *tx_node;
2690 			uint8_t sdu_fragments;
2691 
2692 			/* NOTE: tx_cmplt_get() is permitted to be called
2693 			 *       multiple times before the tx_ack queue which is
2694 			 *       associated with Rx queue is changed by the
2695 			 *       dequeue of Rx node.
2696 			 *
2697 			 *       Tx node is released early without waiting for
2698 			 *       any dependency on Rx queue. Released Tx node
2699 			 *       reference is overloaded to store the Tx
2700 			 *       fragments count.
2701 			 *
2702 			 *       A hack is used here that depends on the fact
2703 			 *       that memory addresses have a value greater than
2704 			 *       0xFF, to determine whether a node Tx has been
2705 			 *       released in a prior iteration of this function.
2706 			 */
2707 
2708 			/* We must count each SDU HCI fragment */
2709 			tx_node = tx->node;
2710 			if (IS_NODE_TX_PTR(tx_node)) {
2711 				/* We count each SDU fragment completed
2712 				 * by this PDU.
2713 				 */
2714 				sdu_fragments = tx_node->sdu_fragments;
2715 
2716 				/* Replace node reference with fragments
2717 				 * count
2718 				 */
2719 				NODE_TX_FRAGMENTS_SET(tx->node, sdu_fragments);
2720 
2721 				/* Release the node as it is a reference and
2722 				 * not a fragments count.
2723 				 */
2724 				ll_iso_link_tx_release(tx_node->link);
2725 				ll_iso_tx_mem_release(tx_node);
2726 			} else {
2727 				/* Get SDU fragments count from the encoded
2728 				 * node reference value.
2729 				 */
2730 				sdu_fragments = NODE_TX_FRAGMENTS_GET(tx_node);
2731 			}
2732 
2733 			/* Accumulate the tx acknowledgements */
2734 			cmplt += sdu_fragments;
2735 
2736 			goto next_ack;
2737 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
2738 
2739 #if defined(CONFIG_BT_CONN)
2740 		} else {
2741 			struct node_tx *tx_node;
2742 			struct pdu_data *p;
2743 
2744 			/* NOTE: tx_cmplt_get() is permitted to be called
2745 			 *       multiple times before the tx_ack queue which is
2746 			 *       associated with Rx queue is changed by the
2747 			 *       dequeue of Rx node.
2748 			 *
2749 			 *       Tx node is released early without waiting for
2750 			 *       any dependency on Rx queue. Released Tx node
2751 			 *       reference is overloaded to store whether
2752 			 *       packet with data or control was released.
2753 			 *
2754 			 *       A hack is used here that depends on the fact
2755 			 *       that memory addresses have a value greater than
2756 			 *       0xFF, to determine whether a node Tx has been
2757 			 *       released in a prior iteration of this function.
2758 			 */
2759 			tx_node = tx->node;
2760 			p = (void *)tx_node->pdu;
2761 			if (!tx_node ||
2762 			    (IS_NODE_TX_PTR(tx_node) &&
2763 			     (p->ll_id == PDU_DATA_LLID_DATA_START ||
2764 			      p->ll_id == PDU_DATA_LLID_DATA_CONTINUE)) ||
2765 			    (!IS_NODE_TX_PTR(tx_node) &&
2766 			     IS_NODE_TX_DATA(tx_node))) {
2767 				/* data packet, hence count num cmplt */
2768 				NODE_TX_DATA_SET(tx->node);
2769 				cmplt++;
2770 			} else {
2771 				/* ctrl packet or flushed, hence don't count num
2772 				 * cmplt
2773 				 */
2774 				NODE_TX_CTRL_SET(tx->node);
2775 			}
2776 
2777 			if (IS_NODE_TX_PTR(tx_node)) {
2778 				ll_tx_mem_release(tx_node);
2779 			}
2780 #endif /* CONFIG_BT_CONN */
2781 
2782 		}
2783 
2784 #if defined(CONFIG_BT_CTLR_ADV_ISO) || \
2785 	defined(CONFIG_BT_CTLR_CONN_ISO)
2786 next_ack:
2787 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
2788 
2789 		*first = next;
2790 		tx = mfifo_dequeue_iter_get(mfifo_fifo_tx_ack.m, mfifo_tx_ack.s,
2791 					    mfifo_tx_ack.n, mfifo_fifo_tx_ack.f,
2792 					    last, &next);
2793 	} while (tx && tx->handle == *handle);
2794 
2795 	return cmplt;
2796 }
2797 
2798 static inline void rx_demux_conn_tx_ack(uint8_t ack_last, uint16_t handle,
2799 					memq_link_t *link,
2800 					struct node_tx *node_tx)
2801 {
2802 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2803 	do {
2804 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2805 		/* Dequeue node */
2806 		ull_conn_ack_dequeue();
2807 
2808 		/* Process Tx ack */
2809 		ull_conn_tx_ack(handle, link, node_tx);
2810 
2811 		/* Release link mem */
2812 		ull_conn_link_tx_release(link);
2813 
2814 		/* check for more rx ack */
2815 		link = ull_conn_ack_by_last_peek(ack_last, &handle, &node_tx);
2816 
2817 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2818 		if (!link)
2819 #else /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2820 	} while (link);
2821 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2822 
2823 		{
2824 			/* trigger thread to call ll_rx_get() */
2825 			ll_rx_sched();
2826 		}
2827 }
2828 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
2829 
2830 /**
2831  * @brief Dispatch rx objects
2832  * @details Rx objects are only peeked, not dequeued yet.
2833  *   Execution context: ULL high priority Mayfly
2834  */
2835 static inline void rx_demux_rx(memq_link_t *link, struct node_rx_hdr *rx)
2836 {
2837 	/* Demux Rx objects */
2838 	switch (rx->type) {
2839 	case NODE_RX_TYPE_EVENT_DONE:
2840 	{
2841 		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2842 		rx_demux_event_done(link, (struct node_rx_event_done *)rx);
2843 	}
2844 	break;
2845 
2846 #if defined(CONFIG_BT_OBSERVER)
2847 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2848 	case NODE_RX_TYPE_EXT_1M_REPORT:
2849 	case NODE_RX_TYPE_EXT_CODED_REPORT:
2850 	case NODE_RX_TYPE_EXT_AUX_REPORT:
2851 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
2852 	case NODE_RX_TYPE_SYNC_REPORT:
2853 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
2854 	{
2855 		struct pdu_adv *adv;
2856 
2857 		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2858 
2859 		adv = (void *)((struct node_rx_pdu *)rx)->pdu;
2860 		if (adv->type != PDU_ADV_TYPE_EXT_IND) {
2861 			ll_rx_put_sched(link, rx);
2862 			break;
2863 		}
2864 
2865 		ull_scan_aux_setup(link, (struct node_rx_pdu *)rx);
2866 	}
2867 	break;
2868 
2869 	case NODE_RX_TYPE_EXT_AUX_RELEASE:
2870 	{
2871 		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2872 		ull_scan_aux_release(link, (struct node_rx_pdu *)rx);
2873 	}
2874 	break;
2875 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
2876 	case NODE_RX_TYPE_SYNC:
2877 	{
2878 		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2879 		ull_sync_established_report(link, (struct node_rx_pdu *)rx);
2880 	}
2881 	break;
2882 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
2883 	case NODE_RX_TYPE_SYNC_TRANSFER_RECEIVED:
2884 	{
2885 		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2886 		ll_rx_put_sched(link, rx);
2887 	}
2888 	break;
2889 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
2890 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
2891 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2892 #endif /* CONFIG_BT_OBSERVER */
2893 
2894 #if defined(CONFIG_BT_CTLR_CONN_ISO)
2895 	case NODE_RX_TYPE_CIS_ESTABLISHED:
2896 	{
2897 		struct ll_conn *conn;
2898 
2899 		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2900 
2901 		conn = ll_conn_get(rx->handle);
2902 		LL_ASSERT(conn != NULL);
2903 
2904 		if (ull_cp_cc_awaiting_established(conn)) {
2905 			ull_cp_cc_established(conn, BT_HCI_ERR_SUCCESS);
2906 		}
2907 
2908 		rx->type = NODE_RX_TYPE_RELEASE;
2909 		ll_rx_put_sched(link, rx);
2910 	}
2911 	break;
2912 #endif /* CONFIG_BT_CTLR_CONN_ISO */
2913 
2914 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX) || \
2915 	defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
2916 	case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
2917 	case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
2918 	case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
2919 	case NODE_RX_TYPE_IQ_SAMPLE_REPORT_LLL_RELEASE:
2920 	{
2921 		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2922 		ll_rx_put_sched(link, rx);
2923 	}
2924 	break;
2925 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX || CONFIG_BT_CTLR_DF_CONN_CTE_RX || CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */
2926 
2927 #if defined(CONFIG_BT_CONN)
2928 	case NODE_RX_TYPE_CONNECTION:
2929 	{
2930 		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2931 		ull_conn_setup(link, (struct node_rx_pdu *)rx);
2932 	}
2933 	break;
2934 
2935 	case NODE_RX_TYPE_DC_PDU:
2936 	{
2937 		ull_conn_rx(link, (struct node_rx_pdu **)&rx);
2938 
2939 		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2940 
2941 		/* Only schedule node if not marked as retain by LLCP */
2942 		if (rx && rx->type != NODE_RX_TYPE_RETAIN) {
2943 			ll_rx_put_sched(link, rx);
2944 		}
2945 	}
2946 	break;
2947 
2948 	case NODE_RX_TYPE_TERMINATE:
2949 #endif /* CONFIG_BT_CONN */
2950 
2951 #if defined(CONFIG_BT_OBSERVER) || \
2952 	defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
2953 	defined(CONFIG_BT_CTLR_BROADCAST_ISO) || \
2954 	defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) || \
2955 	defined(CONFIG_BT_CTLR_PROFILE_ISR) || \
2956 	defined(CONFIG_BT_CTLR_ADV_INDICATION) || \
2957 	defined(CONFIG_BT_CTLR_SCAN_INDICATION) || \
2958 	defined(CONFIG_BT_CONN)
2959 
2960 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
2961 	case NODE_RX_TYPE_SYNC_CHM_COMPLETE:
2962 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
2963 
2964 #if defined(CONFIG_BT_CTLR_ADV_ISO)
2965 	case NODE_RX_TYPE_BIG_CHM_COMPLETE:
2966 	case NODE_RX_TYPE_BIG_TERMINATE:
2967 #endif /* CONFIG_BT_CTLR_ADV_ISO */
2968 
2969 #if defined(CONFIG_BT_OBSERVER)
2970 	case NODE_RX_TYPE_REPORT:
2971 #endif /* CONFIG_BT_OBSERVER */
2972 
2973 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
2974 	case NODE_RX_TYPE_SCAN_REQ:
2975 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
2976 
2977 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
2978 	case NODE_RX_TYPE_PROFILE:
2979 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
2980 
2981 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
2982 	case NODE_RX_TYPE_ADV_INDICATION:
2983 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
2984 
2985 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
2986 	case NODE_RX_TYPE_SCAN_INDICATION:
2987 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
2988 	case NODE_RX_TYPE_PATH_LOSS:
2989 
2990 	case NODE_RX_TYPE_RELEASE:
2991 	{
2992 		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2993 		ll_rx_put_sched(link, rx);
2994 	}
2995 	break;
2996 #endif /* CONFIG_BT_OBSERVER ||
2997 	* CONFIG_BT_CTLR_ADV_PERIODIC ||
2998 	* CONFIG_BT_CTLR_BROADCAST_ISO ||
2999 	* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY ||
3000 	* CONFIG_BT_CTLR_PROFILE_ISR ||
3001 	* CONFIG_BT_CTLR_ADV_INDICATION ||
3002 	* CONFIG_BT_CTLR_SCAN_INDICATION ||
3003 	* CONFIG_BT_CONN
3004 	*/
3005 
3006 	default:
3007 	{
3008 #if defined(CONFIG_BT_CTLR_USER_EXT)
3009 		/* Try proprietary demuxing */
3010 		rx_demux_rx_proprietary(link, rx, memq_ull_rx.tail,
3011 					&memq_ull_rx.head);
3012 #else
3013 		LL_ASSERT(0);
3014 #endif /* CONFIG_BT_CTLR_USER_EXT */
3015 	}
3016 	break;
3017 	}
3018 }
3019 
3020 static inline void rx_demux_event_done(memq_link_t *link,
3021 				       struct node_rx_event_done *done)
3022 {
3023 	struct ull_hdr *ull_hdr;
3024 	void *release;
3025 
3026 	/* Decrement prepare reference if ULL will not resume */
3027 	ull_hdr = done->param;
3028 	if (ull_hdr) {
3029 		LL_ASSERT(ull_ref_get(ull_hdr));
3030 		ull_ref_dec(ull_hdr);
3031 	}
3032 
3033 	/* Process role dependent event done */
3034 	switch (done->extra.type) {
3035 #if defined(CONFIG_BT_CONN)
3036 	case EVENT_DONE_EXTRA_TYPE_CONN:
3037 		ull_conn_done(done);
3038 		break;
3039 #endif /* CONFIG_BT_CONN */
3040 
3041 #if defined(CONFIG_BT_BROADCASTER)
3042 #if defined(CONFIG_BT_CTLR_ADV_EXT) || \
3043 	defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
3044 	case EVENT_DONE_EXTRA_TYPE_ADV:
3045 		ull_adv_done(done);
3046 		break;
3047 
3048 #if defined(CONFIG_BT_CTLR_ADV_EXT)
3049 	case EVENT_DONE_EXTRA_TYPE_ADV_AUX:
3050 		ull_adv_aux_done(done);
3051 		break;
3052 
3053 #if defined(CONFIG_BT_CTLR_ADV_ISO)
3054 	case EVENT_DONE_EXTRA_TYPE_ADV_ISO_COMPLETE:
3055 		ull_adv_iso_done_complete(done);
3056 		break;
3057 
3058 	case EVENT_DONE_EXTRA_TYPE_ADV_ISO_TERMINATE:
3059 		ull_adv_iso_done_terminate(done);
3060 		break;
3061 #endif /* CONFIG_BT_CTLR_ADV_ISO */
3062 #endif /* CONFIG_BT_CTLR_ADV_EXT */
3063 #endif /* CONFIG_BT_CTLR_ADV_EXT || CONFIG_BT_CTLR_JIT_SCHEDULING */
3064 #endif /* CONFIG_BT_BROADCASTER */
3065 
3066 #if defined(CONFIG_BT_CTLR_ADV_EXT)
3067 #if defined(CONFIG_BT_OBSERVER)
3068 	case EVENT_DONE_EXTRA_TYPE_SCAN:
3069 		ull_scan_done(done);
3070 		break;
3071 
3072 	case EVENT_DONE_EXTRA_TYPE_SCAN_AUX:
3073 		ull_scan_aux_done(done);
3074 		break;
3075 
3076 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
3077 	case EVENT_DONE_EXTRA_TYPE_SYNC:
3078 		ull_sync_done(done);
3079 		break;
3080 
3081 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
3082 	case EVENT_DONE_EXTRA_TYPE_SYNC_ISO_ESTAB:
3083 		ull_sync_iso_estab_done(done);
3084 		break;
3085 
3086 	case EVENT_DONE_EXTRA_TYPE_SYNC_ISO:
3087 		ull_sync_iso_done(done);
3088 		break;
3089 
3090 	case EVENT_DONE_EXTRA_TYPE_SYNC_ISO_TERMINATE:
3091 		ull_sync_iso_done_terminate(done);
3092 		break;
3093 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
3094 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
3095 #endif /* CONFIG_BT_OBSERVER */
3096 #endif /* CONFIG_BT_CTLR_ADV_EXT */
3097 
3098 #if defined(CONFIG_BT_CTLR_CONN_ISO)
3099 	case EVENT_DONE_EXTRA_TYPE_CIS:
3100 		ull_conn_iso_done(done);
3101 		break;
3102 #endif /* CONFIG_BT_CTLR_CONN_ISO */
3103 
3104 #if defined(CONFIG_BT_CTLR_USER_EXT)
3105 	case EVENT_DONE_EXTRA_TYPE_USER_START
3106 		... EVENT_DONE_EXTRA_TYPE_USER_END:
3107 		ull_proprietary_done(done);
3108 		break;
3109 #endif /* CONFIG_BT_CTLR_USER_EXT */
3110 
3111 	case EVENT_DONE_EXTRA_TYPE_NONE:
3112 		/* ignore */
3113 		break;
3114 
3115 	default:
3116 		LL_ASSERT(0);
3117 		break;
3118 	}
3119 
3120 	/* Release done */
3121 	done->extra.type = 0U;
3122 	release = RXFIFO_RELEASE(done, link, done);
3123 	LL_ASSERT(release == done);
3124 
3125 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
3126 	/* dequeue prepare pipeline */
3127 	ull_prepare_dequeue(TICKER_USER_ID_ULL_HIGH);
3128 
3129 	/* LLL done synchronize count */
3130 	lll_done_ull_inc();
3131 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
3132 
3133 	/* If disable initiated, signal the semaphore */
3134 	if (ull_hdr && !ull_ref_get(ull_hdr) && ull_hdr->disabled_cb) {
3135 		ull_hdr->disabled_cb(ull_hdr->disabled_param);
3136 	}
3137 }
3138 
3139 static void disabled_cb(void *param)
3140 {
3141 	k_sem_give(param);
3142 }
3143 
3144 /**
3145  * @brief   Support function for RXFIFO_ALLOC macro
3146  * @details This function allocates up to 'max' number of MFIFO elements by
3147  *          enqueuing pointers to memory elements with associated memq links.
3148  */
3149 void ull_rxfifo_alloc(uint8_t s, uint8_t n, uint8_t f, uint8_t *l, uint8_t *m,
3150 		      void *mem_free, void *link_free, uint8_t max)
3151 {
3152 	uint8_t idx;
3153 
3154 	while ((max--) && mfifo_enqueue_idx_get(n, f, *l, &idx)) {
3155 		memq_link_t *link;
3156 		struct node_rx_hdr *rx;
3157 
3158 		link = mem_acquire(link_free);
3159 		if (!link) {
3160 			break;
3161 		}
3162 
3163 		rx = mem_acquire(mem_free);
3164 		if (!rx) {
3165 			mem_release(link, link_free);
3166 			break;
3167 		}
3168 
3169 		link->mem = NULL;
3170 		rx->link = link;
3171 
3172 		mfifo_by_idx_enqueue(m, s, idx, rx, l);
3173 	}
3174 }
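
/* Not called directly in this file; the RXFIFO_ALLOC()/RXFIFO_INIT_ALLOC()
 * wrappers are expected to expand the MFIFO bookkeeping arguments
 * (s, n, f, l, m) from the FIFO definition, e.g. RXFIFO_INIT_ALLOC(done) in
 * init_reset() above.
 */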
3175 
3176 /**
3177  * @brief   Support function for RXFIFO_RELEASE macro
3178  * @details This function releases a node by returning it to the FIFO.
3179  */
3180 void *ull_rxfifo_release(uint8_t s, uint8_t n, uint8_t f, uint8_t *l, uint8_t *m,
3181 			 memq_link_t *link, struct node_rx_hdr *rx)
3182 {
3183 	uint8_t idx;
3184 
3185 	if (!mfifo_enqueue_idx_get(n, f, *l, &idx)) {
3186 		return NULL;
3187 	}
3188 
3189 	rx->link = link;
3190 
3191 	mfifo_by_idx_enqueue(m, s, idx, rx, l);
3192 
3193 	return rx;
3194 }
3195 
3196 #if defined(CONFIG_BT_CTLR_ISO) || \
3197 	defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER) || \
3198 	defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
3199 /**
3200  * @brief Wraps given time within the range of 0 to ULL_TIME_WRAPPING_POINT_US
3201  * @param  time_now_us  Current time value, in microseconds
3202  * @param  time_diff_us Time difference (signed), in microseconds
3203  * @return              Wrapped time after applying the difference
3204  */
3205 uint32_t ull_get_wrapped_time_us(uint32_t time_now_us, int32_t time_diff_us)
3206 {
3207 	LL_ASSERT(time_now_us <= ULL_TIME_WRAPPING_POINT_US);
3208 
3209 	uint32_t result = ((uint64_t)time_now_us + ULL_TIME_SPAN_FULL_US + time_diff_us) %
3210 				((uint64_t)ULL_TIME_SPAN_FULL_US);
3211 
3212 	return result;
3213 }
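
/* Example of the wrap-around behaviour: with time_now_us = 10 and
 * time_diff_us = -30 the result is
 *   (10 + ULL_TIME_SPAN_FULL_US - 30) % ULL_TIME_SPAN_FULL_US
 *     = ULL_TIME_SPAN_FULL_US - 20
 * i.e. a negative difference that crosses zero wraps back to just below the
 * wrapping point, and a positive difference that crosses the wrapping point
 * wraps forward to a small value.
 */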
3214 #endif /* CONFIG_BT_CTLR_ISO ||
3215 	* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER ||
3216 	* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER
3217 	*/
3218