1 /*
2 * Copyright (c) 2020 Demant
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/kernel.h>
8
9 #include <zephyr/sys/byteorder.h>
10 #include <zephyr/sys/slist.h>
11 #include <zephyr/sys/util.h>
12
13 #include <zephyr/bluetooth/hci_types.h>
14
15 #include "hal/ccm.h"
16
17 #include "util/util.h"
18 #include "util/mem.h"
19 #include "util/memq.h"
20 #include "util/dbuf.h"
21
22 #include "pdu_df.h"
23 #include "lll/pdu_vendor.h"
24 #include "pdu.h"
25
26 #include "ll.h"
27 #include "ll_settings.h"
28
29 #include "lll.h"
30 #include "ll_feat.h"
31 #include "lll/lll_df_types.h"
32 #include "lll_conn.h"
33 #include "lll_conn_iso.h"
34
35 #include "ull_tx_queue.h"
36
37 #include "isoal.h"
38 #include "ull_iso_types.h"
39 #include "ull_conn_iso_types.h"
40 #include "ull_conn_iso_internal.h"
41
42 #include "ull_conn_types.h"
43 #include "ull_internal.h"
44 #include "ull_llcp.h"
45 #include "ull_llcp_features.h"
46 #include "ull_llcp_internal.h"
47 #include "ull_conn_internal.h"
48
49 #include <soc.h>
50 #include "hal/debug.h"
51
52 /* LLCP Local Procedure PHY Update FSM states */
enum {
	/* Procedure inactive */
	LP_PU_STATE_IDLE = LLCP_STATE_IDLE,
	/* Waiting for conditions allowing transmission of PHY_REQ */
	LP_PU_STATE_WAIT_TX_PHY_REQ,
	/* PHY_REQ queued, waiting for its ack from LLL */
	LP_PU_STATE_WAIT_TX_ACK_PHY_REQ,
	/* (Central) waiting for the peer's PHY_RSP */
	LP_PU_STATE_WAIT_RX_PHY_RSP,
	/* (Central) waiting for conditions allowing transmission of PHY_UPDATE_IND */
	LP_PU_STATE_WAIT_TX_PHY_UPDATE_IND,
	/* (Central) PHY_UPDATE_IND queued, waiting for its ack from LLL */
	LP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND,
	/* (Peripheral) waiting for the central's PHY_UPDATE_IND */
	LP_PU_STATE_WAIT_RX_PHY_UPDATE_IND,
	/* Waiting for a free host-notification node before TX */
	LP_PU_STATE_WAIT_NTF_AVAIL,
	/* Waiting for the connection event counter to reach the instant */
	LP_PU_STATE_WAIT_INSTANT,
	/* Instant reached; completion/NTF deferred to on-air RX handling */
	LP_PU_STATE_WAIT_INSTANT_ON_AIR,
};
65
66 /* LLCP Local Procedure PHY Update FSM events */
enum {
	/* Procedure run */
	LP_PU_EVT_RUN,

	/* Response received (PHY_RSP) */
	LP_PU_EVT_PHY_RSP,

	/* Indication received (PHY_UPDATE_IND) */
	LP_PU_EVT_PHY_UPDATE_IND,

	/* Ack received (our queued PDU acked by LLL) */
	LP_PU_EVT_ACK,

	/* Ready to notify host */
	LP_PU_EVT_NTF,

	/* Reject response received (REJECT_EXT_IND) */
	LP_PU_EVT_REJECT,

	/* Unknown response received (UNKNOWN_RSP) */
	LP_PU_EVT_UNKNOWN,
};
89
90 /* LLCP Remote Procedure PHY Update FSM states */
enum {
	/* Procedure inactive */
	RP_PU_STATE_IDLE = LLCP_STATE_IDLE,
	/* Waiting for PHY_REQ from the peer */
	RP_PU_STATE_WAIT_RX_PHY_REQ,
	/* Waiting for conditions allowing transmission of PHY_RSP */
	RP_PU_STATE_WAIT_TX_PHY_RSP,
	/* (Peripheral) PHY_RSP queued, waiting for its ack from LLL */
	RP_PU_STATE_WAIT_TX_ACK_PHY_RSP,
	/* Waiting for conditions allowing transmission of PHY_UPDATE_IND */
	RP_PU_STATE_WAIT_TX_PHY_UPDATE_IND,
	/* (Central) PHY_UPDATE_IND queued, waiting for its ack from LLL */
	RP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND,
	/* Waiting for PHY_UPDATE_IND from the central */
	RP_PU_STATE_WAIT_RX_PHY_UPDATE_IND,
	/* Waiting for a free host-notification node before TX */
	RP_PU_STATE_WAIT_NTF_AVAIL,
	/* Waiting for the connection event counter to reach the instant */
	RP_PU_STATE_WAIT_INSTANT,
	/* Instant reached; completion/NTF deferred to on-air RX handling */
	RP_PU_STATE_WAIT_INSTANT_ON_AIR,
};
103
104 /* LLCP Remote Procedure PHY Update FSM events */
enum {
	/* Procedure run */
	RP_PU_EVT_RUN,

	/* Request received (PHY_REQ) */
	RP_PU_EVT_PHY_REQ,

	/* Ack received (our queued PDU acked by LLL) */
	RP_PU_EVT_ACK,

	/* Indication received (PHY_UPDATE_IND) */
	RP_PU_EVT_PHY_UPDATE_IND,

	/* Ready to notify host */
	RP_PU_EVT_NTF,
};
121
122 /* Hardcoded instant delta +6 */
123 #define PHY_UPDATE_INSTANT_DELTA 6
124
125 #if defined(CONFIG_BT_CENTRAL)
126 /* PHY preference order*/
127 #define PHY_PREF_1 PHY_2M
128 #define PHY_PREF_2 PHY_1M
129 #define PHY_PREF_3 PHY_CODED
130
pu_select_phy(uint8_t phys)131 static inline uint8_t pu_select_phy(uint8_t phys)
132 {
133 /* select only one phy, select preferred */
134 if (phys & PHY_PREF_1) {
135 return PHY_PREF_1;
136 } else if (phys & PHY_PREF_2) {
137 return PHY_PREF_2;
138 } else if (phys & PHY_PREF_3) {
139 return PHY_PREF_3;
140 } else {
141 return 0U;
142 }
143 }
144
pu_prep_update_ind(struct ll_conn * conn,struct proc_ctx * ctx)145 static void pu_prep_update_ind(struct ll_conn *conn, struct proc_ctx *ctx)
146 {
147 ctx->data.pu.tx = pu_select_phy(ctx->data.pu.tx);
148 ctx->data.pu.rx = pu_select_phy(ctx->data.pu.rx);
149
150 if (ctx->data.pu.tx != conn->lll.phy_tx) {
151 ctx->data.pu.c_to_p_phy = ctx->data.pu.tx;
152 } else {
153 ctx->data.pu.c_to_p_phy = 0U;
154 }
155 if (ctx->data.pu.rx != conn->lll.phy_rx) {
156 ctx->data.pu.p_to_c_phy = ctx->data.pu.rx;
157 } else {
158 ctx->data.pu.p_to_c_phy = 0U;
159 }
160 }
161 #endif /* CONFIG_BT_CENTRAL */
162
163 #if defined(CONFIG_BT_PERIPHERAL)
pu_select_phy_timing_restrict(struct ll_conn * conn,uint8_t phy_tx)164 static uint8_t pu_select_phy_timing_restrict(struct ll_conn *conn, uint8_t phy_tx)
165 {
166 /* select the probable PHY with longest Tx time, which
167 * will be restricted to fit current
168 * connEffectiveMaxTxTime.
169 */
170 /* Note - entry 0 in table is unused, so 0 on purpose */
171 uint8_t phy_tx_time[8] = { 0, PHY_1M, PHY_2M, PHY_1M,
172 PHY_CODED, PHY_CODED, PHY_CODED, PHY_CODED };
173 struct lll_conn *lll = &conn->lll;
174 const uint8_t phys = phy_tx | lll->phy_tx;
175
176 return phy_tx_time[phys];
177 }
178 #endif /* CONFIG_BT_PERIPHERAL */
179
pu_set_timing_restrict(struct ll_conn * conn,uint8_t phy_tx)180 static void pu_set_timing_restrict(struct ll_conn *conn, uint8_t phy_tx)
181 {
182 struct lll_conn *lll = &conn->lll;
183
184 lll->phy_tx_time = phy_tx;
185 }
186
pu_reset_timing_restrict(struct ll_conn * conn)187 static void pu_reset_timing_restrict(struct ll_conn *conn)
188 {
189 pu_set_timing_restrict(conn, conn->lll.phy_tx);
190 }
191
192 #if defined(CONFIG_BT_PERIPHERAL)
static inline bool phy_validation_check_phy_ind(uint8_t phy)
{
	/* A PHY field in PHY_UPDATE_IND is valid when at most one of the
	 * three defined PHY bits is set and no RFU bit is set.
	 * The power-of-two test accepts 0/1/2/4; the range test rejects
	 * values with RFU bits (>= bit 3).
	 */
	return ((phy & (uint8_t)(phy - 1U)) == 0U) && (phy <= 4U);
}
200
/* Validate a received PHY_UPDATE_IND; returns 1 when the procedure must end
 * (ctx->data.pu.error then holds the reason), 0 when the update proceeds.
 * Note the checks run in order and a later check may overwrite the error
 * set by an earlier one.
 */
static uint8_t pu_check_update_ind(struct ll_conn *conn, struct proc_ctx *ctx)
{
	uint8_t ret = 0;

	/* Check if either phy selected is invalid */
	if (!phy_validation_check_phy_ind(ctx->data.pu.c_to_p_phy) ||
	    !phy_validation_check_phy_ind(ctx->data.pu.p_to_c_phy)) {
		/* more than one or any rfu bit selected in either phy */
		ctx->data.pu.error = BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
		ret = 1;
	}

	/* Both tx and rx PHY unchanged */
	if (!((ctx->data.pu.c_to_p_phy | ctx->data.pu.p_to_c_phy) & 0x07)) {
		/* if no phy changes, quit procedure, and possibly signal host */
		ctx->data.pu.error = BT_HCI_ERR_SUCCESS;
		ret = 1;
	} else {
		/* if instant already passed, quit procedure with error */
		if (is_instant_reached_or_passed(ctx->data.pu.instant,
						 ull_conn_event_counter(conn))) {
			ctx->data.pu.error = BT_HCI_ERR_INSTANT_PASSED;
			ret = 1;
		}
	}
	return ret;
}
228 #endif /* CONFIG_BT_PERIPHERAL */
229
/* Apply the negotiated PHY change to the LLL connection state at the instant.
 * Returns non-zero when the effective TX and/or RX PHY actually changed.
 */
static uint8_t pu_apply_phy_update(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct lll_conn *lll = &conn->lll;
	/* Mask of PHYs supported by this build; 1M is always supported */
	uint8_t phy_bitmask = PHY_1M;
	const uint8_t old_tx = lll->phy_tx;
	const uint8_t old_rx = lll->phy_rx;

#if defined(CONFIG_BT_CTLR_PHY_2M)
	phy_bitmask |= PHY_2M;
#endif
#if defined(CONFIG_BT_CTLR_PHY_CODED)
	phy_bitmask |= PHY_CODED;
#endif
	/* Drop PHY selections this controller build cannot do; 0 means
	 * 'no change' for that direction.
	 */
	const uint8_t p_to_c_phy = ctx->data.pu.p_to_c_phy & phy_bitmask;
	const uint8_t c_to_p_phy = ctx->data.pu.c_to_p_phy & phy_bitmask;

	if (0) {
#if defined(CONFIG_BT_PERIPHERAL)
	} else if (lll->role == BT_HCI_ROLE_PERIPHERAL) {
		/* Peripheral: p_to_c is our TX direction, c_to_p our RX */
		if (p_to_c_phy) {
			lll->phy_tx = p_to_c_phy;
		}
		if (c_to_p_phy) {
			lll->phy_rx = c_to_p_phy;
		}
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CENTRAL)
	} else if (lll->role == BT_HCI_ROLE_CENTRAL) {
		/* Central: p_to_c is our RX direction, c_to_p our TX */
		if (p_to_c_phy) {
			lll->phy_rx = p_to_c_phy;
		}
		if (c_to_p_phy) {
			lll->phy_tx = c_to_p_phy;
		}
#endif /* CONFIG_BT_CENTRAL */
	}

	return ((old_tx != lll->phy_tx) || (old_rx != lll->phy_rx));
}
269
270 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
pu_calc_eff_time(uint8_t max_octets,uint8_t phy,uint16_t default_time)271 static uint16_t pu_calc_eff_time(uint8_t max_octets, uint8_t phy, uint16_t default_time)
272 {
273 uint16_t payload_time = PDU_DC_MAX_US(max_octets, phy);
274 uint16_t eff_time;
275
276 eff_time = MAX(PDU_DC_PAYLOAD_TIME_MIN, payload_time);
277 eff_time = MIN(eff_time, default_time);
278 #if defined(CONFIG_BT_CTLR_PHY_CODED)
279 eff_time = MAX(eff_time, PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, phy));
280 #endif
281
282 return eff_time;
283 }
284
/* Recalculate the effective DLE max TX/RX times after a PHY change.
 * Returns 1 when the effective times changed (a DLE notification is due),
 * 0 otherwise.
 */
static uint8_t pu_update_eff_times(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct lll_conn *lll = &conn->lll;
	uint16_t eff_tx_time = lll->dle.eff.max_tx_time;
	uint16_t eff_rx_time = lll->dle.eff.max_rx_time;
	uint16_t max_rx_time, max_tx_time;

	ull_dle_max_time_get(conn, &max_rx_time, &max_tx_time);

	/* Our TX direction changed PHY: p_to_c for peripheral, c_to_p for
	 * central.
	 */
	if ((ctx->data.pu.p_to_c_phy && (lll->role == BT_HCI_ROLE_PERIPHERAL)) ||
	    (ctx->data.pu.c_to_p_phy && (lll->role == BT_HCI_ROLE_CENTRAL))) {
		eff_tx_time =
			pu_calc_eff_time(lll->dle.eff.max_tx_octets, lll->phy_tx, max_tx_time);
	}
	/* Our RX direction changed PHY: the role/direction mapping mirrors
	 * the TX case above.
	 */
	if ((ctx->data.pu.p_to_c_phy && (lll->role == BT_HCI_ROLE_CENTRAL)) ||
	    (ctx->data.pu.c_to_p_phy && (lll->role == BT_HCI_ROLE_PERIPHERAL))) {
		eff_rx_time =
			pu_calc_eff_time(lll->dle.eff.max_rx_octets, lll->phy_rx, max_rx_time);
	}

	/* Commit when either effective time grew, or the stored effective
	 * time exceeds the current maximum (needs shrinking).
	 */
	if ((eff_tx_time > lll->dle.eff.max_tx_time) ||
	    (lll->dle.eff.max_tx_time > max_tx_time) ||
	    (eff_rx_time > lll->dle.eff.max_rx_time) ||
	    (lll->dle.eff.max_rx_time > max_rx_time)) {
		lll->dle.eff.max_tx_time = eff_tx_time;
		lll->dle.eff.max_rx_time = eff_rx_time;
#if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
		/* Flag event length update towards LLL scheduling */
		lll->evt_len_upd = 1U;
#endif /* CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE */
		return 1U;
	}

	return 0U;
}
319 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
320
pu_set_preferred_phys(struct ll_conn * conn,struct proc_ctx * ctx)321 static inline void pu_set_preferred_phys(struct ll_conn *conn, struct proc_ctx *ctx)
322 {
323 conn->phy_pref_rx = ctx->data.pu.rx;
324 conn->phy_pref_tx = ctx->data.pu.tx;
325
326 /*
327 * Note: Since 'flags' indicate local coded phy preference (S2 or S8) and
328 * this is not negotiated with the peer, it is simply reconfigured in conn->lll when
329 * the update is initiated, and takes effect whenever the coded phy is in use.
330 */
331 conn->lll.phy_flags = ctx->data.pu.flags;
332 }
333
/* Intersect the peer-requested PHY masks ('tx'/'rx') with the locally
 * preferred masks already stored in ctx->data.pu.
 */
static inline void pu_combine_phys(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t tx,
				   uint8_t rx)
{
	/* Combine requested phys with locally preferred phys */
	ctx->data.pu.rx &= rx;
	ctx->data.pu.tx &= tx;
	/* If either tx or rx is 'no change' at this point we force both to no change to
	 * comply with the spec
	 * Spec. BT5.2 Vol6, Part B, section 5.1.10:
	 * The remainder of this section shall apply irrespective of which device initiated
	 * the procedure.
	 *
	 * Irrespective of the above rules, the central may leave both directions
	 * unchanged. If the periph specified a single PHY in both the TX_PHYS and
	 * RX_PHYS fields and both fields are the same, the central shall either select
	 * the PHY specified by the periph for both directions or shall leave both directions
	 * unchanged.
	 */
	if (conn->lll.role == BT_HCI_ROLE_CENTRAL && (!ctx->data.pu.rx || !ctx->data.pu.tx)) {
		ctx->data.pu.tx = 0;
		ctx->data.pu.rx = 0;
	}
}
357
358 #if defined(CONFIG_BT_CENTRAL)
pu_prepare_instant(struct ll_conn * conn,struct proc_ctx * ctx)359 static void pu_prepare_instant(struct ll_conn *conn, struct proc_ctx *ctx)
360 {
361 /* Set instance only in case there is actual PHY change. Otherwise the instant should be
362 * set to 0.
363 */
364 if (ctx->data.pu.c_to_p_phy != 0 || ctx->data.pu.p_to_c_phy != 0) {
365 ctx->data.pu.instant = ull_conn_event_counter(conn) + conn->lll.latency +
366 PHY_UPDATE_INSTANT_DELTA;
367 } else {
368 ctx->data.pu.instant = 0;
369 }
370 }
371 #endif /* CONFIG_BT_CENTRAL */
372
373 /*
374 * LLCP Local Procedure PHY Update FSM
375 */
376
/* Encode and enqueue the local procedure's pending LL Control PDU
 * (PHY_REQ, or PHY_UPDATE_IND for a central) and advance the FSM state.
 * May defer TX (WAIT_NTF_AVAIL) until a DLE notification node is available.
 */
static void lp_pu_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	struct node_tx *tx;
	struct pdu_data *pdu;

	LL_ASSERT(ctx->node_ref.tx);

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	/* Pre-allocate the DLE notification node before transmitting,
	 * except for the central's initial PHY_REQ (no DLE ntf can result
	 * from that TX alone).
	 */
	if (!((ctx->tx_opcode == PDU_DATA_LLCTRL_TYPE_PHY_REQ) &&
	      (conn->lll.role == BT_HCI_ROLE_CENTRAL))) {
		if (!llcp_ntf_alloc_is_available()) {
			/* No NTF nodes avail, so we need to hold off TX */
			ctx->state = LP_PU_STATE_WAIT_NTF_AVAIL;
			return;
		}
		ctx->data.pu.ntf_dle_node = llcp_ntf_alloc();
		LL_ASSERT(ctx->data.pu.ntf_dle_node);
	}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

	/* Consume the pre-allocated TX node; keep a ref for ack matching */
	tx = ctx->node_ref.tx;
	ctx->node_ref.tx = NULL;
	ctx->node_ref.tx_ack = tx;
	pdu = (struct pdu_data *)tx->pdu;

	/* Encode LL Control PDU */
	switch (ctx->tx_opcode) {
	case PDU_DATA_LLCTRL_TYPE_PHY_REQ:
		pu_set_preferred_phys(conn, ctx);
		llcp_pdu_encode_phy_req(ctx, pdu);
		/* Data TX is paused until the PHY update resolves */
		llcp_tx_pause_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		ctx->state = LP_PU_STATE_WAIT_TX_ACK_PHY_REQ;
		break;
#if defined(CONFIG_BT_CENTRAL)
	case PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND:
		pu_prep_update_ind(conn, ctx);
		pu_prepare_instant(conn, ctx);
		llcp_pdu_encode_phy_update_ind(ctx, pdu);
		/* No further peer PDU is expected for this procedure */
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		ctx->state = LP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND;
		break;
#endif /* CONFIG_BT_CENTRAL */
	default:
		LL_ASSERT(0);
	}

	/* Enqueue LL Control PDU towards LLL */
	llcp_tx_enqueue(conn, tx);

	/* Restart procedure response timeout timer */
	llcp_lr_prt_restart(conn);
}
429
/* Emit the PHY update host notification by re-using the retained RX node;
 * when no notification is due, the node is released instead.
 */
static void pu_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct node_rx_pdu *ntf;
	struct node_rx_pu *pdu;

	/* Piggy-back on stored RX node */
	ntf = ctx->node_ref.rx;
	ctx->node_ref.rx = NULL;
	LL_ASSERT(ntf);

	if (ctx->data.pu.ntf_pu) {
		/* Node must have been retained earlier via llcp_rx_node_retain() */
		LL_ASSERT(ntf->hdr.type == NODE_RX_TYPE_RETAIN);
		ntf->hdr.type = NODE_RX_TYPE_PHY_UPDATE;
		ntf->hdr.handle = conn->lll.handle;
		pdu = (struct node_rx_pu *)ntf->pdu;

		pdu->status = ctx->data.pu.error;
		pdu->rx = conn->lll.phy_rx;
		pdu->tx = conn->lll.phy_tx;
	} else {
		/* No notification due; mark the node for release */
		ntf->hdr.type = NODE_RX_TYPE_RELEASE;
	}

	/* Enqueue notification towards LL */
	ll_rx_put_sched(ntf->hdr.link, ntf);

	ctx->data.pu.ntf_pu = 0;
}
458
459 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* Emit (or release) the pre-allocated data-length-change notification that
 * may accompany a PHY update.
 */
static void pu_dle_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct node_rx_pdu *ntf;
	struct pdu_data *pdu;

	/* Retrieve DLE ntf node */
	ntf = ctx->data.pu.ntf_dle_node;

	if (!ctx->data.pu.ntf_dle) {
		if (!ntf) {
			/* If no DLE ntf was pre-allocated there is nothing more to do */
			/* This will happen in case of a completion on UNKNOWN_RSP to PHY_REQ
			 * in Central case.
			 */
			return;
		}
		/* Signal to release pre-allocated node in case there is no DLE ntf */
		ntf->hdr.type = NODE_RX_TYPE_RELEASE;
	} else {
		LL_ASSERT(ntf);

		ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
		ntf->hdr.handle = conn->lll.handle;
		pdu = (struct pdu_data *)ntf->pdu;

		llcp_ntf_encode_length_change(conn, pdu);
	}

	/* Enqueue notification towards LL */
	ll_rx_put_sched(ntf->hdr.link, ntf);

	ctx->data.pu.ntf_dle = 0;
	ctx->data.pu.ntf_dle_node = NULL;
}
494 #endif
495
/* Final local-procedure teardown: release the local request slot, lift the
 * paused-command restriction and return the FSM to idle.
 */
static void lp_pu_complete_finalize(struct ll_conn *conn, struct proc_ctx *ctx)
{
	llcp_lr_complete(conn);
	llcp_rr_set_paused_cmd(conn, PROC_NONE);
	ctx->state = LP_PU_STATE_IDLE;
}
502
/* Deliver the pending PHY update (and, when built in, DLE) notifications,
 * then finalize the local procedure.
 */
static void lp_pu_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	pu_ntf(conn, ctx);
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	pu_dle_ntf(conn, ctx);
#endif
	lp_pu_complete_finalize(conn, ctx);
}
511
/* Mark the local procedure as done pending the on-air instant. */
static void lp_pu_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	pu_reset_timing_restrict(conn);

	/* Postpone procedure completion (and possible NTF generation) to actual 'air instant'
	 * Since LLCP STM is driven from LLL prepare this actually happens BEFORE instant
	 * and thus NTFs are generated and propagated up prior to actual instant on air.
	 * Instead postpone completion/NTF to the beginning of RX handling
	 */
	ctx->state = LP_PU_STATE_WAIT_INSTANT_ON_AIR;
}
523
/* Transmit PHY_REQ when allowed; otherwise park in WAIT_TX_PHY_REQ.
 * TX is held off while the local request path is paused, a procedure
 * collision is pending, no TX node is available, or the remote side has
 * paused this command type.
 */
static void lp_pu_send_phy_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	if (llcp_lr_ispaused(conn) || llcp_rr_get_collision(conn) ||
	    !llcp_tx_alloc_peek(conn, ctx) ||
	    (llcp_rr_get_paused_cmd(conn) == PROC_PHY_UPDATE)) {
		ctx->state = LP_PU_STATE_WAIT_TX_PHY_REQ;
	} else {
		/* Flag potential remote-procedure collision as resolvable */
		llcp_rr_set_incompat(conn, INCOMPAT_RESOLVABLE);
		/* Pause remote CTE REQ while PHY update is in flight */
		llcp_rr_set_paused_cmd(conn, PROC_CTE_REQ);
		ctx->tx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_REQ;

		/* Allocate TX node */
		ctx->node_ref.tx = llcp_tx_alloc(conn, ctx);
		lp_pu_tx(conn, ctx, evt, param);
	}
}
540
541 #if defined(CONFIG_BT_CENTRAL)
lp_pu_send_phy_update_ind(struct ll_conn * conn,struct proc_ctx * ctx,uint8_t evt,void * param)542 static void lp_pu_send_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
543 void *param)
544 {
545 if (llcp_lr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
546 ctx->state = LP_PU_STATE_WAIT_TX_PHY_UPDATE_IND;
547 } else {
548 ctx->tx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND;
549
550 /* Allocate TX node */
551 ctx->node_ref.tx = llcp_tx_alloc(conn, ctx);
552 lp_pu_tx(conn, ctx, evt, param);
553 }
554 }
555 #endif /* CONFIG_BT_CENTRAL */
556
lp_pu_st_idle(struct ll_conn * conn,struct proc_ctx * ctx,uint8_t evt,void * param)557 static void lp_pu_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
558 {
559 switch (evt) {
560 case LP_PU_EVT_RUN:
561 lp_pu_send_phy_req(conn, ctx, evt, param);
562 break;
563 default:
564 /* Ignore other evts */
565 break;
566 }
567 }
568
lp_pu_st_wait_tx_phy_req(struct ll_conn * conn,struct proc_ctx * ctx,uint8_t evt,void * param)569 static void lp_pu_st_wait_tx_phy_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
570 void *param)
571 {
572 switch (evt) {
573 case LP_PU_EVT_RUN:
574 lp_pu_send_phy_req(conn, ctx, evt, param);
575 break;
576 default:
577 /* Ignore other evts */
578 break;
579 }
580 }
581
582 #if defined(CONFIG_BT_CENTRAL)
/* (Central) handle the peer's PHY_RSP, or an UNKNOWN_RSP indicating the
 * peer does not support the PHY update procedure.
 */
static void lp_pu_st_wait_rx_phy_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				     void *param)
{
	switch (evt) {
	case LP_PU_EVT_PHY_RSP:
		/* Procedure is now past the point of no return for collisions */
		llcp_rr_set_incompat(conn, INCOMPAT_RESERVED);
		/* 'Prefer' the phys from the REQ */
		uint8_t tx_pref = ctx->data.pu.tx;
		uint8_t rx_pref = ctx->data.pu.rx;

		llcp_pdu_decode_phy_rsp(ctx, (struct pdu_data *)param);
		/* Pause data tx */
		llcp_tx_pause_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		/* Combine with the 'Preferred' phys */
		pu_combine_phys(conn, ctx, tx_pref, rx_pref);

		/* Mark RX node to NOT release */
		llcp_rx_node_retain(ctx);

		lp_pu_send_phy_update_ind(conn, ctx, evt, param);
		break;
	case LP_PU_EVT_UNKNOWN:
		llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
		/* Unsupported in peer, so disable locally for this connection
		 * Peer does not accept PHY UPDATE, so disable non 1M phys on current connection
		 */
		feature_unmask_features(conn, LL_FEAT_BIT_PHY_2M | LL_FEAT_BIT_PHY_CODED);

		/* Mark RX node to NOT release */
		llcp_rx_node_retain(ctx);

		ctx->data.pu.error = BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
		ctx->data.pu.ntf_pu = 1;
		lp_pu_complete(conn, ctx, evt, param);
		llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
624 #endif /* CONFIG_BT_CENTRAL */
625
/* PHY_REQ has been acked by LLL: the next step depends on our role —
 * central waits for PHY_RSP, peripheral waits for PHY_UPDATE_IND.
 */
static void lp_pu_st_wait_tx_ack_phy_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
					 void *param)
{
	switch (evt) {
	case LP_PU_EVT_ACK:
		switch (conn->lll.role) {
#if defined(CONFIG_BT_CENTRAL)
		case BT_HCI_ROLE_CENTRAL:
			ctx->state = LP_PU_STATE_WAIT_RX_PHY_RSP;
			ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_RSP;
			break;
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_PERIPHERAL)
		case BT_HCI_ROLE_PERIPHERAL:
			/* If we act as peripheral apply timing restriction */
			pu_set_timing_restrict(
				conn, pu_select_phy_timing_restrict(conn, ctx->data.pu.tx));
			ctx->state = LP_PU_STATE_WAIT_RX_PHY_UPDATE_IND;
			ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND;
			llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
			break;
#endif /* CONFIG_BT_PERIPHERAL */
		default:
			/* Unknown role */
			LL_ASSERT(0);
		}

		break;
	default:
		/* Ignore other evts */
		break;
	}
}
659
660 #if defined(CONFIG_BT_CENTRAL)
lp_pu_st_wait_tx_phy_update_ind(struct ll_conn * conn,struct proc_ctx * ctx,uint8_t evt,void * param)661 static void lp_pu_st_wait_tx_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
662 void *param)
663 {
664 switch (evt) {
665 case LP_PU_EVT_RUN:
666 lp_pu_send_phy_update_ind(conn, ctx, evt, param);
667 break;
668 default:
669 /* Ignore other evts */
670 break;
671 }
672 }
673
/* (Central) PHY_UPDATE_IND acked by LLL: either wait for the instant when a
 * PHY actually changes, or complete immediately when nothing changes.
 */
static void lp_pu_st_wait_tx_ack_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx,
						uint8_t evt, void *param)
{
	switch (evt) {
	case LP_PU_EVT_ACK:
		LL_ASSERT(conn->lll.role == BT_HCI_ROLE_CENTRAL);
		if (ctx->data.pu.p_to_c_phy || ctx->data.pu.c_to_p_phy) {
			/* Either phys should change */
			if (ctx->data.pu.c_to_p_phy) {
				/* central to periph tx phy changes so, apply timing restriction */
				pu_set_timing_restrict(conn, ctx->data.pu.c_to_p_phy);
			}

			/* Since at least one phy will change,
			 * stop the procedure response timeout
			 */
			llcp_lr_prt_stop(conn);

			/* Now we should wait for instant */
			ctx->state = LP_PU_STATE_WAIT_INSTANT;
		} else {
			/* No PHY change: complete now, notify host only if
			 * it initiated the procedure
			 */
			llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
			ctx->data.pu.error = BT_HCI_ERR_SUCCESS;
			ctx->data.pu.ntf_pu = ctx->data.pu.host_initiated;
			lp_pu_complete(conn, ctx, evt, param);
		}
		llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
707 #endif /* CONFIG_BT_CENTRAL */
708
709 #if defined(CONFIG_BT_PERIPHERAL)
/* (Peripheral) handle the central's PHY_UPDATE_IND, a REJECT_EXT_IND, or an
 * UNKNOWN_RSP while our local procedure awaits the indication.
 */
static void lp_pu_st_wait_rx_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
					    void *param)
{
	switch (evt) {
	case LP_PU_EVT_PHY_UPDATE_IND:
		LL_ASSERT(conn->lll.role == BT_HCI_ROLE_PERIPHERAL);
		llcp_rr_set_incompat(conn, INCOMPAT_RESERVED);
		llcp_pdu_decode_phy_update_ind(ctx, (struct pdu_data *)param);
		/* Non-zero when the IND is invalid, carries no change, or its
		 * instant has already passed (error code set in ctx)
		 */
		const uint8_t end_procedure = pu_check_update_ind(conn, ctx);

		/* Mark RX node to NOT release */
		llcp_rx_node_retain(ctx);

		if (!end_procedure) {
			if (ctx->data.pu.p_to_c_phy) {
				/* If periph to central phy changes apply tx timing restriction */
				pu_set_timing_restrict(conn, ctx->data.pu.p_to_c_phy);
			}

			/* Since at least one phy will change,
			 * stop the procedure response timeout
			 */
			llcp_lr_prt_stop(conn);

			ctx->state = LP_PU_STATE_WAIT_INSTANT;
		} else {
			llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
			if (ctx->data.pu.error != BT_HCI_ERR_SUCCESS) {
				/* Mark the connection for termination */
				conn->llcp_terminate.reason_final = ctx->data.pu.error;
			}
			ctx->data.pu.ntf_pu = ctx->data.pu.host_initiated;
			lp_pu_complete(conn, ctx, evt, param);
		}
		llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		break;
	case LP_PU_EVT_REJECT:
		llcp_pdu_decode_reject_ext_ind(ctx, (struct pdu_data *)param);
		ctx->data.pu.error = ctx->reject_ext_ind.error_code;
		/* Fallthrough */
	case LP_PU_EVT_UNKNOWN:
		llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
		if (evt == LP_PU_EVT_UNKNOWN) {
			/* Peer lacks PHY update support; drop non-1M features
			 * for this connection
			 */
			feature_unmask_features(conn, LL_FEAT_BIT_PHY_2M | LL_FEAT_BIT_PHY_CODED);
			ctx->data.pu.error = BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
		}
		/* Mark RX node to NOT release */
		llcp_rx_node_retain(ctx);

		ctx->data.pu.ntf_pu = 1;
		lp_pu_complete(conn, ctx, evt, param);
		llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
768 #endif /* CONFIG_BT_PERIPHERAL */
769
/* When the upcoming connection event reaches the instant, apply the PHY
 * change, recompute DLE effective times (when built in) and complete the
 * local procedure.
 */
static void lp_pu_check_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				void *param)
{
	uint16_t event_counter = ull_conn_event_counter_at_prepare(conn);

	if (is_instant_reached_or_passed(ctx->data.pu.instant, event_counter)) {
		const uint8_t phy_changed = pu_apply_phy_update(conn, ctx);
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
		if (phy_changed) {
			/* DLE ntf is due only when effective times changed */
			ctx->data.pu.ntf_dle = pu_update_eff_times(conn, ctx);
		}
#endif
		llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
		ctx->data.pu.error = BT_HCI_ERR_SUCCESS;
		/* Notify host on an actual change, or when host initiated */
		ctx->data.pu.ntf_pu = (phy_changed || ctx->data.pu.host_initiated);
		lp_pu_complete(conn, ctx, evt, param);
	}
}
788
lp_pu_st_wait_instant(struct ll_conn * conn,struct proc_ctx * ctx,uint8_t evt,void * param)789 static void lp_pu_st_wait_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
790 void *param)
791 {
792 switch (evt) {
793 case LP_PU_EVT_RUN:
794 lp_pu_check_instant(conn, ctx, evt, param);
795 break;
796 default:
797 /* Ignore other evts */
798 break;
799 }
800 }
801
lp_pu_st_wait_instant_on_air(struct ll_conn * conn,struct proc_ctx * ctx,uint8_t evt,void * param)802 static void lp_pu_st_wait_instant_on_air(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
803 void *param)
804 {
805 switch (evt) {
806 case LP_PU_EVT_NTF:
807 lp_pu_tx_ntf(conn, ctx, evt, param);
808 break;
809 default:
810 /* Ignore other evts */
811 break;
812 }
813 }
814
815 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
lp_pu_st_wait_ntf_avail(struct ll_conn * conn,struct proc_ctx * ctx,uint8_t evt,void * param)816 static void lp_pu_st_wait_ntf_avail(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
817 void *param)
818 {
819 switch (evt) {
820 case LP_PU_EVT_RUN:
821 lp_pu_tx(conn, ctx, evt, param);
822 break;
823 default:
824 /* Ignore other evts */
825 break;
826 }
827 }
828 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
829
/* Dispatch an event to the handler of the local procedure's current state. */
static void lp_pu_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	switch (ctx->state) {
	case LP_PU_STATE_IDLE:
		lp_pu_st_idle(conn, ctx, evt, param);
		break;
	case LP_PU_STATE_WAIT_TX_PHY_REQ:
		lp_pu_st_wait_tx_phy_req(conn, ctx, evt, param);
		break;
	case LP_PU_STATE_WAIT_TX_ACK_PHY_REQ:
		lp_pu_st_wait_tx_ack_phy_req(conn, ctx, evt, param);
		break;
#if defined(CONFIG_BT_CENTRAL)
	case LP_PU_STATE_WAIT_RX_PHY_RSP:
		lp_pu_st_wait_rx_phy_rsp(conn, ctx, evt, param);
		break;
	case LP_PU_STATE_WAIT_TX_PHY_UPDATE_IND:
		lp_pu_st_wait_tx_phy_update_ind(conn, ctx, evt, param);
		break;
	case LP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND:
		lp_pu_st_wait_tx_ack_phy_update_ind(conn, ctx, evt, param);
		break;
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_PERIPHERAL)
	case LP_PU_STATE_WAIT_RX_PHY_UPDATE_IND:
		lp_pu_st_wait_rx_phy_update_ind(conn, ctx, evt, param);
		break;
#endif /* CONFIG_BT_PERIPHERAL */
	case LP_PU_STATE_WAIT_INSTANT:
		lp_pu_st_wait_instant(conn, ctx, evt, param);
		break;
	case LP_PU_STATE_WAIT_INSTANT_ON_AIR:
		lp_pu_st_wait_instant_on_air(conn, ctx, evt, param);
		break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case LP_PU_STATE_WAIT_NTF_AVAIL:
		lp_pu_st_wait_ntf_avail(conn, ctx, evt, param);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
	default:
		/* Unknown state */
		LL_ASSERT(0);
	}
}
874
/* Entry point for LL Control PDUs received while the local PHY update
 * procedure is active; maps the opcode to an FSM event. An unexpected
 * opcode terminates the connection.
 */
void llcp_lp_pu_rx(struct ll_conn *conn, struct proc_ctx *ctx, struct node_rx_pdu *rx)
{
	struct pdu_data *pdu = (struct pdu_data *)rx->pdu;

	switch (pdu->llctrl.opcode) {
#if defined(CONFIG_BT_CENTRAL)
	case PDU_DATA_LLCTRL_TYPE_PHY_RSP:
		lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_PHY_RSP, pdu);
		break;
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_PERIPHERAL)
	case PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND:
		lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_PHY_UPDATE_IND, pdu);
		break;
#endif /* CONFIG_BT_PERIPHERAL */
	case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
		lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_UNKNOWN, pdu);
		break;
	case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
		lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_REJECT, pdu);
		break;
	default:
		/* Invalid behaviour */
		/* Invalid PDU received so terminate connection */
		conn->llcp_terminate.reason_final = BT_HCI_ERR_LMP_PDU_NOT_ALLOWED;
		llcp_lr_complete(conn);
		ctx->state = LP_PU_STATE_IDLE;
		break;
	}
}
905
/* Drive the local procedure FSM with a RUN event. */
void llcp_lp_pu_run(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
{
	lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_RUN, param);
}
910
/* Inform the local procedure FSM that its queued PDU was acked by LLL. */
void llcp_lp_pu_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
{
	lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_ACK, param);
}
915
/* Trigger delivery of the postponed host notification(s). */
void llcp_lp_pu_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
	lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_NTF, NULL);
}
920
llcp_lp_pu_awaiting_instant(struct proc_ctx * ctx)921 bool llcp_lp_pu_awaiting_instant(struct proc_ctx *ctx)
922 {
923 return (ctx->state == LP_PU_STATE_WAIT_INSTANT);
924 }
925
926 /*
927 * LLCP Remote Procedure PHY Update FSM
928 */
/* Encode and enqueue the remote procedure's pending LL Control PDU
 * (PHY_RSP for a peripheral, PHY_UPDATE_IND for a central) and advance the
 * FSM state. May defer TX (WAIT_NTF_AVAIL) until a DLE notification node
 * is available.
 */
static void rp_pu_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	struct node_tx *tx;
	struct pdu_data *pdu;

	LL_ASSERT(ctx->node_ref.tx);

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	/* Pre-allocate the DLE notification node before transmitting */
	if (!llcp_ntf_alloc_is_available()) {
		/* No NTF nodes avail, so we need to hold off TX */
		ctx->state = RP_PU_STATE_WAIT_NTF_AVAIL;
		return;
	}

	ctx->data.pu.ntf_dle_node = llcp_ntf_alloc();
	LL_ASSERT(ctx->data.pu.ntf_dle_node);
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

	/* Consume the pre-allocated TX node; keep a ref for ack matching */
	tx = ctx->node_ref.tx;
	ctx->node_ref.tx = NULL;
	pdu = (struct pdu_data *)tx->pdu;
	ctx->node_ref.tx_ack = tx;

	/* Encode LL Control PDU */
	switch (ctx->tx_opcode) {
#if defined(CONFIG_BT_PERIPHERAL)
	case PDU_DATA_LLCTRL_TYPE_PHY_RSP:
		llcp_pdu_encode_phy_rsp(conn, pdu);
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND;
		ctx->state = RP_PU_STATE_WAIT_TX_ACK_PHY_RSP;
		break;
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CENTRAL)
	case PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND:
		pu_prep_update_ind(conn, ctx);
		pu_prepare_instant(conn, ctx);
		llcp_pdu_encode_phy_update_ind(ctx, pdu);
		/* No further peer PDU is expected for this procedure */
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		ctx->state = RP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND;
		break;
#endif /* CONFIG_BT_CENTRAL */
	default:
		LL_ASSERT(0);
	}

	/* Enqueue LL Control PDU towards LLL */
	llcp_tx_enqueue(conn, tx);

	/* Restart procedure response timeout timer */
	llcp_rr_prt_restart(conn);
}
980
/* Final teardown of the remote PHY update procedure: release the remote
 * request slot, lift the pause on other procedures and return to IDLE.
 */
static void rp_pu_complete_finalize(struct ll_conn *conn, struct proc_ctx *ctx)
{
	llcp_rr_complete(conn);
	llcp_rr_set_paused_cmd(conn, PROC_NONE);
	ctx->state = RP_PU_STATE_IDLE;
}
987
/* Mark the remote PHY update procedure as complete, deferring the actual
 * finalization (and any NTF generation) until the instant is on air.
 */
static void rp_pu_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	pu_reset_timing_restrict(conn);
	/* Postpone procedure completion (and possible NTF generation) to actual 'air instant'
	 * Since LLCP STM is driven from LLL prepare this actually happens BEFORE instant
	 * and thus NTFs are generated and propagated up prior to actual instant on air.
	 * Instead postpone completion/NTF to the beginning of RX handling
	 */
	ctx->state = RP_PU_STATE_WAIT_INSTANT_ON_AIR;
}
998
/* Generate the host notification(s) for the completed remote PHY update
 * (PHY update NTF, plus a DLE NTF when data length handling is enabled)
 * and then finalize the procedure.
 */
static void rp_pu_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	pu_ntf(conn, ctx);
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	pu_dle_ntf(conn, ctx);
#endif
	rp_pu_complete_finalize(conn, ctx);
}
1007
1008 #if defined(CONFIG_BT_CENTRAL)
/* As central: try to send the PHY_UPDATE_IND PDU. If TX is currently not
 * possible (remote procedures paused, no TX node, PHY update command paused,
 * or LLL TX queue not yet drained) stay in the WAIT_TX state and retry on a
 * later RUN event.
 */
static void rp_pu_send_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				      void *param)
{
	const bool tx_blocked = llcp_rr_ispaused(conn) ||
				!llcp_tx_alloc_peek(conn, ctx) ||
				(llcp_rr_get_paused_cmd(conn) == PROC_PHY_UPDATE) ||
				!ull_is_lll_tx_queue_empty(conn);

	if (tx_blocked) {
		ctx->state = RP_PU_STATE_WAIT_TX_PHY_UPDATE_IND;
		return;
	}

	/* Hold off any CTE REQ procedure while the PHY update is in flight */
	llcp_rr_set_paused_cmd(conn, PROC_CTE_REQ);
	ctx->tx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND;
	ctx->node_ref.tx = llcp_tx_alloc(conn, ctx);
	rp_pu_tx(conn, ctx, evt, param);
}
1024 #endif /* CONFIG_BT_CENTRAL */
1025
#if defined(CONFIG_BT_PERIPHERAL)
/* As peripheral: try to send the PHY_RSP PDU. If TX is currently not
 * possible (remote procedures paused, no TX node available, or the PHY
 * update command is paused) stay in the WAIT_TX state and retry on a later
 * RUN event.
 */
static void rp_pu_send_phy_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	if (llcp_rr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx) ||
	    (llcp_rr_get_paused_cmd(conn) == PROC_PHY_UPDATE)) {
		ctx->state = RP_PU_STATE_WAIT_TX_PHY_RSP;
	} else {
		/* Hold off any CTE REQ procedure while the PHY update is in flight */
		llcp_rr_set_paused_cmd(conn, PROC_CTE_REQ);
		ctx->tx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_RSP;
		ctx->node_ref.tx = llcp_tx_alloc(conn, ctx);
		rp_pu_tx(conn, ctx, evt, param);
	}
}
#endif /* CONFIG_BT_PERIPHERAL */
1040
/* IDLE state: a RUN event arms the remote procedure and moves it to wait
 * for the peer's PHY_REQ; all other events are ignored.
 */
static void rp_pu_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	if (evt == RP_PU_EVT_RUN) {
		ctx->state = RP_PU_STATE_WAIT_RX_PHY_REQ;
	}
}
1052
/* WAIT_RX_PHY_REQ state: handle reception of the peer's PHY_REQ.
 *
 * Only an RP_PU_EVT_PHY_REQ event carries a valid PDU in param; other
 * events (e.g. RP_PU_EVT_RUN, where param may be NULL) must not be
 * decoded, so all PDU handling is kept inside the PHY_REQ case.
 */
static void rp_pu_st_wait_rx_phy_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				     void *param)
{
	switch (evt) {
	case RP_PU_EVT_PHY_REQ:
		llcp_pdu_decode_phy_req(ctx, (struct pdu_data *)param);
		/* Combine with the 'Preferred' the phys in conn->phy_pref_?x */
		pu_combine_phys(conn, ctx, conn->phy_pref_tx, conn->phy_pref_rx);
		/* Pause data TX while the PHY update control exchange is in flight */
		llcp_tx_pause_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);

		switch (conn->lll.role) {
#if defined(CONFIG_BT_CENTRAL)
		case BT_HCI_ROLE_CENTRAL:
			/* Mark RX node to NOT release */
			llcp_rx_node_retain(ctx);
			rp_pu_send_phy_update_ind(conn, ctx, evt, param);
			break;
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_PERIPHERAL)
		case BT_HCI_ROLE_PERIPHERAL:
			rp_pu_send_phy_rsp(conn, ctx, evt, param);
			break;
#endif /* CONFIG_BT_PERIPHERAL */
		default:
			/* Unknown role */
			LL_ASSERT(0);
		}
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
1086
1087 #if defined(CONFIG_BT_PERIPHERAL)
/* WAIT_TX_PHY_RSP state: retry sending the PHY_RSP on every RUN event;
 * all other events are ignored.
 */
static void rp_pu_st_wait_tx_phy_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				     void *param)
{
	if (evt == RP_PU_EVT_RUN) {
		rp_pu_send_phy_rsp(conn, ctx, evt, param);
	}
}
1100 #endif /* CONFIG_BT_PERIPHERAL */
1101
/* Shared handler for both WAIT_TX_ACK states: our PHY_RSP (as peripheral)
 * or our PHY_UPDATE_IND (as central) has been acknowledged by the peer.
 */
static void rp_pu_st_wait_tx_ack_phy(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				     void *param)
{
	switch (evt) {
	case RP_PU_EVT_ACK:
		/* The 'if (0)' lead-in lets each role-specific branch live
		 * entirely within its own #if block.
		 */
		if (0) {
#if defined(CONFIG_BT_PERIPHERAL)
		} else if (ctx->state == RP_PU_STATE_WAIT_TX_ACK_PHY_RSP) {
			LL_ASSERT(conn->lll.role == BT_HCI_ROLE_PERIPHERAL);
			/* When we act as peripheral apply timing restriction */
			pu_set_timing_restrict(
				conn, pu_select_phy_timing_restrict(conn, ctx->data.pu.tx));
			/* RSP acked, now await update ind from central */
			ctx->state = RP_PU_STATE_WAIT_RX_PHY_UPDATE_IND;
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CENTRAL)
		} else if (ctx->state == RP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND) {
			LL_ASSERT(conn->lll.role == BT_HCI_ROLE_CENTRAL);
			if (ctx->data.pu.c_to_p_phy || ctx->data.pu.p_to_c_phy) {
				/* UPDATE_IND acked, so lets await instant */
				if (ctx->data.pu.c_to_p_phy) {
					/*
					 * And if central to periph phys changes
					 * apply timing restrictions
					 */
					pu_set_timing_restrict(conn, ctx->data.pu.c_to_p_phy);
				}
				ctx->state = RP_PU_STATE_WAIT_INSTANT;
			} else {
				/* Neither direction changes PHY; no instant needed */
				rp_pu_complete(conn, ctx, evt, param);
			}
#endif /* CONFIG_BT_CENTRAL */
		} else {
			/* empty clause */
		}
		/* Control exchange acked; data TX may flow again */
		llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
1144
1145 #if defined(CONFIG_BT_CENTRAL)
/* WAIT_TX_PHY_UPDATE_IND state: retry sending the PHY_UPDATE_IND on every
 * RUN event; all other events are ignored.
 */
static void rp_pu_st_wait_tx_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
					    void *param)
{
	if (evt == RP_PU_EVT_RUN) {
		rp_pu_send_phy_update_ind(conn, ctx, evt, param);
	}
}
1158 #endif /* CONFIG_BT_CENTRAL */
1159
1160 #if defined(CONFIG_BT_PERIPHERAL)
/* WAIT_RX_PHY_UPDATE_IND state (peripheral): handle the central's
 * PHY_UPDATE_IND. If any PHY actually changes, wait for the instant;
 * otherwise complete immediately, terminating the connection when the
 * indicated instant has already passed.
 */
static void rp_pu_st_wait_rx_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
					    void *param)
{
	switch (evt) {
	case RP_PU_EVT_PHY_UPDATE_IND: {
		uint8_t end_procedure;

		llcp_pdu_decode_phy_update_ind(ctx, (struct pdu_data *)param);
		end_procedure = pu_check_update_ind(conn, ctx);

		/* Mark RX node to NOT release */
		llcp_rx_node_retain(ctx);

		if (end_procedure) {
			if (ctx->data.pu.error == BT_HCI_ERR_INSTANT_PASSED) {
				/* Mark the connection for termination */
				conn->llcp_terminate.reason_final = BT_HCI_ERR_INSTANT_PASSED;
			}
			rp_pu_complete(conn, ctx, evt, param);
		} else {
			/* Since at least one phy will change,
			 * stop the procedure response timeout
			 */
			llcp_rr_prt_stop(conn);
			ctx->state = RP_PU_STATE_WAIT_INSTANT;
		}
		break;
	}
	default:
		/* Ignore other evts */
		break;
	}
}
1191 #endif /* CONFIG_BT_PERIPHERAL */
1192
/* Check whether the PHY change instant has been reached (or passed) at the
 * upcoming connection event; if so, apply the new PHY settings, record
 * whether notifications are needed, and move towards completion.
 */
static void rp_pu_check_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				void *param)
{
	const uint16_t event_counter = ull_conn_event_counter_at_prepare(conn);
	uint8_t phy_changed;

	if (!is_instant_reached_or_passed(ctx->data.pu.instant, event_counter)) {
		return;
	}

	ctx->data.pu.error = BT_HCI_ERR_SUCCESS;
	phy_changed = pu_apply_phy_update(conn, ctx);
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	if (phy_changed) {
		/* New PHY may alter the effective data times -> possible DLE NTF */
		ctx->data.pu.ntf_dle = pu_update_eff_times(conn, ctx);
	}
#endif
	/* if PHY settings changed we should generate NTF */
	ctx->data.pu.ntf_pu = phy_changed;
	rp_pu_complete(conn, ctx, evt, param);
}
1211
/* WAIT_INSTANT state: re-evaluate the instant on every RUN event; all
 * other events are ignored.
 */
static void rp_pu_st_wait_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				  void *param)
{
	if (evt == RP_PU_EVT_RUN) {
		rp_pu_check_instant(conn, ctx, evt, param);
	}
}
1224
/* WAIT_INSTANT_ON_AIR state: once the NTF event arrives (instant is on
 * air), emit host notification(s) and finalize; other events are ignored.
 */
static void rp_pu_st_wait_instant_on_air(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
					 void *param)
{
	if (evt == RP_PU_EVT_NTF) {
		rp_pu_tx_ntf(conn, ctx, evt, param);
	}
}
1237
1238 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* WAIT_NTF_AVAIL state: retry the deferred TX on every RUN event;
 * rp_pu_tx() re-checks NTF node availability. Other events are ignored.
 */
static void rp_pu_st_wait_ntf_avail(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				    void *param)
{
	if (evt == RP_PU_EVT_RUN) {
		rp_pu_tx(conn, ctx, evt, param);
	}
}
1251 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1252
/* Remote PHY update procedure FSM dispatcher: route the event to the
 * handler for the current state. Role-/feature-specific states are only
 * compiled in when the corresponding Kconfig option is enabled; reaching
 * any other state is a fatal logic error.
 */
static void rp_pu_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	switch (ctx->state) {
	case RP_PU_STATE_IDLE:
		rp_pu_st_idle(conn, ctx, evt, param);
		break;
	case RP_PU_STATE_WAIT_RX_PHY_REQ:
		rp_pu_st_wait_rx_phy_req(conn, ctx, evt, param);
		break;
#if defined(CONFIG_BT_PERIPHERAL)
	case RP_PU_STATE_WAIT_TX_PHY_RSP:
		rp_pu_st_wait_tx_phy_rsp(conn, ctx, evt, param);
		break;
	case RP_PU_STATE_WAIT_TX_ACK_PHY_RSP:
		rp_pu_st_wait_tx_ack_phy(conn, ctx, evt, param);
		break;
	case RP_PU_STATE_WAIT_RX_PHY_UPDATE_IND:
		rp_pu_st_wait_rx_phy_update_ind(conn, ctx, evt, param);
		break;
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CENTRAL)
	case RP_PU_STATE_WAIT_TX_PHY_UPDATE_IND:
		rp_pu_st_wait_tx_phy_update_ind(conn, ctx, evt, param);
		break;
	case RP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND:
		rp_pu_st_wait_tx_ack_phy(conn, ctx, evt, param);
		break;
#endif /* CONFIG_BT_CENTRAL */
	case RP_PU_STATE_WAIT_INSTANT:
		rp_pu_st_wait_instant(conn, ctx, evt, param);
		break;
	case RP_PU_STATE_WAIT_INSTANT_ON_AIR:
		rp_pu_st_wait_instant_on_air(conn, ctx, evt, param);
		break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case RP_PU_STATE_WAIT_NTF_AVAIL:
		rp_pu_st_wait_ntf_avail(conn, ctx, evt, param);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
	default:
		/* Unknown state */
		LL_ASSERT(0);
	}
}
1297
/* Dispatch a received LL Control PDU into the remote PHY update FSM.
 * Any opcode this procedure does not expect is a protocol violation: the
 * connection is marked for termination and the procedure is torn down.
 */
void llcp_rp_pu_rx(struct ll_conn *conn, struct proc_ctx *ctx, struct node_rx_pdu *rx)
{
	struct pdu_data *pdu = (struct pdu_data *)rx->pdu;

	switch (pdu->llctrl.opcode) {
	case PDU_DATA_LLCTRL_TYPE_PHY_REQ:
		rp_pu_execute_fsm(conn, ctx, RP_PU_EVT_PHY_REQ, pdu);
		break;
#if defined(CONFIG_BT_PERIPHERAL)
	case PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND:
		rp_pu_execute_fsm(conn, ctx, RP_PU_EVT_PHY_UPDATE_IND, pdu);
		break;
#endif /* CONFIG_BT_PERIPHERAL */
	default:
		/* Invalid behaviour */
		/* Invalid PDU received so terminate connection */
		conn->llcp_terminate.reason_final = BT_HCI_ERR_LMP_PDU_NOT_ALLOWED;
		llcp_rr_complete(conn);
		ctx->state = RP_PU_STATE_IDLE;
		break;
	}
}
1320
/* Run the remote PHY update procedure FSM; called from the ULL context on
 * every procedure run opportunity (e.g. connection event prepare).
 */
void llcp_rp_pu_run(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
{
	rp_pu_execute_fsm(conn, ctx, RP_PU_EVT_RUN, param);
}
1325
/* Feed a TX-ack (our control PDU was acknowledged by the peer) into the
 * remote PHY update procedure FSM.
 */
void llcp_rp_pu_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
{
	rp_pu_execute_fsm(conn, ctx, RP_PU_EVT_ACK, param);
}
1330
/* Feed a notify event into the remote PHY update procedure FSM, signalling
 * that host notification(s) may now be generated.
 */
void llcp_rp_pu_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
	rp_pu_execute_fsm(conn, ctx, RP_PU_EVT_NTF, NULL);
}
1335
llcp_rp_pu_awaiting_instant(struct proc_ctx * ctx)1336 bool llcp_rp_pu_awaiting_instant(struct proc_ctx *ctx)
1337 {
1338 return (ctx->state == RP_PU_STATE_WAIT_INSTANT);
1339 }
1340