/*
 * Copyright (c) 2024 Xiaomi Corporation
 * Copyright (c) 2015-2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/util.h>

#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/drivers/uart.h>

#include <zephyr/net_buf.h>
#include <zephyr/bluetooth/bluetooth.h>
#include <zephyr/bluetooth/l2cap.h>
#include <zephyr/bluetooth/hci.h>
#include <zephyr/bluetooth/buf.h>
#include <zephyr/bluetooth/hci_raw.h>

#define LOG_MODULE_NAME hci_uart_3wire
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

static K_KERNEL_STACK_DEFINE(tx_stack, CONFIG_BT_HCI_TX_STACK_SIZE);
static K_KERNEL_STACK_DEFINE(rx_stack, CONFIG_BT_RX_STACK_SIZE);

static struct k_thread tx_thread_data;
static struct k_thread rx_thread_data;

static struct k_work_delayable ack_work;
static struct k_work_delayable retx_work;

#define HCI_3WIRE_ACK_PKT	0x00
#define HCI_COMMAND_PKT		0x01
#define HCI_ACLDATA_PKT		0x02
#define HCI_SCODATA_PKT		0x03
#define HCI_EVENT_PKT		0x04
#define HCI_ISODATA_PKT		0x05
#define HCI_3WIRE_LINK_PKT	0x0f
#define HCI_VENDOR_PKT		0xff

static bool reliable_packet(uint8_t type)
{
	switch (type) {
	case HCI_COMMAND_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_EVENT_PKT:
	case HCI_ISODATA_PKT:
		return true;
	default:
		return false;
	}
}

/* FIXME: Correct timeout */
#define H5_RX_ACK_TIMEOUT	K_MSEC(250)
#define H5_TX_ACK_TIMEOUT	K_MSEC(250)

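/* SLIP framing: each packet is wrapped in 0xc0 delimiters. A literal
 * 0xc0 inside a packet is sent as 0xdb 0xdc, and a literal 0xdb as
 * 0xdb 0xdd (see h5_slip_byte() and h5_unslip_byte() below).
 */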
#define SLIP_DELIMITER	0xc0
#define SLIP_ESC	0xdb
#define SLIP_ESC_DELIM	0xdc
#define SLIP_ESC_ESC	0xdd

#define H5_RX_ESC	1
#define H5_TX_ACK_PEND	2

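/* The H5 packet header is 4 bytes:
 *   byte 0: seq (bits 0-2), ack (bits 3-5), CRC present (bit 6),
 *           reliable flag (bit 7)
 *   byte 1: packet type (bits 0-3), payload length low nibble (bits 4-7)
 *   byte 2: payload length high byte
 *   byte 3: header checksum
 */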
#define H5_HDR_SEQ(hdr)		((hdr)[0] & 0x07)
#define H5_HDR_ACK(hdr)		(((hdr)[0] >> 3) & 0x07)
#define H5_HDR_CRC(hdr)		(((hdr)[0] >> 6) & 0x01)
#define H5_HDR_RELIABLE(hdr)	(((hdr)[0] >> 7) & 0x01)
#define H5_HDR_PKT_TYPE(hdr)	((hdr)[1] & 0x0f)
#define H5_HDR_LEN(hdr)		((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))

#define H5_SET_SEQ(hdr, seq)	((hdr)[0] |= (seq))
#define H5_SET_ACK(hdr, ack)	((hdr)[0] |= (ack) << 3)
#define H5_SET_RELIABLE(hdr)	((hdr)[0] |= 1 << 7)
#define H5_SET_TYPE(hdr, type)	((hdr)[1] |= type)
#define H5_SET_LEN(hdr, len)	(((hdr)[1] |= ((len) & 0x0f) << 4), \
				 ((hdr)[2] |= (len) >> 4))

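/* Sliding window: at most H5_TX_WIN reliable packets may be in flight
 * (sent but not yet acknowledged) at any time.
 */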
#define H5_TX_WIN	4

static struct h5 {
	struct net_buf *rx_buf;

	struct k_fifo tx_queue;
	struct k_fifo rx_queue;
	struct k_fifo unack_queue;

	uint8_t tx_win;
	uint8_t tx_ack;
	uint8_t tx_seq;

	uint8_t rx_ack;

	enum {
		UNINIT,
		INIT,
		ACTIVE,
	} link_state;

	enum {
		START,
		HEADER,
		PAYLOAD,
		END,
	} rx_state;
} h5;

static uint8_t unack_queue_len;

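/* Link establishment messages: the peers exchange SYNC/SYNC_RSP and
 * CONFIG/CONFIG_RSP link packets to bring the link from UNINIT through
 * INIT to ACTIVE (see rx_thread() below).
 */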
static const uint8_t sync_req[] = { 0x01, 0x7e };
static const uint8_t sync_rsp[] = { 0x02, 0x7d };
/* Third byte may change */
static const uint8_t conf_req[] = { 0x03, 0xfc };
static uint8_t conf_rsp[3] = { 0x04, 0x7b, };

/* H5 signal buffers pool */
#define MAX_SIG_LEN	3
#define SIGNAL_COUNT	2
#define SIG_BUF_SIZE	(BT_BUF_RESERVE + MAX_SIG_LEN)
NET_BUF_POOL_DEFINE(h5_pool, SIGNAL_COUNT, SIG_BUF_SIZE, 0, NULL);

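/* UART device taken from the zephyr,bt-c2h-uart devicetree chosen node */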
static const struct device *const h5_dev =
	DEVICE_DT_GET(DT_CHOSEN(zephyr_bt_c2h_uart));

static K_FIFO_DEFINE(tx_queue);

static struct k_poll_signal tx_queue_change =
	K_POLL_SIGNAL_INITIALIZER(tx_queue_change);

static void h5_reset_rx(void)
{
	if (h5.rx_buf) {
		net_buf_unref(h5.rx_buf);
		h5.rx_buf = NULL;
	}

	h5.rx_state = START;
}

static int h5_unslip_byte(uint8_t *byte)
{
	int count;

	if (*byte != SLIP_ESC) {
		return 0;
	}

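	/* The byte following an escape may not have reached the FIFO yet,
	 * so spin until it can be read.
	 */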
	do {
		count = uart_fifo_read(h5_dev, byte, sizeof(*byte));
	} while (!count);

	switch (*byte) {
	case SLIP_ESC_DELIM:
		*byte = SLIP_DELIMITER;
		break;
	case SLIP_ESC_ESC:
		*byte = SLIP_ESC;
		break;
	default:
		LOG_ERR("Invalid escape byte %x", *byte);
		return -EIO;
	}

	return 0;
}

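/* Walk back from tx_seq to the last acknowledged sequence number and
 * release that many packets from the unack queue.
 */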
static void process_unack(void)
{
	uint8_t next_seq = h5.tx_seq;
	uint8_t number_removed = unack_queue_len;
	bool acked = false;

	if (!unack_queue_len) {
		return;
	}

	LOG_DBG("rx_ack %u tx_ack %u tx_seq %u unack_queue_len %u", h5.rx_ack, h5.tx_ack, h5.tx_seq,
		unack_queue_len);

	while (unack_queue_len > 0) {
		if (next_seq == h5.rx_ack) {
			/* Next sequence number is the same as last received
			 * ack number
			 */
			break;
		}

		number_removed--;
		/* Similar to (n - 1) % 8 with unsigned conversion */
		next_seq = (next_seq - 1) & 0x07;
	}

	if (next_seq != h5.rx_ack) {
		LOG_ERR("Wrong sequence: rx_ack %u tx_seq %u next_seq %u", h5.rx_ack, h5.tx_seq,
			next_seq);
	}

	LOG_DBG("Need to remove %u packets from the queue", number_removed);

	while (number_removed) {
		struct net_buf *buf = k_fifo_get(&h5.unack_queue, K_NO_WAIT);

		if (!buf) {
			LOG_ERR("Unack queue is empty");
			break;
		}

		/* TODO: print or do something with packet */
		LOG_DBG("Remove buf from the unack_queue");

		net_buf_unref(buf);
		unack_queue_len--;
		number_removed--;

		acked = true;
	}

	if (acked) {
		k_poll_signal_raise(&tx_queue_change, 0);
	}
}

static void h5_print_header(const uint8_t *hdr, const char *str)
{
	if (H5_HDR_RELIABLE(hdr)) {
		LOG_DBG("%s REL: seq %u ack %u crc %u type %u len %u", str, H5_HDR_SEQ(hdr),
			H5_HDR_ACK(hdr), H5_HDR_CRC(hdr), H5_HDR_PKT_TYPE(hdr), H5_HDR_LEN(hdr));
	} else {
		LOG_DBG("%s UNREL: ack %u crc %u type %u len %u", str, H5_HDR_ACK(hdr),
			H5_HDR_CRC(hdr), H5_HDR_PKT_TYPE(hdr), H5_HDR_LEN(hdr));
	}
}

static void hexdump(const char *str, const uint8_t *packet, size_t length)
{
	int n = 0;

	if (!length) {
		printk("%s zero-length signal packet\n", str);
		return;
	}

	while (length--) {
		if (n % 16 == 0) {
			printk("%s %08X ", str, n);
		}

		printk("%02X ", *packet++);

		n++;
		if (n % 8 == 0) {
			if (n % 16 == 0) {
				printk("\n");
			} else {
				printk(" ");
			}
		}
	}

	if (n % 16) {
		printk("\n");
	}
}

static uint8_t h5_slip_byte(uint8_t byte)
{
	switch (byte) {
	case SLIP_DELIMITER:
		uart_poll_out(h5_dev, SLIP_ESC);
		uart_poll_out(h5_dev, SLIP_ESC_DELIM);
		return 2;
	case SLIP_ESC:
		uart_poll_out(h5_dev, SLIP_ESC);
		uart_poll_out(h5_dev, SLIP_ESC_ESC);
		return 2;
	default:
		uart_poll_out(h5_dev, byte);
		return 1;
	}
}

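/* Transmit a packet as a single SLIP frame: starting delimiter,
 * escaped 4-byte header, escaped payload, ending delimiter.
 */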
static void h5_send(const uint8_t *payload, uint8_t type, int len)
{
	uint8_t hdr[4];
	int i;

	hexdump("<= ", payload, len);

	(void)memset(hdr, 0, sizeof(hdr));

	/* Set ACK for outgoing packet and stop delayed work */
	H5_SET_ACK(hdr, h5.tx_ack);
	/* If cancel fails we may ack the same seq number twice, this is OK. */
	(void)k_work_cancel_delayable(&ack_work);

	if (reliable_packet(type)) {
		H5_SET_RELIABLE(hdr);
		H5_SET_SEQ(hdr, h5.tx_seq);
		h5.tx_seq = (h5.tx_seq + 1) % 8;
	}

	H5_SET_TYPE(hdr, type);
	H5_SET_LEN(hdr, len);

	/* Header checksum: one's complement of the sum of the first three bytes */
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	h5_print_header(hdr, "TX: <");

	uart_poll_out(h5_dev, SLIP_DELIMITER);

	for (i = 0; i < 4; i++) {
		h5_slip_byte(hdr[i]);
	}

	for (i = 0; i < len; i++) {
		h5_slip_byte(payload[i]);
	}

	uart_poll_out(h5_dev, SLIP_DELIMITER);
}

/* Delayed work taking care of retransmitting packets */
static void retx_timeout(struct k_work *work)
{
	ARG_UNUSED(work);

	LOG_DBG("unack_queue_len %u", unack_queue_len);

	if (unack_queue_len) {
		struct k_fifo tmp_queue;
		struct net_buf *buf;

		k_fifo_init(&tmp_queue);

		/* Move pending packets to a temporary queue */
		while ((buf = k_fifo_get(&h5.tx_queue, K_NO_WAIT))) {
			k_fifo_put(&tmp_queue, buf);
		}

		/* Queue unacked packets to the beginning of the queue */
		while ((buf = k_fifo_get(&h5.unack_queue, K_NO_WAIT))) {
			/* include also packet type */
			net_buf_push(buf, sizeof(uint8_t));
			k_fifo_put(&h5.tx_queue, buf);
			h5.tx_seq = (h5.tx_seq - 1) & 0x07;
			unack_queue_len--;
		}

		/* Queue saved packets from the temporary queue */
		while ((buf = k_fifo_get(&tmp_queue, K_NO_WAIT))) {
			k_fifo_put(&h5.tx_queue, buf);
		}
	}

	k_poll_signal_raise(&tx_queue_change, 0);
}

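/* Delayed work sending a pure ACK if no outgoing reliable packet has
 * carried the acknowledgement in time.
 */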
static void ack_timeout(struct k_work *work)
{
	ARG_UNUSED(work);

	LOG_DBG("");

	h5_send(NULL, HCI_3WIRE_ACK_PKT, 0);
}

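/* Called once a complete, sequence-checked frame has been received:
 * update the ack state and route the payload by packet type.
 */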
static void h5_process_complete_packet(uint8_t *hdr)
{
	struct net_buf *buf;

	LOG_DBG("");

	/* rx_ack should be in every packet */
	h5.rx_ack = H5_HDR_ACK(hdr);

	if (reliable_packet(H5_HDR_PKT_TYPE(hdr))) {
		/* For a reliable packet increment next transmit ack number */
		h5.tx_ack = (h5.tx_ack + 1) % 8;
		/* Submit delayed work to ack the packet */
		k_work_reschedule(&ack_work, H5_RX_ACK_TIMEOUT);
	}

	h5_print_header(hdr, "RX: >");

	process_unack();

	buf = h5.rx_buf;
	h5.rx_buf = NULL;

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_3WIRE_ACK_PKT:
		net_buf_unref(buf);
		break;
	case HCI_3WIRE_LINK_PKT:
		k_fifo_put(&h5.rx_queue, buf);
		break;
	case HCI_COMMAND_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_ISODATA_PKT:
		hexdump("=> ", buf->data, buf->len);
		k_fifo_put(&tx_queue, buf);
		break;
	}
}

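/* UART RX interrupt handler implementing the receive state machine:
 * START (wait for a delimiter) -> HEADER (4 bytes) -> PAYLOAD
 * (H5_HDR_LEN bytes) -> END (closing delimiter).
 */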
static void bt_uart_isr(const struct device *unused, void *user_data)
{
	static int remaining;
	uint8_t byte, type;
	int ret;
	static uint8_t hdr[4];
	size_t buf_tailroom;

	ARG_UNUSED(unused);
	ARG_UNUSED(user_data);

	while (uart_irq_update(h5_dev) &&
	       uart_irq_is_pending(h5_dev)) {

		if (!uart_irq_rx_ready(h5_dev)) {
			if (uart_irq_tx_ready(h5_dev)) {
				LOG_DBG("transmit ready");
			} else {
				LOG_DBG("spurious interrupt");
			}
			/* Only the UART RX path is interrupt-enabled */
			break;
		}

		ret = uart_fifo_read(h5_dev, &byte, sizeof(byte));
		if (!ret) {
			continue;
		}

		switch (h5.rx_state) {
		case START:
			if (byte == SLIP_DELIMITER) {
				h5.rx_state = HEADER;
				remaining = sizeof(hdr);
			}
			break;
		case HEADER:
			/* In case we confuse the ending SLIP delimiter
			 * with a starting one.
			 */
			if (byte == SLIP_DELIMITER) {
				remaining = sizeof(hdr);
				continue;
			}

			if (h5_unslip_byte(&byte) < 0) {
				h5_reset_rx();
				continue;
			}

			memcpy(&hdr[sizeof(hdr) - remaining], &byte, 1);
			remaining--;

			if (remaining) {
				break;
			}

			remaining = H5_HDR_LEN(hdr);
			type = H5_HDR_PKT_TYPE(hdr);

			switch (type) {
			case HCI_COMMAND_PKT:
			case HCI_ACLDATA_PKT:
			case HCI_ISODATA_PKT:
				h5.rx_buf = bt_buf_get_tx(bt_buf_type_from_h4(type, BT_BUF_OUT),
							  K_NO_WAIT, NULL, 0);
				if (!h5.rx_buf) {
					LOG_WRN("No available data buffers");
					h5_reset_rx();
					continue;
				}

				h5.rx_state = PAYLOAD;
				break;
			case HCI_3WIRE_LINK_PKT:
			case HCI_3WIRE_ACK_PKT:
				h5.rx_buf = net_buf_alloc(&h5_pool, K_NO_WAIT);
				if (!h5.rx_buf) {
					LOG_WRN("No available signal buffers");
					h5_reset_rx();
					continue;
				}

				h5.rx_state = PAYLOAD;
				break;
			default:
				LOG_ERR("Wrong packet type %u", type);
				h5.rx_state = END;
				break;
			}
			if (!remaining) {
				h5.rx_state = END;
			}
			break;
		case PAYLOAD:
			if (byte == SLIP_DELIMITER) {
				LOG_WRN("Unexpected ending delimiter");
				h5_reset_rx();
				continue;
			}

			if (h5_unslip_byte(&byte) < 0) {
				h5_reset_rx();
				continue;
			}

			buf_tailroom = net_buf_tailroom(h5.rx_buf);
			if (buf_tailroom < sizeof(byte)) {
				LOG_ERR("Not enough space in buffer %zu/%zu", sizeof(byte),
					buf_tailroom);
				h5_reset_rx();
				break;
			}

			net_buf_add_mem(h5.rx_buf, &byte, sizeof(byte));
			remaining--;
			if (!remaining) {
				h5.rx_state = END;
			}
			break;
		case END:
			if (byte != SLIP_DELIMITER) {
				LOG_ERR("Missing ending SLIP_DELIMITER");
				h5_reset_rx();
				break;
			}

			LOG_DBG("Received full packet: type %u", H5_HDR_PKT_TYPE(hdr));

			/* The sequence number is checked once the full packet
			 * has been received. It could be checked while parsing
			 * the header, but the full packet has to be drained
			 * from the UART anyway.
			 */
			if (H5_HDR_RELIABLE(hdr) &&
			    H5_HDR_SEQ(hdr) != h5.tx_ack) {
				LOG_ERR("Seq expected %u got %u. Drop packet", h5.tx_ack,
					H5_HDR_SEQ(hdr));
				h5_reset_rx();
				break;
			}

			h5_process_complete_packet(hdr);
			h5.rx_state = START;
			break;
		}
	}
}

static int h5_queue(struct net_buf *buf)
{
	LOG_DBG("buf %p type %u len %u", buf, buf->data[0], buf->len);

	k_fifo_put(&h5.tx_queue, buf);

	return 0;
}

static uint8_t h5_get_type(struct net_buf *buf)
{
	return net_buf_pull_u8(buf);
}

static void process_events(struct k_poll_event *ev, int count)
{
	struct net_buf *buf;
	uint8_t type;
	int err;

	LOG_DBG("count %d", count);

	for (; count; ev++, count--) {
		LOG_DBG("ev->state %u", ev->state);

		switch (ev->state) {
		case K_POLL_STATE_SIGNALED:
			break;
		case K_POLL_STATE_SEM_AVAILABLE:
			/* After this fn is exec'd, `bt_conn_prepare_events()`
			 * will be called once again, and this time buffers will
			 * be available, so the FIFO will be added to the poll
			 * list instead of the ctlr buffers semaphore.
			 */
			break;
		case K_POLL_STATE_FIFO_DATA_AVAILABLE:
			if (ev->tag == 0) {
				/* The FIFO has data available, so this cannot
				 * return NULL.
				 */
				buf = k_fifo_get(&tx_queue, K_NO_WAIT);
				__ASSERT_NO_MSG(buf);

				/* Pass buffer to the stack */
				err = bt_send(buf);
				if (err) {
					LOG_ERR("Unable to send (err %d)", err);
					net_buf_unref(buf);
				}
			} else if (ev->tag == 2) {
				buf = k_fifo_get(&h5.tx_queue, K_FOREVER);
				__ASSERT_NO_MSG(buf);

				type = h5_get_type(buf);
				h5_send(buf->data, type, buf->len);

				/* buf is dequeued from tx_queue and queued to
				 * the unack queue.
				 */
				k_fifo_put(&h5.unack_queue, buf);
				unack_queue_len++;

				k_work_reschedule(&retx_work, H5_TX_ACK_TIMEOUT);
			}
			break;
		case K_POLL_STATE_NOT_READY:
			break;
		default:
			LOG_WRN("Unexpected k_poll event state %u", ev->state);
			break;
		}
	}
}

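/* TX thread polling on three sources, distinguished by event tag:
 * tag 0: buffers from the ISR destined for the controller (tx_queue),
 * tag 1: signal raised when the unack queue shrinks or a retransmit
 *        is queued (tx_queue_change),
 * tag 2: outgoing H5 packets (h5.tx_queue), polled only while the link
 *        is ACTIVE and the TX window is open.
 */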
static void tx_thread(void *p1, void *p2, void *p3)
{
	static struct k_poll_event events[] = {
		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
						K_POLL_MODE_NOTIFY_ONLY,
						&tx_queue, 0),
		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SIGNAL,
						K_POLL_MODE_NOTIFY_ONLY,
						&tx_queue_change, 1),
		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
						K_POLL_MODE_NOTIFY_ONLY,
						&h5.tx_queue, 2),
	};

	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	LOG_DBG("");

	while (true) {
		int err, ev_count = 2;

		events[0].state = K_POLL_STATE_NOT_READY;
		events[1].state = K_POLL_STATE_NOT_READY;
		tx_queue_change.signaled = 0U;

		if (h5.link_state == ACTIVE && unack_queue_len < h5.tx_win) {
			events[2].state = K_POLL_STATE_NOT_READY;
			ev_count++;
		}

		err = k_poll(events, ev_count, K_FOREVER);
		process_events(events, ev_count);

		/* Make sure we don't hog the CPU if there are always
		 * events ready to be processed.
		 */
		k_yield();
	}
}

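/* RX thread handling H5 link-control packets (SYNC and CONFIG requests)
 * queued by the ISR.
 */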
static void rx_thread(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	LOG_DBG("");

	while (true) {
		struct net_buf *buf, *cache;

		buf = k_fifo_get(&h5.rx_queue, K_FOREVER);

		hexdump("=> ", buf->data, buf->len);

		if (!memcmp(buf->data, sync_req, sizeof(sync_req))) {
			if (h5.link_state == ACTIVE) {
				while ((cache = k_fifo_get(&h5.unack_queue, K_NO_WAIT))) {
					net_buf_unref(cache);
				}

				unack_queue_len = 0;

				while ((cache = k_fifo_get(&h5.tx_queue, K_NO_WAIT))) {
					net_buf_unref(cache);
				}

				h5_reset_rx();

				h5.rx_ack = 0;
				h5.link_state = INIT;
				h5.tx_ack = 0;
				h5.tx_seq = 0;
			}

			h5_send(sync_rsp, HCI_3WIRE_LINK_PKT, sizeof(sync_rsp));
		} else if (!memcmp(buf->data, conf_req, 2)) {
			if (buf->len > 2) {
				uint8_t tx_win = buf->data[2] & 0x07;

				/* Configuration field present */
				h5.tx_win = MIN(h5.tx_win, tx_win);
			}

			conf_rsp[2] = h5.tx_win;

			/*
			 * The Host sends Config Response messages with a
			 * Configuration Field.
			 */
			h5_send(conf_rsp, HCI_3WIRE_LINK_PKT, sizeof(conf_rsp));

			LOG_DBG("Finished H5 configuration, tx_win %u", h5.tx_win);

			h5.link_state = ACTIVE;
		} else {
			LOG_ERR("Not handled yet %x %x", buf->data[0], buf->data[1]);
		}

		net_buf_unref(buf);

		/* Make sure we don't hog the CPU if the rx_queue never
		 * gets empty.
		 */
		k_yield();
	}
}

static int hci_uart_init(void)
{
	LOG_DBG("");

	if (!device_is_ready(h5_dev)) {
		LOG_ERR("HCI UART %s is not ready", h5_dev->name);
		return -EINVAL;
	}

	uart_irq_rx_disable(h5_dev);
	uart_irq_tx_disable(h5_dev);

	uart_irq_callback_set(h5_dev, bt_uart_isr);

	uart_irq_rx_enable(h5_dev);

	return 0;
}

SYS_INIT(hci_uart_init, APPLICATION, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);

int main(void)
{
	/* incoming events and data from the controller */
	static K_FIFO_DEFINE(rx_queue);
	int err;

	LOG_DBG("Start");
	__ASSERT(h5_dev, "UART device is NULL");

	/* Enable the raw interface, this will in turn open the HCI driver */
	bt_enable_raw(&rx_queue);

	/* TX thread */
	k_fifo_init(&h5.tx_queue);
	k_thread_create(&tx_thread_data, tx_stack,
			K_KERNEL_STACK_SIZEOF(tx_stack),
			tx_thread, NULL, NULL, NULL,
			K_PRIO_COOP(CONFIG_BT_HCI_TX_PRIO),
			0, K_NO_WAIT);
	k_thread_name_set(&tx_thread_data, "tx_thread");

	k_fifo_init(&h5.rx_queue);
	k_thread_create(&rx_thread_data, rx_stack,
			K_KERNEL_STACK_SIZEOF(rx_stack),
			rx_thread, NULL, NULL, NULL,
			K_PRIO_COOP(CONFIG_BT_RX_PRIO),
			0, K_NO_WAIT);
	k_thread_name_set(&rx_thread_data, "rx_thread");

	/* Unack queue */
	k_fifo_init(&h5.unack_queue);

	/* Init delayed work */
	k_work_init_delayable(&ack_work, ack_timeout);
	k_work_init_delayable(&retx_work, retx_timeout);

	h5.tx_win = H5_TX_WIN;

	while (1) {
		struct net_buf *buf;

		buf = k_fifo_get(&rx_queue, K_FOREVER);
		err = h5_queue(buf);
		if (err) {
			LOG_ERR("Failed to send");
		}
	}

	return 0;
}