1 /*
2  * Copyright (c) 2024 STMicroelectronics
3  * Copyright (c) 2016 Nordic Semiconductor ASA
4  * Copyright (c) 2015-2016 Intel Corporation
5  *
6  * SPDX-License-Identifier: Apache-2.0
7  */
8 
9 #include <errno.h>
10 #include <stddef.h>
11 #include <stdio.h>
12 #include <string.h>
13 
14 #include <zephyr/kernel.h>
15 #include <zephyr/arch/cpu.h>
16 #include <zephyr/sys/byteorder.h>
17 #include <zephyr/logging/log.h>
18 #include <zephyr/sys/util.h>
19 #include <zephyr/device.h>
20 #include <zephyr/init.h>
21 #include <zephyr/drivers/uart.h>
22 #include <zephyr/drivers/bluetooth/hci_driver_bluenrg.h>
23 #include <zephyr/bluetooth/hci_types.h>
24 #include <zephyr/usb/usb_device.h>
25 #include <zephyr/net_buf.h>
26 #include <zephyr/bluetooth/bluetooth.h>
27 #include <zephyr/bluetooth/l2cap.h>
28 #include <zephyr/bluetooth/hci.h>
29 #include <zephyr/bluetooth/buf.h>
30 #include <zephyr/bluetooth/hci_raw.h>
31 #include <version.h>
32 
33 #define LOG_MODULE_NAME gui_hci_uart
34 LOG_MODULE_REGISTER(LOG_MODULE_NAME);
35 
/* UART chosen as the controller-to-host (c2h) HCI transport. */
static const struct device *const hci_uart_dev =
	DEVICE_DT_GET(DT_CHOSEN(zephyr_bt_c2h_uart));
/* Thread forwarding host->controller packets into the Bluetooth stack. */
static K_THREAD_STACK_DEFINE(tx_thread_stack, CONFIG_BT_HCI_TX_STACK_SIZE);
static struct k_thread tx_thread_data;
/* Complete packets received over the UART, pending delivery to the stack. */
static K_FIFO_DEFINE(tx_queue);

/* RX in terms of bluetooth communication */
static K_FIFO_DEFINE(uart_tx_queue);
44 
/* ST-specific H:4 packet type indicators (in addition to standard HCI types) */
#define H4_ST_EXT_CMD	0x81
#define H4_ST_VND_CMD	0xFF

/* States of the UART RX state machine (see rx_isr()) */
#define ST_IDLE		0 /* Waiting for packet type. */
#define ST_HDR		1 /* Receiving packet header. */
#define ST_PAYLOAD	2 /* Receiving packet payload. */
#define ST_DISCARD	3 /* Dropping packet. */

/* Length of a discard/flush buffer.
 * This is sized to align with a Bluetooth HCI packet:
 * 1 byte H:4 header + 32 bytes ACL/event data
 * Bigger values might overflow the stack since this is declared as a local
 * variable, smaller ones will force the caller to call into discard more
 * often.
 */
#define H4_DISCARD_LEN 33

/* Byte offsets within a vendor response packet (offset 0 is the H:4 type) */
#define RESP_VENDOR_CODE_OFFSET	1
#define RESP_LEN_OFFSET_LSB	2
#define RESP_LEN_OFFSET_MSB	3
#define RESP_CMDCODE_OFFSET	4
#define RESP_STATUS_OFFSET	5
#define RESP_PARAM_OFFSET	6

/* Types of vendor codes */
#define VENDOR_CODE_ERROR	0
#define VENDOR_CODE_RESPONSE	1

/* Commands */
#define VENDOR_CMD_READ_VERSION		0x01
#define VENDOR_CMD_BLUENRG_RESET	0x04
#define VENDOR_CMD_HW_BOOTLOADER	0x05
77 
/* Header of an ST extended HCI command (type H4_ST_EXT_CMD): like a regular
 * HCI command header but with a 16-bit parameter length field.
 * All multi-byte fields are little-endian on the wire.
 */
struct bt_hci_ext_cmd_hdr {
	uint16_t opcode;
	uint16_t param_len;
} __packed;

/* Header of a locally-handled vendor command (type H4_ST_VND_CMD). */
struct bt_vendor_cmd_hdr {
	uint8_t opcode;
	uint16_t param_len;
} __packed;

/* Vendor response layout, built by parse_cmd() right after the H:4 type byte.
 * param_len counts opcode + status + params actually used.
 */
struct bt_vendor_rsp_hdr {
	uint8_t vendor_code;
	uint16_t param_len;
	uint8_t opcode;
	uint8_t status;
	uint8_t params[2];
} __packed;
95 
96 static int h4_send(struct net_buf *buf);
97 
parse_cmd(uint8_t * hci_buffer,uint16_t hci_pckt_len,uint8_t * buffer_out)98 static uint16_t parse_cmd(uint8_t *hci_buffer, uint16_t hci_pckt_len, uint8_t *buffer_out)
99 {
100 	uint16_t len = 0;
101 	struct bt_vendor_cmd_hdr *hdr = (struct bt_vendor_cmd_hdr *) hci_buffer;
102 	struct bt_vendor_rsp_hdr *rsp = (struct bt_vendor_rsp_hdr *) (buffer_out + 1);
103 
104 	buffer_out[0] = H4_ST_VND_CMD;
105 	rsp->vendor_code = VENDOR_CODE_RESPONSE;
106 	rsp->opcode = hdr->opcode;
107 	rsp->status = 0;
108 
109 	switch (hdr->opcode) {
110 	case VENDOR_CMD_READ_VERSION:
111 		rsp->params[0] = KERNEL_VERSION_MAJOR;
112 		if (KERNEL_PATCHLEVEL >= 9) {
113 			rsp->params[1] = (KERNEL_VERSION_MINOR * 10) + 9;
114 		} else {
115 			rsp->params[1] = (KERNEL_VERSION_MINOR * 10) + KERNEL_PATCHLEVEL;
116 		}
117 		len = 2;
118 		break;
119 #if DT_HAS_COMPAT_STATUS_OKAY(st_hci_spi_v1) || DT_HAS_COMPAT_STATUS_OKAY(st_hci_spi_v2)
120 	case VENDOR_CMD_BLUENRG_RESET:
121 		bluenrg_bt_reset(0);
122 		break;
123 	case VENDOR_CMD_HW_BOOTLOADER:
124 		bluenrg_bt_reset(1);
125 		break;
126 #endif /* DT_HAS_COMPAT_STATUS_OKAY(st_hci_spi_v1) || DT_HAS_COMPAT_STATUS_OKAY(st_hci_spi_v2) */
127 	default:
128 		rsp->vendor_code = VENDOR_CODE_ERROR;
129 		rsp->status = BT_HCI_ERR_UNKNOWN_CMD;
130 	}
131 
132 	len += 2; /* Status and Command code */
133 	rsp->param_len = sys_cpu_to_le16(len);
134 	len += RESP_CMDCODE_OFFSET;
135 
136 	return len;
137 }
138 
send_evt(uint8_t * response,uint8_t len)139 static int send_evt(uint8_t *response, uint8_t len)
140 {
141 	struct net_buf *buf;
142 
143 	buf = bt_buf_get_rx(BT_BUF_EVT, K_NO_WAIT);
144 
145 	if (!buf) {
146 		LOG_ERR("EVT no buffer");
147 		return -ENOMEM;
148 	}
149 	if (len > net_buf_tailroom(buf)) {
150 		LOG_ERR("EVT too long: %d", len);
151 		net_buf_unref(buf);
152 		return -ENOMEM;
153 	}
154 	net_buf_add_mem(buf, response, len);
155 
156 	return h4_send(buf);
157 }
158 
/* Read up to @p len bytes from the UART RX FIFO into @p buf.
 *
 * @return Number of bytes actually read (may be 0), as reported by
 *         uart_fifo_read().
 */
static int h4_read(const struct device *uart, uint8_t *buf, size_t len)
{
	int rx = uart_fifo_read(uart, buf, len);

	/* %zu: len is size_t; the previous %d specifier mismatched its type */
	LOG_DBG("read %d req %zu", rx, len);
	return rx;
}
166 
valid_type(uint8_t type)167 static bool valid_type(uint8_t type)
168 {
169 	return (type == BT_HCI_H4_CMD) | (type == H4_ST_EXT_CMD) |
170 		(type == BT_HCI_H4_ACL) | (type == BT_HCI_H4_ISO) | (type == H4_ST_VND_CMD);
171 }
172 
173 /* Function expects that type is validated and only CMD, ISO or ACL are used. */
get_len(const uint8_t * hdr_buf,uint8_t type)174 static uint32_t get_len(const uint8_t *hdr_buf, uint8_t type)
175 {
176 	switch (type) {
177 	case BT_HCI_H4_CMD:
178 		return ((const struct bt_hci_cmd_hdr *)hdr_buf)->param_len;
179 	case H4_ST_EXT_CMD:
180 		return ((const struct bt_hci_ext_cmd_hdr *)hdr_buf)->param_len;
181 	case H4_ST_VND_CMD:
182 		return ((const struct bt_vendor_cmd_hdr *)hdr_buf)->param_len;
183 	case BT_HCI_H4_ISO:
184 		return bt_iso_hdr_len(
185 			sys_le16_to_cpu(((const struct bt_hci_iso_hdr *)hdr_buf)->len));
186 	case BT_HCI_H4_ACL:
187 		return sys_le16_to_cpu(((const struct bt_hci_acl_hdr *)hdr_buf)->len);
188 	default:
189 		LOG_ERR("Invalid type: %u", type);
190 		return 0;
191 	}
192 }
193 
194 /* Function expects that type is validated and only CMD, ISO or ACL are used. */
hdr_len(uint8_t type)195 static int hdr_len(uint8_t type)
196 {
197 	switch (type) {
198 	case BT_HCI_H4_CMD:
199 		return sizeof(struct bt_hci_cmd_hdr);
200 	case H4_ST_EXT_CMD:
201 		return sizeof(struct bt_hci_ext_cmd_hdr);
202 	case H4_ST_VND_CMD:
203 		return sizeof(struct bt_vendor_cmd_hdr);
204 	case BT_HCI_H4_ISO:
205 		return sizeof(struct bt_hci_iso_hdr);
206 	case BT_HCI_H4_ACL:
207 		return sizeof(struct bt_hci_acl_hdr);
208 	default:
209 		LOG_ERR("Invalid type: %u", type);
210 		return 0;
211 	}
212 }
213 
alloc_tx_buf(uint8_t type)214 static struct net_buf *alloc_tx_buf(uint8_t type)
215 {
216 	enum bt_buf_type alloc_type;
217 
218 	switch (type) {
219 	case H4_ST_EXT_CMD:
220 	case BT_HCI_H4_CMD:
221 	case H4_ST_VND_CMD:
222 		alloc_type = BT_BUF_CMD;
223 		break;
224 	case BT_HCI_H4_ISO:
225 		alloc_type = BT_BUF_ISO_OUT;
226 		break;
227 	case BT_HCI_H4_ACL:
228 		alloc_type = BT_BUF_ACL_OUT;
229 		break;
230 	default:
231 		LOG_ERR("Invalid type: %u", type);
232 		return NULL;
233 	}
234 
235 	return bt_buf_get_tx(alloc_type, K_NO_WAIT, NULL, 0);
236 }
237 
rx_isr(void)238 static void rx_isr(void)
239 {
240 	static struct net_buf *buf;
241 	static int remaining;
242 	static uint8_t state;
243 	static uint8_t type;
244 	static uint8_t hdr_buf[MAX(sizeof(struct bt_hci_cmd_hdr), sizeof(struct bt_hci_acl_hdr))];
245 	int read;
246 
247 	do {
248 		switch (state) {
249 		case ST_IDLE:
250 			/* Get packet type */
251 			read = h4_read(hci_uart_dev, &type, sizeof(type));
252 			/* since we read in loop until no data is in the fifo,
253 			 * it is possible that read = 0.
254 			 */
255 			if (read) {
256 				if (valid_type(type)) {
257 					/* Get expected header size and switch
258 					 * to receiving header.
259 					 */
260 					remaining = hdr_len(type);
261 					state = ST_HDR;
262 				} else {
263 					LOG_WRN("Unknown header %d", type);
264 				}
265 			}
266 			break;
267 		case ST_HDR:
268 			read = h4_read(hci_uart_dev, &hdr_buf[hdr_len(type) - remaining],
269 				remaining);
270 			remaining -= read;
271 			if (remaining == 0) {
272 				/* Header received. Allocate buffer and get
273 				 * payload length. If allocation fails leave
274 				 * interrupt. On failed allocation state machine
275 				 * is reset.
276 				 */
277 				uint8_t header_length;
278 
279 				buf = alloc_tx_buf(type);
280 				if (!buf) {
281 					LOG_ERR("No available command buffers!");
282 					state = ST_IDLE;
283 					return;
284 				}
285 
286 				remaining = get_len(hdr_buf, type);
287 				header_length = hdr_len(type);
288 				if (type == H4_ST_EXT_CMD) {
289 					/* Convert to regular HCI_CMD */
290 					if (remaining > 255) {
291 						LOG_ERR("len > 255");
292 						net_buf_unref(buf);
293 						state = ST_DISCARD;
294 					} else {
295 						header_length--;
296 					}
297 				}
298 				net_buf_add_mem(buf, hdr_buf, header_length);
299 				if (remaining > net_buf_tailroom(buf)) {
300 					LOG_ERR("Not enough space in buffer");
301 					net_buf_unref(buf);
302 					state = ST_DISCARD;
303 				} else {
304 					state = ST_PAYLOAD;
305 				}
306 
307 			}
308 			break;
309 		case ST_PAYLOAD:
310 			read = h4_read(hci_uart_dev, net_buf_tail(buf), remaining);
311 			buf->len += read;
312 			remaining -= read;
313 			if (remaining == 0) {
314 				/* Packet received */
315 				LOG_DBG("putting RX packet in queue.");
316 				k_fifo_put(&tx_queue, buf);
317 				state = ST_IDLE;
318 			}
319 			break;
320 		case ST_DISCARD:
321 			uint8_t discard[H4_DISCARD_LEN];
322 			size_t to_read = MIN(remaining, sizeof(discard));
323 
324 			read = h4_read(hci_uart_dev, discard, to_read);
325 			remaining -= read;
326 			if (remaining == 0) {
327 				state = ST_IDLE;
328 			}
329 			break;
330 		default:
331 			read = 0;
332 			__ASSERT_NO_MSG(0);
333 			break;
334 
335 		}
336 	} while (read);
337 }
338 
tx_isr(void)339 static void tx_isr(void)
340 {
341 	static struct net_buf *buf;
342 	int len;
343 
344 	if (!buf) {
345 		buf = k_fifo_get(&uart_tx_queue, K_NO_WAIT);
346 		if (!buf) {
347 			uart_irq_tx_disable(hci_uart_dev);
348 			return;
349 		}
350 	}
351 	len = uart_fifo_fill(hci_uart_dev, buf->data, buf->len);
352 	net_buf_pull(buf, len);
353 	if (!buf->len) {
354 		net_buf_unref(buf);
355 		buf = NULL;
356 	}
357 }
358 
bt_uart_isr(const struct device * unused,void * user_data)359 static void bt_uart_isr(const struct device *unused, void *user_data)
360 {
361 	ARG_UNUSED(unused);
362 	ARG_UNUSED(user_data);
363 
364 	if (!(uart_irq_rx_ready(hci_uart_dev) || uart_irq_tx_ready(hci_uart_dev))) {
365 		LOG_DBG("spurious interrupt");
366 	}
367 	if (uart_irq_tx_ready(hci_uart_dev)) {
368 		tx_isr();
369 	}
370 	if (uart_irq_rx_ready(hci_uart_dev)) {
371 		rx_isr();
372 	}
373 }
374 
/* TX thread: drain tx_queue (packets reassembled by rx_isr()) and either
 * handle ST vendor commands locally or hand the packet to the Bluetooth
 * stack via bt_send().
 */
static void tx_thread(void *p1, void *p2, void *p3)
{
	enum bt_buf_type buf_type;

	while (1) {
		struct net_buf *buf;
		int err = 0;
		uint8_t len;
		uint8_t response[16];

		/* Wait until a buffer is available */
		buf = k_fifo_get(&tx_queue, K_FOREVER);
		/* NOTE(review): assumes the first data byte is the H:4 packet
		 * type (presumably prepended by bt_buf_get_tx() in
		 * alloc_tx_buf()) — confirm against the hci_raw buffer API.
		 */
		buf_type = buf->data[0];
		if (buf_type == H4_ST_VND_CMD) {
			/* Vendor command: strip the type byte, build the
			 * response locally and send it back as an event.
			 */
			net_buf_pull(buf, 1);
			len = parse_cmd(buf->data, buf->len, response);
			err =  send_evt(response, len);
			/* On success the command buffer is no longer needed;
			 * on failure the shared error path below unrefs it.
			 */
			if (!err) {
				net_buf_unref(buf);
			}
		} else {
			/* Pass buffer to the stack */
			err = bt_send(buf);
		}
		if (err) {
			LOG_ERR("Unable to send (err %d)", err);
			net_buf_unref(buf);
		}

		/* Give other threads a chance to run if tx_queue keeps getting
		 * new data all the time.
		 */
		k_yield();
	}
}
410 
h4_send(struct net_buf * buf)411 static int h4_send(struct net_buf *buf)
412 {
413 	LOG_DBG("buf %p type %u len %u", buf, buf->data[0], buf->len);
414 	k_fifo_put(&uart_tx_queue, buf);
415 	uart_irq_tx_enable(hci_uart_dev);
416 	return 0;
417 }
418 
hci_uart_init(void)419 static int hci_uart_init(void)
420 {
421 	LOG_DBG("");
422 	if (!device_is_ready(hci_uart_dev)) {
423 		LOG_ERR("HCI UART %s is not ready", hci_uart_dev->name);
424 		return -EINVAL;
425 	}
426 	uart_irq_rx_disable(hci_uart_dev);
427 	uart_irq_tx_disable(hci_uart_dev);
428 	uart_irq_callback_set(hci_uart_dev, bt_uart_isr);
429 	uart_irq_rx_enable(hci_uart_dev);
430 	return 0;
431 }
432 
main(void)433 int main(void)
434 {
435 	/* incoming events and data from the controller */
436 	static K_FIFO_DEFINE(rx_queue);
437 	int err;
438 
439 	LOG_DBG("Start");
440 	__ASSERT(hci_uart_dev, "UART device is NULL");
441 
442 	/* Enable the raw interface, this will in turn open the HCI driver */
443 	bt_enable_raw(&rx_queue);
444 	/* Spawn the TX thread and start feeding commands and data to the controller */
445 	k_thread_create(&tx_thread_data, tx_thread_stack,
446 			K_THREAD_STACK_SIZEOF(tx_thread_stack), tx_thread,
447 			NULL, NULL, NULL, K_PRIO_COOP(7), 0, K_NO_WAIT);
448 	k_thread_name_set(&tx_thread_data, "HCI uart TX");
449 
450 	while (1) {
451 		struct net_buf *buf;
452 
453 		buf = k_fifo_get(&rx_queue, K_FOREVER);
454 		err = h4_send(buf);
455 		if (err) {
456 			LOG_ERR("Failed to send");
457 		}
458 	}
459 	return 0;
460 }
461 
/* Configure the UART and install the bridge ISR during application init. */
SYS_INIT(hci_uart_init, APPLICATION, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);
463