/*
 * Copyright (c) 2019-2021 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/util.h>

#include <zephyr/ipc/ipc_service.h>

#include <zephyr/net_buf.h>
#include <zephyr/bluetooth/bluetooth.h>
#include <zephyr/bluetooth/l2cap.h>
#include <zephyr/bluetooth/hci.h>
#include <zephyr/bluetooth/buf.h>
#include <zephyr/bluetooth/hci_raw.h>
#include <zephyr/bluetooth/hci_vs.h>

#include <zephyr/logging/log_ctrl.h>
#include <zephyr/logging/log.h>

LOG_MODULE_REGISTER(hci_ipc, CONFIG_BT_LOG_LEVEL);

BUILD_ASSERT(!IS_ENABLED(CONFIG_BT_CONN) || IS_ENABLED(CONFIG_BT_HCI_ACL_FLOW_CONTROL),
	     "HCI IPC driver can drop ACL data without Controller-to-Host ACL flow control");

static struct ipc_ept hci_ept;

static K_THREAD_STACK_DEFINE(tx_thread_stack, CONFIG_BT_HCI_TX_STACK_SIZE);
static struct k_thread tx_thread_data;
static K_FIFO_DEFINE(tx_queue);
static K_SEM_DEFINE(ipc_bound_sem, 0, 1);
#if defined(CONFIG_BT_CTLR_ASSERT_HANDLER) || defined(CONFIG_BT_HCI_VS_FATAL_ERROR)
/* Flag indicating whether the IPC endpoint has been bound. The endpoint
 * cannot be used before that happens.
 */
static bool ipc_ept_ready;
#endif /* CONFIG_BT_CTLR_ASSERT_HANDLER || CONFIG_BT_HCI_VS_FATAL_ERROR */

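/* HCI packet type indicators, matching the H:4 (UART) transport encoding
 * defined in the Bluetooth Core Specification, Vol 4, Part A.
 */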
#define HCI_IPC_CMD 0x01
#define HCI_IPC_ACL 0x02
#define HCI_IPC_SCO 0x03
#define HCI_IPC_EVT 0x04
#define HCI_IPC_ISO 0x05

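/* Values for the is_fatal_err argument of hci_ipc_send(). */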
#define HCI_FATAL_ERR_MSG true
#define HCI_REGULAR_MSG false

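/* Validate an HCI command received over IPC and copy it into a freshly
 * allocated command buffer. Returns NULL if the data is malformed or no
 * buffer is available.
 */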
static struct net_buf *hci_ipc_cmd_recv(uint8_t *data, size_t remaining)
{
	struct bt_hci_cmd_hdr *hdr = (void *)data;
	struct net_buf *buf;

	if (remaining < sizeof(*hdr)) {
		LOG_ERR("Not enough data for command header");
		return NULL;
	}

	buf = bt_buf_get_tx(BT_BUF_CMD, K_NO_WAIT, hdr, sizeof(*hdr));
	if (buf) {
		data += sizeof(*hdr);
		remaining -= sizeof(*hdr);
	} else {
		LOG_ERR("No available command buffers!");
		return NULL;
	}

	if (remaining != hdr->param_len) {
		LOG_ERR("Command payload length is not correct");
		net_buf_unref(buf);
		return NULL;
	}

	if (remaining > net_buf_tailroom(buf)) {
		LOG_ERR("Not enough space in buffer");
		net_buf_unref(buf);
		return NULL;
	}

	LOG_DBG("len %u", hdr->param_len);
	net_buf_add_mem(buf, data, remaining);

	return buf;
}

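/* Validate an HCI ACL data packet received over IPC and copy it into a
 * freshly allocated ACL buffer. Returns NULL if the data is malformed or
 * no buffer is available.
 */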
static struct net_buf *hci_ipc_acl_recv(uint8_t *data, size_t remaining)
{
	struct bt_hci_acl_hdr *hdr = (void *)data;
	struct net_buf *buf;

	if (remaining < sizeof(*hdr)) {
		LOG_ERR("Not enough data for ACL header");
		return NULL;
	}

	buf = bt_buf_get_tx(BT_BUF_ACL_OUT, K_NO_WAIT, hdr, sizeof(*hdr));
	if (buf) {
		data += sizeof(*hdr);
		remaining -= sizeof(*hdr);
	} else {
		LOG_ERR("No available ACL buffers!");
		return NULL;
	}

	if (remaining != sys_le16_to_cpu(hdr->len)) {
		LOG_ERR("ACL payload length is not correct");
		net_buf_unref(buf);
		return NULL;
	}

	if (remaining > net_buf_tailroom(buf)) {
		LOG_ERR("Not enough space in buffer");
		net_buf_unref(buf);
		return NULL;
	}

	LOG_DBG("len %zu", remaining);
	net_buf_add_mem(buf, data, remaining);

	return buf;
}

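/* Validate an HCI ISO data packet received over IPC and copy it into a
 * freshly allocated ISO buffer. Returns NULL if the data is malformed or
 * no buffer is available.
 */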
static struct net_buf *hci_ipc_iso_recv(uint8_t *data, size_t remaining)
{
	struct bt_hci_iso_hdr *hdr = (void *)data;
	struct net_buf *buf;

	if (remaining < sizeof(*hdr)) {
		LOG_ERR("Not enough data for ISO header");
		return NULL;
	}

	buf = bt_buf_get_tx(BT_BUF_ISO_OUT, K_NO_WAIT, hdr, sizeof(*hdr));
	if (buf) {
		data += sizeof(*hdr);
		remaining -= sizeof(*hdr);
	} else {
		LOG_ERR("No available ISO buffers!");
		return NULL;
	}

	if (remaining != bt_iso_hdr_len(sys_le16_to_cpu(hdr->len))) {
		LOG_ERR("ISO payload length is not correct");
		net_buf_unref(buf);
		return NULL;
	}

	if (remaining > net_buf_tailroom(buf)) {
		LOG_ERR("Not enough space in buffer");
		net_buf_unref(buf);
		return NULL;
	}

	LOG_DBG("len %zu", remaining);
	net_buf_add_mem(buf, data, remaining);

	return buf;
}

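/* Dispatch a raw message received over IPC to the handler matching its
 * packet type indicator and queue the resulting buffer for the TX thread.
 */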
static void hci_ipc_rx(uint8_t *data, size_t len)
{
	uint8_t pkt_indicator;
	struct net_buf *buf = NULL;
	size_t remaining = len;

	LOG_HEXDUMP_DBG(data, len, "IPC data:");

	if (remaining < sizeof(pkt_indicator)) {
		LOG_ERR("Not enough data for packet indicator");
		return;
	}

	pkt_indicator = *data++;
	remaining -= sizeof(pkt_indicator);

	switch (pkt_indicator) {
	case HCI_IPC_CMD:
		buf = hci_ipc_cmd_recv(data, remaining);
		break;

	case HCI_IPC_ACL:
		buf = hci_ipc_acl_recv(data, remaining);
		break;

	case HCI_IPC_ISO:
		buf = hci_ipc_iso_recv(data, remaining);
		break;

	default:
		LOG_ERR("Unknown HCI type %u", pkt_indicator);
		return;
	}

	if (buf) {
		k_fifo_put(&tx_queue, buf);

		LOG_HEXDUMP_DBG(buf->data, buf->len, "Final net buffer:");
	}
}

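/* Drain tx_queue and hand each buffer to the Controller via bt_send(). */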
static void tx_thread(void *p1, void *p2, void *p3)
{
	while (1) {
		struct net_buf *buf;
		int err;

		/* Wait until a buffer is available */
		buf = k_fifo_get(&tx_queue, K_FOREVER);
		/* Pass buffer to the stack */
		err = bt_send(buf);
		if (err) {
			LOG_ERR("Unable to send (err %d)", err);
			net_buf_unref(buf);
		}

		/* Give other threads a chance to run if tx_queue keeps getting
		 * new data all the time.
		 */
		k_yield();
	}
}

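/* Send an HCI buffer to the Host over the IPC endpoint, retrying until the
 * backend accepts it. is_fatal_err selects behavior that is safe in fatal
 * error handlers, which may run in ISR context.
 */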
static void hci_ipc_send(struct net_buf *buf, bool is_fatal_err)
{
	uint8_t retries = 0;
	int ret;

	LOG_DBG("buf %p type %u len %u", buf, buf->data[0], buf->len);

	LOG_HEXDUMP_DBG(buf->data, buf->len, "Final HCI buffer:");

	do {
		ret = ipc_service_send(&hci_ept, buf->data, buf->len);
		if (ret < 0) {
			retries++;
			if (retries > 10) {
				/* Default backend (rpmsg_virtio) has a timeout of 150ms. */
				LOG_WRN("IPC send has been blocked for 1.5 seconds.");
				retries = 0;
			}

			/* This function can be called from the application main thread,
			 * bt_ctlr_assert_handle() or k_sys_fatal_error_handler(). Calls
			 * from the Bluetooth Controller assert handler or the system
			 * fatal error handler may come from ISR context, where there is
			 * no thread to yield to. Moreover, both handlers report the
			 * error and then stop the system in an infinite loop, to prevent
			 * further damage when such exceptional situations occur, so
			 * calling k_yield() would work against that policy.
			 */
			if (is_fatal_err) {
				LOG_ERR("ipc_service_send error: %d", ret);
			} else {
				/* On the POSIX arch, code takes zero simulated time to
				 * execute, so busy wait loops become infinite loops unless
				 * we force the loop to take a bit of time.
				 *
				 * This delay allows the IPC consumer to execute, thus
				 * making it possible to send more data over IPC afterwards.
				 */
				Z_SPIN_DELAY(500);
				k_yield();
			}
		}
	} while (ret < 0);

	LOG_INF("ipc_service_send sent %d/%u bytes", ret, buf->len);
	__ASSERT_NO_MSG(ret == buf->len);

	net_buf_unref(buf);
}

#if defined(CONFIG_BT_CTLR_ASSERT_HANDLER)
void bt_ctlr_assert_handle(char *file, uint32_t line)
{
	/* Disable interrupts, this is unrecoverable */
	(void)irq_lock();

#if defined(CONFIG_BT_HCI_VS_FATAL_ERROR)
	/* Generate an error event only when the IPC service endpoint is already bound. */
	if (ipc_ept_ready) {
		/* Prepare vendor specific HCI debug event */
		struct net_buf *buf;

		buf = hci_vs_err_assert(file, line);
		if (buf != NULL) {
			/* Send the event over IPC */
			hci_ipc_send(buf, HCI_FATAL_ERR_MSG);
		} else {
			LOG_ERR("Can't create Fatal Error HCI event: %s at %d", __FILE__, __LINE__);
		}
	} else {
		LOG_ERR("IPC endpoint is not ready yet: %s at %d", __FILE__, __LINE__);
	}

	LOG_ERR("Halting system");

#else /* !CONFIG_BT_HCI_VS_FATAL_ERROR */
	LOG_ERR("Controller assert in: %s at %d", file, line);

#endif /* !CONFIG_BT_HCI_VS_FATAL_ERROR */

	/* Flush the logs before locking the CPU */
	LOG_PANIC();

	while (true) {
	}
}
#endif /* CONFIG_BT_CTLR_ASSERT_HANDLER */

#if defined(CONFIG_BT_HCI_VS_FATAL_ERROR)
void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf)
{
	/* Disable interrupts, this is unrecoverable */
	(void)irq_lock();

	/* Generate an error event only when there is a stack frame and the IPC service
	 * endpoint is already bound.
	 */
	if (esf != NULL && ipc_ept_ready) {
		/* Prepare vendor specific HCI debug event */
		struct net_buf *buf;

		buf = hci_vs_err_stack_frame(reason, esf);
		if (buf != NULL) {
			hci_ipc_send(buf, HCI_FATAL_ERR_MSG);
		} else {
			LOG_ERR("Can't create Fatal Error HCI event.");
		}
	}

	LOG_ERR("Halting system");

	/* Flush the logs before locking the CPU */
	LOG_PANIC();

	while (true) {
	}

	CODE_UNREACHABLE;
}
#endif /* CONFIG_BT_HCI_VS_FATAL_ERROR */

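/* IPC endpoint "bound" callback: unblock main() and mark the endpoint as
 * ready for use by the fatal error handlers.
 */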
static void hci_ept_bound(void *priv)
{
	k_sem_give(&ipc_bound_sem);
#if defined(CONFIG_BT_CTLR_ASSERT_HANDLER) || defined(CONFIG_BT_HCI_VS_FATAL_ERROR)
	ipc_ept_ready = true;
#endif /* CONFIG_BT_CTLR_ASSERT_HANDLER || CONFIG_BT_HCI_VS_FATAL_ERROR */
}

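/* IPC endpoint receive callback: forward incoming HCI packets from the Host
 * to the RX path.
 */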
static void hci_ept_recv(const void *data, size_t len, void *priv)
{
	LOG_INF("Received message of %zu bytes.", len);
	hci_ipc_rx((uint8_t *)data, len);
}

static struct ipc_ept_cfg hci_ept_cfg = {
	.name = "nrf_bt_hci",
	.cb = {
		.bound    = hci_ept_bound,
		.received = hci_ept_recv,
	},
};

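/* Bring up the raw HCI interface, start the TX thread and then bridge HCI
 * traffic between the Controller and the IPC endpoint.
 */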
int main(void)
{
	int err;
	const struct device *hci_ipc_instance =
		DEVICE_DT_GET(DT_CHOSEN(zephyr_bt_hci_ipc));

	/* incoming events and data from the controller */
	static K_FIFO_DEFINE(rx_queue);

	LOG_DBG("Start");

	/* Enable the raw interface, this will in turn open the HCI driver */
	err = bt_enable_raw(&rx_queue);
	if (err < 0) {
		LOG_ERR("Enabling HCI raw mode failed: %d", err);
	}

	/* Spawn the TX thread and start feeding commands and data to the
	 * controller
	 */
	k_thread_create(&tx_thread_data, tx_thread_stack,
			K_THREAD_STACK_SIZEOF(tx_thread_stack), tx_thread,
			NULL, NULL, NULL, K_PRIO_COOP(7), 0, K_NO_WAIT);
	k_thread_name_set(&tx_thread_data, "HCI ipc TX");

	/* Initialize IPC service instance and register endpoint. */
	err = ipc_service_open_instance(hci_ipc_instance);
	if (err < 0 && err != -EALREADY) {
		LOG_ERR("IPC service instance initialization failed: %d", err);
	}

	err = ipc_service_register_endpoint(hci_ipc_instance, &hci_ept, &hci_ept_cfg);
	if (err) {
		LOG_ERR("Registering endpoint failed with %d", err);
	}

	k_sem_take(&ipc_bound_sem, K_FOREVER);

	while (1) {
		struct net_buf *buf;

		buf = k_fifo_get(&rx_queue, K_FOREVER);
		hci_ipc_send(buf, HCI_REGULAR_MSG);
	}
	return 0;
}