/*
 * Copyright (c) 2017 Nordic Semiconductor ASA
 * Copyright (c) 2015 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <zephyr/autoconf.h>
#include <zephyr/bluetooth/hci.h>
#include <zephyr/bluetooth/buf.h>
#include <zephyr/bluetooth/hci_types.h>
#include <zephyr/bluetooth/l2cap.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/net_buf.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util_macro.h>
#include <zephyr/sys_clock.h>

#include "buf_view.h"
#include "common/hci_common_internal.h"
#include "conn_internal.h"
#include "hci_core.h"
#include "iso_internal.h"

LOG_MODULE_REGISTER(bt_buf, CONFIG_BT_LOG_LEVEL);

/* HCI events have a 1-byte length field, so 255 bytes of parameters is the
 * maximum; this size therefore fits any event.
 *
 * Not every kind of event is allocated from this pool (yet), but the Command
 * Complete event carries an arbitrary payload that depends on the opcode, so
 * the worst case must be assumed.
 */
#define SYNC_EVT_SIZE (BT_BUF_RESERVE + BT_HCI_EVT_HDR_SIZE + 255)
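/* For example: assuming BT_BUF_RESERVE is 1 byte (the HCI packet type
 * prefix) and BT_HCI_EVT_HDR_SIZE is 2 bytes (event code + parameter
 * length), SYNC_EVT_SIZE works out to 1 + 2 + 255 = 258 bytes per buffer.
 */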

static bt_buf_rx_freed_cb_t buf_rx_freed_cb;

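/* The scheduler lock keeps `buf_rx_freed_cb` stable while it is checked
 * and invoked; bt_buf_rx_freed_cb_set() takes the same lock to update it.
 */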
static void buf_rx_freed_notify(enum bt_buf_type mask)
{
	k_sched_lock();

	if (buf_rx_freed_cb) {
		buf_rx_freed_cb(mask);
	}

	k_sched_unlock();
}

#if defined(CONFIG_BT_ISO_RX)
static void iso_rx_freed_cb(void)
{
	buf_rx_freed_notify(BT_BUF_ISO_IN);
}
#endif

/* Pool for RX HCI buffers that are always freed by `bt_recv`
 * before it returns.
 *
 * A single buffer is sufficient for correct operation. The buffer count
 * may be increased as an optimization to allow the HCI transport to fill
 * buffers in parallel with `bt_recv` consuming them.
 */
NET_BUF_POOL_FIXED_DEFINE(sync_evt_pool, 1, SYNC_EVT_SIZE, 0, NULL);
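/* Sketch of the intended allocation path (hypothetical transport code): a
 * driver receiving, e.g., a Command Complete event would call
 *
 *   struct net_buf *buf = bt_buf_get_evt(BT_HCI_EVT_CMD_COMPLETE, false,
 *                                        K_FOREVER);
 *
 * which draws from sync_evt_pool, and then hand the filled buffer to the
 * host with bt_recv(buf), which frees it before returning.
 */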

NET_BUF_POOL_FIXED_DEFINE(discardable_pool, CONFIG_BT_BUF_EVT_DISCARDABLE_COUNT,
			  BT_BUF_EVT_SIZE(CONFIG_BT_BUF_EVT_DISCARDABLE_SIZE),
			  0, NULL);

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
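/* Destroy callback for ACL RX buffers. Note that there is no explicit
 * net_buf_destroy() here: bt_hci_host_num_completed_packets() is expected
 * to recycle the buffer and to grant the controller another credit via the
 * Host Number of Completed Packets command.
 */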
static void acl_in_pool_destroy(struct net_buf *buf)
{
	bt_hci_host_num_completed_packets(buf);
	buf_rx_freed_notify(BT_BUF_ACL_IN);
}

static void evt_pool_destroy(struct net_buf *buf)
{
	net_buf_destroy(buf);
	buf_rx_freed_notify(BT_BUF_EVT);
}

NET_BUF_POOL_DEFINE(acl_in_pool, (BT_BUF_ACL_RX_COUNT_EXTRA + BT_BUF_HCI_ACL_RX_COUNT),
		    BT_BUF_ACL_SIZE(CONFIG_BT_BUF_ACL_RX_SIZE), sizeof(struct bt_conn_rx),
		    acl_in_pool_destroy);

NET_BUF_POOL_FIXED_DEFINE(evt_pool, CONFIG_BT_BUF_EVT_RX_COUNT, BT_BUF_EVT_RX_SIZE, 0,
			  evt_pool_destroy);
#else
static void hci_rx_pool_destroy(struct net_buf *buf)
{
	net_buf_destroy(buf);

	/* When ACL Flow Control is disabled, a single pool is used for events
	 * and ACL data. Therefore the callback will always notify about both
	 * types of buffers, BT_BUF_EVT and BT_BUF_ACL_IN.
	 */
	buf_rx_freed_notify(BT_BUF_EVT | BT_BUF_ACL_IN);
}

NET_BUF_POOL_FIXED_DEFINE(hci_rx_pool, BT_BUF_RX_COUNT, BT_BUF_RX_SIZE,
			  sizeof(struct bt_conn_rx), hci_rx_pool_destroy);
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */

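/* Sketch of typical driver usage (hypothetical transport code): after
 * reading an ACL header and learning the payload length, a driver might do
 *
 *   struct net_buf *buf = bt_buf_get_rx(BT_BUF_ACL_IN, K_FOREVER);
 *
 *   net_buf_add_mem(buf, hdr, sizeof(hdr));
 *   net_buf_add_mem(buf, payload, payload_len);
 *   bt_recv(buf);
 *
 * where `hdr`, `payload`, and `payload_len` stand in for the driver's own
 * data. The returned buffer already contains the one-byte HCI packet type
 * prefix added below, so only the HCI header and payload need to be added.
 */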
struct net_buf *bt_buf_get_rx(enum bt_buf_type type, k_timeout_t timeout)
{
	struct net_buf *buf;

	__ASSERT(type == BT_BUF_EVT || type == BT_BUF_ACL_IN ||
		 type == BT_BUF_ISO_IN, "Invalid buffer type requested");

	if (IS_ENABLED(CONFIG_BT_ISO_RX) && type == BT_BUF_ISO_IN) {
		return bt_iso_get_rx(timeout);
	}

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
	if (type == BT_BUF_EVT) {
		buf = net_buf_alloc(&evt_pool, timeout);
	} else {
		buf = net_buf_alloc(&acl_in_pool, timeout);
	}
#else
	buf = net_buf_alloc(&hci_rx_pool, timeout);
#endif
	if (buf) {
		net_buf_add_u8(buf, bt_buf_type_to_h4(type));
	}

	return buf;
}

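/* Sketch of how a transport might use the freed-buffer notification
 * (hypothetical driver code): register a callback once, then resume RX
 * when a matching buffer type is freed.
 *
 *   static void rx_freed(enum bt_buf_type mask)
 *   {
 *           if (mask & BT_BUF_ACL_IN) {
 *                   // resume reading ACL data from the controller
 *           }
 *   }
 *
 *   bt_buf_rx_freed_cb_set(rx_freed);
 *
 * Passing NULL disables the notification again.
 */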
void bt_buf_rx_freed_cb_set(bt_buf_rx_freed_cb_t cb)
{
	k_sched_lock();

	buf_rx_freed_cb = cb;

#if defined(CONFIG_BT_ISO_RX)
	bt_iso_buf_rx_freed_cb_set(cb != NULL ? iso_rx_freed_cb : NULL);
#endif

	k_sched_unlock();
}

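/* Allocate a buffer for an incoming HCI event. Synchronous events (Command
 * Status, Command Complete, and, when connections or ISO are supported,
 * Number of Completed Packets) come from the dedicated sync_evt_pool;
 * events the transport has marked discardable come from discardable_pool
 * and may simply fail to allocate under pressure; everything else goes
 * through bt_buf_get_rx().
 */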
struct net_buf *bt_buf_get_evt(uint8_t evt, bool discardable,
			       k_timeout_t timeout)
{
	struct net_buf *buf;

	switch (evt) {
#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_ISO)
	case BT_HCI_EVT_NUM_COMPLETED_PACKETS:
#endif /* CONFIG_BT_CONN || CONFIG_BT_ISO */
	case BT_HCI_EVT_CMD_STATUS:
	case BT_HCI_EVT_CMD_COMPLETE:
		buf = net_buf_alloc(&sync_evt_pool, timeout);
		break;
	default:
		if (discardable) {
			/* Whether an event is discardable is decided by the
			 * host-side HCI transport driver.
			 */
			buf = net_buf_alloc(&discardable_pool, timeout);
		} else {
			return bt_buf_get_rx(BT_BUF_EVT, timeout);
		}
	}

	if (buf) {
		net_buf_add_u8(buf, BT_HCI_H4_EVT);
	}

	return buf;
}

#ifdef ZTEST_UNITTEST
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
struct net_buf_pool *bt_buf_get_evt_pool(void)
{
	return &evt_pool;
}

struct net_buf_pool *bt_buf_get_acl_in_pool(void)
{
	return &acl_in_pool;
}
#else
struct net_buf_pool *bt_buf_get_hci_rx_pool(void)
{
	return &hci_rx_pool;
}
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */

#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_ISO)
struct net_buf_pool *bt_buf_get_num_complete_pool(void)
{
	return &sync_evt_pool;
}
#endif /* CONFIG_BT_CONN || CONFIG_BT_ISO */
#endif /* ZTEST_UNITTEST */

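/* Create a "view" (zero-copy slice) of the first `len` bytes of `parent`,
 * usable for prepending headers without copying the payload. The parent is
 * "clipped" (data == NULL, size == 0) until the view is destroyed, so only
 * one view can exist at a time.
 *
 * Sketch of the intended lifecycle (hypothetical caller code, where
 * `view_buf`, `frag_len`, and `meta` are the caller's own objects):
 *
 *   struct net_buf *view = bt_buf_make_view(view_buf, net_buf_ref(parent),
 *                                           frag_len, &meta);
 *   // ... push headers onto `view` and send it ...
 *   bt_buf_destroy_view(view, &meta);   // un-clips and unrefs the parent
 */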
struct net_buf *bt_buf_make_view(struct net_buf *view,
				 struct net_buf *parent,
				 size_t len,
				 struct bt_buf_view_meta *meta)
{
	__ASSERT_NO_MSG(len);
	__ASSERT_NO_MSG(view);
	/* The whole point of this API is to allow prepending data. If the
	 * headroom is 0, that will not happen.
	 */
	__ASSERT_NO_MSG(net_buf_headroom(parent) > 0);

	__ASSERT_NO_MSG(!bt_buf_has_view(parent));

	LOG_DBG("make-view %p viewsize %zu meta %p", view, len, meta);

	net_buf_simple_clone(&parent->b, &view->b);
	view->size = net_buf_headroom(parent) + len;
	view->len = len;
	view->flags = NET_BUF_EXTERNAL_DATA;

	/* Now that we have a view, consume `len` bytes of data from the
	 * parent.
	 */
	(void)net_buf_pull(parent, len);

	/* Save a backup of the parent's state and "clip" the buffer so that
	 * the assertion above makes the next `make_view` on it fail.
	 */
	meta->backup.data = parent->data;
	parent->data = NULL;

	meta->backup.size = parent->size;
	parent->size = 0;

	/* The caller's reference to `parent` is moved in by passing `parent`
	 * as an argument; it is kept in `meta` until the view is destroyed.
	 */
	meta->parent = parent;
	parent = NULL;

	return view;
}

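/* Tear down a view created by bt_buf_make_view(): restore the parent's
 * `data` and `size` from the backup held in `meta`, drop the parent
 * reference that was moved in when the view was created, and return the
 * view buffer to its pool.
 */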
void bt_buf_destroy_view(struct net_buf *view, struct bt_buf_view_meta *meta)
{
	LOG_DBG("destroy-view %p meta %p", view, meta);
	__ASSERT_NO_MSG(meta->parent);

	/* "Un-clip" the parent buffer, restoring its original state. */
	meta->parent->data = meta->backup.data;
	meta->parent->size = meta->backup.size;

	net_buf_unref(meta->parent);

	memset(meta, 0, sizeof(*meta));
	net_buf_destroy(view);
}

bool bt_buf_has_view(const struct net_buf *parent)
{
	/* This is enforced by `make_view`; see the comments there. */
	return parent->size == 0 && parent->data == NULL;
}