1 /* conn.c - Bluetooth connection handling */
2 
3 /*
4  * Copyright (c) 2015-2016 Intel Corporation
5  *
6  * SPDX-License-Identifier: Apache-2.0
7  */
8 
9 #include <ble_os.h>
10 #include <string.h>
11 #include <bt_errno.h>
12 #include <stdbool.h>
13 #include <atomic.h>
14 #include <misc/byteorder.h>
15 #include <misc/util.h>
16 #include <misc/slist.h>
17 #include <misc/stack.h>
18 #include <misc/__assert.h>
19 
20 #include <bluetooth/hci.h>
21 #include <bluetooth/bluetooth.h>
22 #include <bluetooth/conn.h>
23 #include <bluetooth/hci_driver.h>
24 #include <bluetooth/att.h>
25 
26 #define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_CONN)
27 #define LOG_MODULE_NAME bt_conn
28 #include "common/log.h"
29 
30 #include "hci_core.h"
31 #include "conn_internal.h"
32 #include "l2cap_internal.h"
33 #include "keys.h"
34 #include "smp.h"
35 #include "att_internal.h"
36 #include "gatt_internal.h"
37 #include <hci_api.h>
38 
39 struct tx_meta {
40 	struct bt_conn_tx *tx;
41 };
42 
43 #define tx_data(buf) ((struct tx_meta *)net_buf_user_data(buf))
44 
45 NET_BUF_POOL_DEFINE(acl_tx_pool, CONFIG_BT_L2CAP_TX_BUF_COUNT,
46 		    BT_L2CAP_BUF_SIZE(CONFIG_BT_L2CAP_TX_MTU),
47 		    sizeof(struct tx_meta), NULL);
48 
49 #if CONFIG_BT_L2CAP_TX_FRAG_COUNT > 0
50 
51 #if defined(CONFIG_BT_CTLR_TX_BUFFER_SIZE)
52 #define FRAG_SIZE BT_L2CAP_BUF_SIZE(CONFIG_BT_CTLR_TX_BUFFER_SIZE - 4)
53 #else
54 #define FRAG_SIZE BT_L2CAP_BUF_SIZE(CONFIG_BT_L2CAP_TX_MTU)
55 #endif
56 
57 /* Dedicated pool for fragment buffers in case queued up TX buffers don't
58  * fit the controller's buffer size. We can't use the acl_tx_pool for the
59  * fragmentation, since it's possible that this pool is empty and all buffers
60  * are queued up in the TX queue. In such a situation, trying to allocate
61  * another buffer from the acl_tx_pool would result in a deadlock.
62  */
63 NET_BUF_POOL_FIXED_DEFINE(frag_pool, CONFIG_BT_L2CAP_TX_FRAG_COUNT, FRAG_SIZE,
64 			  NULL);
65 
66 #endif /* CONFIG_BT_L2CAP_TX_FRAG_COUNT > 0 */
67 
68 #if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR)
69 const struct bt_conn_auth_cb *bt_auth;
70 #endif /* CONFIG_BT_SMP || CONFIG_BT_BREDR */
71 
72 static struct bt_conn conns[CONFIG_BT_MAX_CONN];
73 static struct bt_conn_cb *callback_list;
74 
75 static struct bt_conn_tx conn_tx[CONFIG_BT_CONN_TX_MAX];
76 struct kfifo free_tx;
77 
78 #if defined(CONFIG_BT_BREDR)
79 static struct bt_conn sco_conns[CONFIG_BT_MAX_SCO_CONN];
80 
81 enum pairing_method {
82 	LEGACY,			/* Legacy (pre-SSP) pairing */
83 	JUST_WORKS,		/* JustWorks pairing */
84 	PASSKEY_INPUT,		/* Passkey Entry input */
85 	PASSKEY_DISPLAY,	/* Passkey Entry display */
86 	PASSKEY_CONFIRM,	/* Passkey confirm */
87 };
88 
89 /* based on table 5.7, Core Spec 4.2, Vol.3 Part C, 5.2.2.6 */
90 static const u8_t ssp_method[4 /* remote */][4 /* local */] = {
91 	      { JUST_WORKS, JUST_WORKS, PASSKEY_INPUT, JUST_WORKS },
92 	      { JUST_WORKS, PASSKEY_CONFIRM, PASSKEY_INPUT, JUST_WORKS },
93 	      { PASSKEY_DISPLAY, PASSKEY_DISPLAY, PASSKEY_INPUT, JUST_WORKS },
94 	      { JUST_WORKS, JUST_WORKS, JUST_WORKS, JUST_WORKS },
95 };
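
/* Worked example (illustrative): a remote DisplayYesNo device (IO capability
 * 0x01) pairing against a local KeyboardOnly device (0x02) resolves to
 * ssp_method[1][2] == PASSKEY_INPUT, i.e. the local side types the passkey
 * shown on the remote's display.
 */
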
96 #endif /* CONFIG_BT_BREDR */
97 
98 void bt_conn_del(struct bt_conn *conn);
99 
100 struct k_sem *bt_conn_get_pkts(struct bt_conn *conn)
101 {
102 #if defined(CONFIG_BT_BREDR)
103 	if (conn->type == BT_CONN_TYPE_BR || !bt_dev.le.mtu) {
104 		return &bt_dev.br.pkts;
105 	}
106 #endif /* CONFIG_BT_BREDR */
107 
108 	return &bt_dev.le.pkts;
109 }
110 
111 static inline const char *state2str(bt_conn_state_t state)
112 {
113 	switch (state) {
114 	case BT_CONN_DISCONNECTED:
115 		return "disconnected";
116 	case BT_CONN_CONNECT_SCAN:
117 		return "connect-scan";
118 	case BT_CONN_CONNECT_DIR_ADV:
119 		return "connect-dir-adv";
120 	case BT_CONN_CONNECT_ADV:
121 		return "connect-adv";
122 	case BT_CONN_CONNECT_AUTO:
123 		return "connect-auto";
124 	case BT_CONN_CONNECT:
125 		return "connect";
126 	case BT_CONN_CONNECTED:
127 		return "connected";
128 	case BT_CONN_DISCONNECT:
129 		return "disconnect";
130 	default:
131 		return "(unknown)";
132 	}
133 }
134 
135 static void notify_connected(struct bt_conn *conn)
136 {
137 	struct bt_conn_cb *cb;
138 
139 	for (cb = callback_list; cb; cb = cb->_next) {
140 		if (cb->connected) {
141 			cb->connected(conn, conn->err);
142 		}
143 	}
144 
145 	if (!conn->err) {
146 		bt_gatt_connected(conn);
147 	}
148 }
149 
150 static void notify_disconnected(struct bt_conn *conn)
151 {
152 	struct bt_conn_cb *cb;
153 
154 	for (cb = callback_list; cb; cb = cb->_next) {
155 		if (cb->disconnected) {
156 			cb->disconnected(conn, conn->err);
157 		}
158 	}
159 }
160 
161 #if defined(CONFIG_BT_REMOTE_INFO)
162 void notify_remote_info(struct bt_conn *conn)
163 {
164 	struct bt_conn_remote_info remote_info;
165 	struct bt_conn_cb *cb;
166 	int err;
167 
168 	err = bt_conn_get_remote_info(conn, &remote_info);
169 	if (err) {
170 		BT_DBG("Notify remote info failed %d", err);
171 		return;
172 	}
173 
174 	for (cb = callback_list; cb; cb = cb->_next) {
175 		if (cb->remote_info_available) {
176 			cb->remote_info_available(conn, &remote_info);
177 		}
178 	}
179 }
180 #endif /* defined(CONFIG_BT_REMOTE_INFO) */
181 
182 void notify_le_param_updated(struct bt_conn *conn)
183 {
184 	struct bt_conn_cb *cb;
185 
186 	/* If the new connection parameters meet the requirements of the pending
187 	 * parameters, don't send a slave conn param request on timeout anymore.
188 	 */
189 	if (atomic_test_bit(conn->flags, BT_CONN_SLAVE_PARAM_SET) &&
190 	    conn->le.interval >= conn->le.interval_min &&
191 	    conn->le.interval <= conn->le.interval_max &&
192 	    conn->le.latency == conn->le.pending_latency &&
193 	    conn->le.timeout == conn->le.pending_timeout) {
194 		atomic_clear_bit(conn->flags, BT_CONN_SLAVE_PARAM_SET);
195 	}
196 
197 	for (cb = callback_list; cb; cb = cb->_next) {
198 		if (cb->le_param_updated) {
199 			cb->le_param_updated(conn, conn->le.interval,
200 					     conn->le.latency,
201 					     conn->le.timeout);
202 		}
203 	}
204 }
205 
206 #if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
207 void notify_le_data_len_updated(struct bt_conn *conn)
208 {
209 	struct bt_conn_cb *cb;
210 
211 	for (cb = callback_list; cb; cb = cb->_next) {
212 		if (cb->le_data_len_updated) {
213 			cb->le_data_len_updated(conn, &conn->le.data_len);
214 		}
215 	}
216 }
217 #endif
218 
219 #if defined(CONFIG_BT_USER_PHY_UPDATE)
220 void notify_le_phy_updated(struct bt_conn *conn)
221 {
222 	struct bt_conn_cb *cb;
223 
224 	for (cb = callback_list; cb; cb = cb->_next) {
225 		if (cb->le_phy_updated) {
226 			cb->le_phy_updated(conn, &conn->le.phy);
227 		}
228 	}
229 }
230 #endif
231 
232 bool le_param_req(struct bt_conn *conn, struct bt_le_conn_param *param)
233 {
234 	struct bt_conn_cb *cb;
235 
236 	if (!bt_le_conn_params_valid(param)) {
237 		return false;
238 	}
239 
240 	for (cb = callback_list; cb; cb = cb->_next) {
241 		if (!cb->le_param_req) {
242 			continue;
243 		}
244 
245 		if (!cb->le_param_req(conn, param)) {
246 			return false;
247 		}
248 
249 		/* The callback may modify the parameters so we need to
250 		 * double-check that it returned valid parameters.
251 		 */
252 		if (!bt_le_conn_params_valid(param)) {
253 			return false;
254 		}
255 	}
256 
257 	/* Default to accepting if there's no app callback */
258 	return true;
259 }
260 
261 static int send_conn_le_param_update(struct bt_conn *conn,
262 				const struct bt_le_conn_param *param)
263 {
264 	BT_DBG("conn %p features 0x%02x params (%d-%d %d %d)", conn,
265 	       conn->le.features[0], param->interval_min,
266 	       param->interval_max, param->latency, param->timeout);
267 
268 	/* Proceed only if the connection parameters contain valid values */
269 	if (!bt_le_conn_params_valid(param)) {
270 		return -EINVAL;
271 	}
272 
273 	/* Use the LE Connection Parameter Request procedure if both local and remote
274 	 * support it; if the local role is master, use LE Connection Update directly.
275 	 */
276 	if ((BT_FEAT_LE_CONN_PARAM_REQ_PROC(bt_dev.le.features) &&
277 	     BT_FEAT_LE_CONN_PARAM_REQ_PROC(conn->le.features) &&
278 	     !atomic_test_bit(conn->flags, BT_CONN_SLAVE_PARAM_L2CAP)) ||
279 	     (conn->role == BT_HCI_ROLE_MASTER)) {
280 		int rc;
281 
282 		rc = bt_conn_le_conn_update(conn, param);
283 
284 		/* store those in case of fallback to L2CAP */
285 		if (rc == 0) {
286 			conn->le.pending_latency = param->latency;
287 			conn->le.pending_timeout = param->timeout;
288 		}
289 
290 		return rc;
291 	}
292 
293 	/* Otherwise fall back to the L2CAP Connection Parameter Update Request, as
294 	 * the remote master does not support the LL Connection Parameter Request procedure.
295 	 */
296 	return bt_l2cap_update_conn_param(conn, param);
297 }
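
/* Application-side sketch (illustrative, not part of this translation unit):
 * requesting new parameters through the public bt_conn_le_param_update() API
 * declared further below. Interval values are in 1.25 ms units, the timeout
 * in 10 ms units; the numbers here are arbitrary examples (30-50 ms interval,
 * no slave latency, 4 s supervision timeout):
 *
 *	int err = bt_conn_le_param_update(conn,
 *					  BT_LE_CONN_PARAM(24, 40, 0, 400));
 *	if (err) {
 *		BT_WARN("Connection parameter update failed (err %d)", err);
 *	}
 */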
298 
299 static void tx_free(struct bt_conn_tx *tx)
300 {
301 	tx->cb = NULL;
302 	tx->user_data = NULL;
303 	tx->pending_no_cb = 0U;
304 	k_fifo_put(&free_tx, tx);
305 }
306 
307 static void tx_notify(struct bt_conn *conn)
308 {
309 	BT_DBG("conn %p", conn);
310 
311 	while (1) {
312 		struct bt_conn_tx *tx;
313 		unsigned int key;
314 		bt_conn_tx_cb_t cb;
315 		void *user_data;
316 
317 		key = irq_lock();
318 		if (sys_slist_is_empty(&conn->tx_complete)) {
319 			irq_unlock(key);
320 			break;
321 		}
322 
323 		tx = (void *)sys_slist_get_not_empty(&conn->tx_complete);
324 		irq_unlock(key);
325 
326 		BT_DBG("tx %p cb %p user_data %p", tx, tx->cb, tx->user_data);
327 
328 		/* Copy over the params */
329 		cb = tx->cb;
330 		user_data = tx->user_data;
331 
332 		/* Free the TX context first since a user may be waiting for it */
333 		tx_free(tx);
334 
335 		/* Run the callback, at this point it should be safe to
336 		 * allocate new buffers since the TX should have been
337 		 * unblocked by tx_free.
338 		 */
339 		cb(conn, user_data);
340 	}
341 }
342 
343 static void tx_complete_work(struct k_work *work)
344 {
345 	struct bt_conn *conn = CONTAINER_OF(work, struct bt_conn,
346 					   tx_complete_work);
347 
348 	BT_DBG("conn %p", conn);
349 
350 	tx_notify(conn);
351 }
352 
353 static void conn_update_timeout(struct k_work *work)
354 {
355 	struct bt_conn *conn = CONTAINER_OF(work, struct bt_conn, update_work);
356 	const struct bt_le_conn_param *param;
357 
358 	BT_DBG("conn %p", conn);
359 
360 	if (conn->state == BT_CONN_DISCONNECTED) {
361 		bt_l2cap_disconnected(conn);
362 //		notify_disconnected(conn);
363 
364 		/* Release the reference we took for the very first
365 		 * state transition.
366 		 */
367 		bt_conn_unref(conn);
368 
369 		/* The reference released above may have freed up a
370 		 * connection slot, so resume advertising.
371 		 */
372 		if (IS_ENABLED(CONFIG_BT_PERIPHERAL)) {
373 			bt_le_adv_resume();
374 		}
375 
376 		return;
377 	}
378 
379 	if (conn->type != BT_CONN_TYPE_LE) {
380 		return;
381 	}
382 
383 	if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
384 	    conn->role == BT_CONN_ROLE_MASTER) {
385 		/* Don't call bt_conn_disconnect() as it would also clear the
386 		 * auto-connect flag if it was set; instead just cancel the
387 		 * connection attempt directly.
388 		 */
389 		bt_le_create_conn_cancel();
390 		return;
391 	}
392 
393 	if (IS_ENABLED(CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS)) {
394 		/* If the application set its own params, use those;
395 		 * otherwise use the defaults.
396 		 */
397 		if (atomic_test_and_clear_bit(conn->flags,
398 					      BT_CONN_SLAVE_PARAM_SET)) {
399 			param = BT_LE_CONN_PARAM(conn->le.interval_min,
400 						conn->le.interval_max,
401 						conn->le.pending_latency,
402 						conn->le.pending_timeout);
403 			send_conn_le_param_update(conn, param);
404 		} else {
405 #if defined(CONFIG_BT_GAP_PERIPHERAL_PREF_PARAMS)
406 			param = BT_LE_CONN_PARAM(
407 					CONFIG_BT_PERIPHERAL_PREF_MIN_INT,
408 					CONFIG_BT_PERIPHERAL_PREF_MAX_INT,
409 					CONFIG_BT_PERIPHERAL_PREF_SLAVE_LATENCY,
410 					CONFIG_BT_PERIPHERAL_PREF_TIMEOUT);
411 			send_conn_le_param_update(conn, param);
412 #endif
413 		}
414 	}
415 
416 	atomic_set_bit(conn->flags, BT_CONN_SLAVE_PARAM_UPDATE);
417 }
418 
419 static struct bt_conn *conn_new(void)
420 {
421 	struct bt_conn *conn = NULL;
422 	int i;
423 
424 	for (i = 0; i < ARRAY_SIZE(conns); i++) {
425 		if (!atomic_get(&conns[i].ref)) {
426 			conn = &conns[i];
427 			(void)memset(conn, 0, sizeof(*conn));
428 			conn->handle = i;
429 			break;
430 		}
431 	}
432 
433 	if (!conn) {
434 		return NULL;
435 	}
436 
437 	k_delayed_work_init(&conn->update_work, conn_update_timeout);
438 
439 	k_work_init(&conn->tx_complete_work, tx_complete_work);
440 
441 	atomic_set(&conn->ref, 1);
442 
443 	return conn;
444 }
445 
446 #if defined(CONFIG_BT_BREDR)
447 void bt_sco_cleanup(struct bt_conn *sco_conn)
448 {
449 	bt_conn_unref(sco_conn->sco.acl);
450 	sco_conn->sco.acl = NULL;
451 	bt_conn_unref(sco_conn);
452 }
453 
454 static struct bt_conn *sco_conn_new(void)
455 {
456 	struct bt_conn *sco_conn = NULL;
457 	int i;
458 
459 	for (i = 0; i < ARRAY_SIZE(sco_conns); i++) {
460 		if (!atomic_get(&sco_conns[i].ref)) {
461 			sco_conn = &sco_conns[i];
462 			break;
463 		}
464 	}
465 
466 	if (!sco_conn) {
467 		return NULL;
468 	}
469 
470 	(void)memset(sco_conn, 0, sizeof(*sco_conn));
471 
472 	atomic_set(&sco_conn->ref, 1);
473 
474 	return sco_conn;
475 }
476 
477 struct bt_conn *bt_conn_create_br(const bt_addr_t *peer,
478 				  const struct bt_br_conn_param *param)
479 {
480 	struct bt_hci_cp_connect *cp;
481 	struct bt_conn *conn;
482 	struct net_buf *buf;
483 
484 	conn = bt_conn_lookup_addr_br(peer);
485 	if (conn) {
486 		switch (conn->state) {
487 		case BT_CONN_CONNECT:
488 		case BT_CONN_CONNECTED:
489 			return conn;
490 		default:
491 			bt_conn_unref(conn);
492 			return NULL;
493 		}
494 	}
495 
496 	conn = bt_conn_add_br(peer);
497 	if (!conn) {
498 		return NULL;
499 	}
500 
501 	buf = bt_hci_cmd_create(BT_HCI_OP_CONNECT, sizeof(*cp));
502 	if (!buf) {
503 		bt_conn_unref(conn);
504 		return NULL;
505 	}
506 
507 	cp = net_buf_add(buf, sizeof(*cp));
508 
509 	(void)memset(cp, 0, sizeof(*cp));
510 
511 	memcpy(&cp->bdaddr, peer, sizeof(cp->bdaddr));
512 	cp->packet_type = sys_cpu_to_le16(0xcc18); /* DM1 DH1 DM3 DH3 DM5 DH5 */
513 	cp->pscan_rep_mode = 0x02; /* R2 */
514 	cp->allow_role_switch = param->allow_role_switch ? 0x01 : 0x00;
515 	cp->clock_offset = 0x0000; /* TODO: use cached clock offset */
516 
517 	if (bt_hci_cmd_send_sync(BT_HCI_OP_CONNECT, buf, NULL) < 0) {
518 		bt_conn_unref(conn);
519 		return NULL;
520 	}
521 
522 	bt_conn_set_state(conn, BT_CONN_CONNECT);
523 	conn->role = BT_CONN_ROLE_MASTER;
524 
525 	return conn;
526 }
527 
528 struct bt_conn *bt_conn_create_sco(const bt_addr_t *peer)
529 {
530 	struct bt_hci_cp_setup_sync_conn *cp;
531 	struct bt_conn *sco_conn;
532 	struct net_buf *buf;
533 	int link_type;
534 
535 	sco_conn = bt_conn_lookup_addr_sco(peer);
536 	if (sco_conn) {
537 		switch (sco_conn->state) {
538 		case BT_CONN_CONNECT:
539 		case BT_CONN_CONNECTED:
540 			return sco_conn;
541 		default:
542 			bt_conn_unref(sco_conn);
543 			return NULL;
544 		}
545 	}
546 
547 	if (BT_FEAT_LMP_ESCO_CAPABLE(bt_dev.features)) {
548 		link_type = BT_HCI_ESCO;
549 	} else {
550 		link_type = BT_HCI_SCO;
551 	}
552 
553 	sco_conn = bt_conn_add_sco(peer, link_type);
554 	if (!sco_conn) {
555 		return NULL;
556 	}
557 
558 	buf = bt_hci_cmd_create(BT_HCI_OP_SETUP_SYNC_CONN, sizeof(*cp));
559 	if (!buf) {
560 		bt_sco_cleanup(sco_conn);
561 		return NULL;
562 	}
563 
564 	cp = net_buf_add(buf, sizeof(*cp));
565 
566 	(void)memset(cp, 0, sizeof(*cp));
567 
568 	BT_DBG("handle : %x", sco_conn->sco.acl->handle);
569 
570 	cp->handle = sco_conn->sco.acl->handle;
571 	cp->pkt_type = sco_conn->sco.pkt_type;
572 	cp->tx_bandwidth = 0x00001f40;
573 	cp->rx_bandwidth = 0x00001f40;
574 	cp->max_latency = 0x0007;
575 	cp->retrans_effort = 0x01;
576 	cp->content_format = BT_VOICE_CVSD_16BIT;
577 
578 	if (bt_hci_cmd_send_sync(BT_HCI_OP_SETUP_SYNC_CONN, buf,
579 				 NULL) < 0) {
580 		bt_sco_cleanup(sco_conn);
581 		return NULL;
582 	}
583 
584 	bt_conn_set_state(sco_conn, BT_CONN_CONNECT);
585 
586 	return sco_conn;
587 }
588 
589 struct bt_conn *bt_conn_lookup_addr_sco(const bt_addr_t *peer)
590 {
591 	int i;
592 
593 	for (i = 0; i < ARRAY_SIZE(sco_conns); i++) {
594 		if (!atomic_get(&sco_conns[i].ref)) {
595 			continue;
596 		}
597 
598 		if (sco_conns[i].type != BT_CONN_TYPE_SCO) {
599 			continue;
600 		}
601 
602 		if (!bt_addr_cmp(peer, &sco_conns[i].sco.acl->br.dst)) {
603 			return bt_conn_ref(&sco_conns[i]);
604 		}
605 	}
606 
607 	return NULL;
608 }
609 
610 struct bt_conn *bt_conn_lookup_addr_br(const bt_addr_t *peer)
611 {
612 	int i;
613 
614 	for (i = 0; i < ARRAY_SIZE(conns); i++) {
615 		if (!atomic_get(&conns[i].ref)) {
616 			continue;
617 		}
618 
619 		if (conns[i].type != BT_CONN_TYPE_BR) {
620 			continue;
621 		}
622 
623 		if (!bt_addr_cmp(peer, &conns[i].br.dst)) {
624 			return bt_conn_ref(&conns[i]);
625 		}
626 	}
627 
628 	return NULL;
629 }
630 
631 struct bt_conn *bt_conn_add_sco(const bt_addr_t *peer, int link_type)
632 {
633 	struct bt_conn *sco_conn = sco_conn_new();
634 
635 	if (!sco_conn) {
636 		return NULL;
637 	}
638 
639 	sco_conn->sco.acl = bt_conn_lookup_addr_br(peer);
640 	sco_conn->type = BT_CONN_TYPE_SCO;
641 
642 	if (link_type == BT_HCI_SCO) {
643 		if (BT_FEAT_LMP_ESCO_CAPABLE(bt_dev.features)) {
644 			sco_conn->sco.pkt_type = (bt_dev.br.esco_pkt_type &
645 						  ESCO_PKT_MASK);
646 		} else {
647 			sco_conn->sco.pkt_type = (bt_dev.br.esco_pkt_type &
648 						  SCO_PKT_MASK);
649 		}
650 	} else if (link_type == BT_HCI_ESCO) {
651 		sco_conn->sco.pkt_type = (bt_dev.br.esco_pkt_type &
652 					  ~EDR_ESCO_PKT_MASK);
653 	}
654 
655 	return sco_conn;
656 }
657 
658 struct bt_conn *bt_conn_add_br(const bt_addr_t *peer)
659 {
660 	struct bt_conn *conn = conn_new();
661 
662 	if (!conn) {
663 		return NULL;
664 	}
665 
666 	bt_addr_copy(&conn->br.dst, peer);
667 	conn->type = BT_CONN_TYPE_BR;
668 
669 	return conn;
670 }
671 
672 static int pin_code_neg_reply(const bt_addr_t *bdaddr)
673 {
674 	struct bt_hci_cp_pin_code_neg_reply *cp;
675 	struct net_buf *buf;
676 
677 	BT_DBG("");
678 
679 	buf = bt_hci_cmd_create(BT_HCI_OP_PIN_CODE_NEG_REPLY, sizeof(*cp));
680 	if (!buf) {
681 		return -ENOBUFS;
682 	}
683 
684 	cp = net_buf_add(buf, sizeof(*cp));
685 	bt_addr_copy(&cp->bdaddr, bdaddr);
686 
687 	return bt_hci_cmd_send_sync(BT_HCI_OP_PIN_CODE_NEG_REPLY, buf, NULL);
688 }
689 
690 static int pin_code_reply(struct bt_conn *conn, const char *pin, u8_t len)
691 {
692 	struct bt_hci_cp_pin_code_reply *cp;
693 	struct net_buf *buf;
694 
695 	BT_DBG("");
696 
697 	buf = bt_hci_cmd_create(BT_HCI_OP_PIN_CODE_REPLY, sizeof(*cp));
698 	if (!buf) {
699 		return -ENOBUFS;
700 	}
701 
702 	cp = net_buf_add(buf, sizeof(*cp));
703 
704 	bt_addr_copy(&cp->bdaddr, &conn->br.dst);
705 	cp->pin_len = len;
706 	strncpy((char *)cp->pin_code, pin, sizeof(cp->pin_code));
707 
708 	return bt_hci_cmd_send_sync(BT_HCI_OP_PIN_CODE_REPLY, buf, NULL);
709 }
710 
711 int bt_conn_auth_pincode_entry(struct bt_conn *conn, const char *pin)
712 {
713 	size_t len;
714 
715 	if (!bt_auth) {
716 		return -EINVAL;
717 	}
718 
719 	if (conn->type != BT_CONN_TYPE_BR) {
720 		return -EINVAL;
721 	}
722 
723 	len = strlen(pin);
724 	if (len > 16) {
725 		return -EINVAL;
726 	}
727 
728 	if (conn->required_sec_level == BT_SECURITY_L3 && len < 16) {
729 		BT_WARN("PIN code for %s is not 16 bytes wide",
730 			bt_addr_str(&conn->br.dst));
731 		return -EPERM;
732 	}
733 
734 	/* Allow the user to send the entered PIN to the remote, then reset the user state. */
735 	if (!atomic_test_and_clear_bit(conn->flags, BT_CONN_USER)) {
736 		return -EPERM;
737 	}
738 
739 	if (len == 16) {
740 		atomic_set_bit(conn->flags, BT_CONN_BR_LEGACY_SECURE);
741 	}
742 
743 	return pin_code_reply(conn, pin, len);
744 }
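
/* Example PIN-entry auth callback (illustrative sketch; the handler name is
 * hypothetical). The stack invokes it from bt_conn_pin_code_req() below and
 * expects the answer via bt_conn_auth_pincode_entry(). When 'highsec' is set
 * (required security level BT_SECURITY_L3) a full 16-digit PIN is required.
 *
 *	static void app_pincode_entry(struct bt_conn *conn, bool highsec)
 *	{
 *		bt_conn_auth_pincode_entry(conn,
 *					   highsec ? "0000000000000000" : "0000");
 *	}
 */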
745 
746 void bt_conn_pin_code_req(struct bt_conn *conn)
747 {
748 	if (bt_auth && bt_auth->pincode_entry) {
749 		bool secure = false;
750 
751 		if (conn->required_sec_level == BT_SECURITY_L3) {
752 			secure = true;
753 		}
754 
755 		atomic_set_bit(conn->flags, BT_CONN_USER);
756 		atomic_set_bit(conn->flags, BT_CONN_BR_PAIRING);
757 		bt_auth->pincode_entry(conn, secure);
758 	} else {
759 		pin_code_neg_reply(&conn->br.dst);
760 	}
761 }
762 
763 u8_t bt_conn_get_io_capa(void)
764 {
765 	if (!bt_auth) {
766 		return BT_IO_NO_INPUT_OUTPUT;
767 	}
768 
769 	if (bt_auth->passkey_confirm && bt_auth->passkey_display) {
770 		return BT_IO_DISPLAY_YESNO;
771 	}
772 
773 	if (bt_auth->passkey_entry) {
774 		return BT_IO_KEYBOARD_ONLY;
775 	}
776 
777 	if (bt_auth->passkey_display) {
778 		return BT_IO_DISPLAY_ONLY;
779 	}
780 
781 	return BT_IO_NO_INPUT_OUTPUT;
782 }
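
/* Illustrative sketch: the IO capability reported above depends solely on
 * which handlers the application registered. Registering both
 * passkey_display and passkey_confirm, for instance, yields
 * BT_IO_DISPLAY_YESNO. The registration entry point and the handler names
 * below are assumptions (application-side code, not defined in this file):
 *
 *	static struct bt_conn_auth_cb auth_cb = {
 *		.passkey_display = app_passkey_display,
 *		.passkey_confirm = app_passkey_confirm,
 *	};
 *
 *	bt_conn_auth_cb_register(&auth_cb);
 */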
783 
784 static u8_t ssp_pair_method(const struct bt_conn *conn)
785 {
786 	return ssp_method[conn->br.remote_io_capa][bt_conn_get_io_capa()];
787 }
788 
789 u8_t bt_conn_ssp_get_auth(const struct bt_conn *conn)
790 {
791 	/* Honor a remote "no bonding" auth requirement when it can be satisfied as-is. */
792 	if ((conn->br.remote_auth == BT_HCI_NO_BONDING) ||
793 	    ((conn->br.remote_auth == BT_HCI_NO_BONDING_MITM) &&
794 	     (ssp_pair_method(conn) > JUST_WORKS))) {
795 		return conn->br.remote_auth;
796 	}
797 
798 	/* Local & remote have enough IO capabilities to get MITM protection. */
799 	if (ssp_pair_method(conn) > JUST_WORKS) {
800 		return conn->br.remote_auth | BT_MITM;
801 	}
802 
803 	/* No MITM protection possible so ignore remote MITM requirement. */
804 	return (conn->br.remote_auth & ~BT_MITM);
805 }
806 
807 static int ssp_confirm_reply(struct bt_conn *conn)
808 {
809 	struct bt_hci_cp_user_confirm_reply *cp;
810 	struct net_buf *buf;
811 
812 	BT_DBG("");
813 
814 	buf = bt_hci_cmd_create(BT_HCI_OP_USER_CONFIRM_REPLY, sizeof(*cp));
815 	if (!buf) {
816 		return -ENOBUFS;
817 	}
818 
819 	cp = net_buf_add(buf, sizeof(*cp));
820 	bt_addr_copy(&cp->bdaddr, &conn->br.dst);
821 
822 	return bt_hci_cmd_send_sync(BT_HCI_OP_USER_CONFIRM_REPLY, buf, NULL);
823 }
824 
825 static int ssp_confirm_neg_reply(struct bt_conn *conn)
826 {
827 	struct bt_hci_cp_user_confirm_reply *cp;
828 	struct net_buf *buf;
829 
830 	BT_DBG("");
831 
832 	buf = bt_hci_cmd_create(BT_HCI_OP_USER_CONFIRM_NEG_REPLY, sizeof(*cp));
833 	if (!buf) {
834 		return -ENOBUFS;
835 	}
836 
837 	cp = net_buf_add(buf, sizeof(*cp));
838 	bt_addr_copy(&cp->bdaddr, &conn->br.dst);
839 
840 	return bt_hci_cmd_send_sync(BT_HCI_OP_USER_CONFIRM_NEG_REPLY, buf,
841 				    NULL);
842 }
843 
844 void bt_conn_ssp_auth_complete(struct bt_conn *conn, u8_t status)
845 {
846 	if (!status) {
847 		bool bond = !atomic_test_bit(conn->flags, BT_CONN_BR_NOBOND);
848 
849 		if (bt_auth && bt_auth->pairing_complete) {
850 			bt_auth->pairing_complete(conn, bond);
851 		}
852 	} else {
853 		if (bt_auth && bt_auth->pairing_failed) {
854 			bt_auth->pairing_failed(conn, status);
855 		}
856 	}
857 }
858 
859 void bt_conn_ssp_auth(struct bt_conn *conn, bt_u32_t passkey)
860 {
861 	conn->br.pairing_method = ssp_pair_method(conn);
862 
863 	/*
864 	 * If local required security is HIGH then MITM is mandatory.
865 	 * MITM protection is not achievable when SSP 'Just Works' is applied.
866 	 */
867 	if (conn->required_sec_level > BT_SECURITY_L2 &&
868 	    conn->br.pairing_method == JUST_WORKS) {
869 		BT_DBG("MITM protection infeasible for required security");
870 		ssp_confirm_neg_reply(conn);
871 		return;
872 	}
873 
874 	switch (conn->br.pairing_method) {
875 	case PASSKEY_CONFIRM:
876 		atomic_set_bit(conn->flags, BT_CONN_USER);
877 		bt_auth->passkey_confirm(conn, passkey);
878 		break;
879 	case PASSKEY_DISPLAY:
880 		atomic_set_bit(conn->flags, BT_CONN_USER);
881 		bt_auth->passkey_display(conn, passkey);
882 		break;
883 	case PASSKEY_INPUT:
884 		atomic_set_bit(conn->flags, BT_CONN_USER);
885 		bt_auth->passkey_entry(conn);
886 		break;
887 	case JUST_WORKS:
888 		/*
889 		 * When the local host acts as the pairing acceptor and the 'Just Works'
890 		 * model applies, notify the user about such a pairing request.
891 		 * [BT Core 4.2 table 5.7, Vol 3, Part C, 5.2.2.6]
892 		 */
893 		if (bt_auth && bt_auth->pairing_confirm &&
894 		    !atomic_test_bit(conn->flags,
895 				     BT_CONN_BR_PAIRING_INITIATOR)) {
896 			atomic_set_bit(conn->flags, BT_CONN_USER);
897 			bt_auth->pairing_confirm(conn);
898 			break;
899 		}
900 		ssp_confirm_reply(conn);
901 		break;
902 	default:
903 		break;
904 	}
905 }
906 
907 static int ssp_passkey_reply(struct bt_conn *conn, unsigned int passkey)
908 {
909 	struct bt_hci_cp_user_passkey_reply *cp;
910 	struct net_buf *buf;
911 
912 	BT_DBG("");
913 
914 	buf = bt_hci_cmd_create(BT_HCI_OP_USER_PASSKEY_REPLY, sizeof(*cp));
915 	if (!buf) {
916 		return -ENOBUFS;
917 	}
918 
919 	cp = net_buf_add(buf, sizeof(*cp));
920 	bt_addr_copy(&cp->bdaddr, &conn->br.dst);
921 	cp->passkey = sys_cpu_to_le32(passkey);
922 
923 	return bt_hci_cmd_send_sync(BT_HCI_OP_USER_PASSKEY_REPLY, buf, NULL);
924 }
925 
926 static int ssp_passkey_neg_reply(struct bt_conn *conn)
927 {
928 	struct bt_hci_cp_user_passkey_neg_reply *cp;
929 	struct net_buf *buf;
930 
931 	BT_DBG("");
932 
933 	buf = bt_hci_cmd_create(BT_HCI_OP_USER_PASSKEY_NEG_REPLY, sizeof(*cp));
934 	if (!buf) {
935 		return -ENOBUFS;
936 	}
937 
938 	cp = net_buf_add(buf, sizeof(*cp));
939 	bt_addr_copy(&cp->bdaddr, &conn->br.dst);
940 
941 	return bt_hci_cmd_send_sync(BT_HCI_OP_USER_PASSKEY_NEG_REPLY, buf,
942 				    NULL);
943 }
944 
945 static int bt_hci_connect_br_cancel(struct bt_conn *conn)
946 {
947 	struct bt_hci_cp_connect_cancel *cp;
948 	struct bt_hci_rp_connect_cancel *rp;
949 	struct net_buf *buf, *rsp;
950 	int err;
951 
952 	buf = bt_hci_cmd_create(BT_HCI_OP_CONNECT_CANCEL, sizeof(*cp));
953 	if (!buf) {
954 		return -ENOBUFS;
955 	}
956 
957 	cp = net_buf_add(buf, sizeof(*cp));
958 	memcpy(&cp->bdaddr, &conn->br.dst, sizeof(cp->bdaddr));
959 
960 	err = bt_hci_cmd_send_sync(BT_HCI_OP_CONNECT_CANCEL, buf, &rsp);
961 	if (err) {
962 		return err;
963 	}
964 
965 	rp = (void *)rsp->data;
966 
967 	err = rp->status ? -EIO : 0;
968 
969 	net_buf_unref(rsp);
970 
971 	return err;
972 }
973 
974 static int conn_auth(struct bt_conn *conn)
975 {
976 	struct bt_hci_cp_auth_requested *auth;
977 	struct net_buf *buf;
978 
979 	BT_DBG("");
980 
981 	buf = bt_hci_cmd_create(BT_HCI_OP_AUTH_REQUESTED, sizeof(*auth));
982 	if (!buf) {
983 		return -ENOBUFS;
984 	}
985 
986 	auth = net_buf_add(buf, sizeof(*auth));
987 	auth->handle = sys_cpu_to_le16(conn->handle);
988 
989 	atomic_set_bit(conn->flags, BT_CONN_BR_PAIRING_INITIATOR);
990 
991 	return bt_hci_cmd_send_sync(BT_HCI_OP_AUTH_REQUESTED, buf, NULL);
992 }
993 #endif /* CONFIG_BT_BREDR */
994 
995 #if defined(CONFIG_BT_SMP)
996 void bt_conn_identity_resolved(struct bt_conn *conn)
997 {
998 	const bt_addr_le_t *rpa;
999 	struct bt_conn_cb *cb;
1000 
1001 	if (conn->role == BT_HCI_ROLE_MASTER) {
1002 		rpa = &conn->le.resp_addr;
1003 	} else {
1004 		rpa = &conn->le.init_addr;
1005 	}
1006 
1007 	for (cb = callback_list; cb; cb = cb->_next) {
1008 		if (cb->identity_resolved) {
1009 			cb->identity_resolved(conn, rpa, &conn->le.dst);
1010 		}
1011 	}
1012 }
1013 
1014 int bt_conn_le_start_encryption(struct bt_conn *conn, u8_t rand[8],
1015 				u8_t ediv[2], const u8_t *ltk, size_t len)
1016 {
1017 #if !defined(CONFIG_BT_USE_HCI_API)
1018 	struct bt_hci_cp_le_start_encryption *cp;
1019 	struct net_buf *buf;
1020 
1021 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_START_ENCRYPTION, sizeof(*cp));
1022 	if (!buf) {
1023 		return -ENOBUFS;
1024 	}
1025 
1026 	cp = net_buf_add(buf, sizeof(*cp));
1027 	cp->handle = sys_cpu_to_le16(conn->handle);
1028 	memcpy(&cp->rand, rand, sizeof(cp->rand));
1029 	memcpy(&cp->ediv, ediv, sizeof(cp->ediv));
1030 
1031 	memcpy(cp->ltk, ltk, len);
1032 	if (len < sizeof(cp->ltk)) {
1033 		(void)memset(cp->ltk + len, 0, sizeof(cp->ltk) - len);
1034 	}
1035 
1036 	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_START_ENCRYPTION, buf, NULL);
1037 #else
1038 	u8_t ltk_buf[16];
1039 
1040 	memcpy(ltk_buf, ltk, len);
1041 
1042 	if (len < sizeof(ltk_buf)) {
1043 		memset(ltk_buf + len, 0, sizeof(ltk_buf) - len);
1044 	}
1045 
1046 	return hci_api_le_start_encrypt(conn->handle, rand, ediv, ltk_buf);
1047 #endif
1048 }
1049 #endif /* CONFIG_BT_SMP */
1050 
1051 #if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR)
1052 u8_t bt_conn_enc_key_size(struct bt_conn *conn)
1053 {
1054 	if (!conn->encrypt) {
1055 		return 0;
1056 	}
1057 
1058 	if (IS_ENABLED(CONFIG_BT_BREDR) &&
1059 	    conn->type == BT_CONN_TYPE_BR) {
1060 		struct bt_hci_cp_read_encryption_key_size *cp;
1061 		struct bt_hci_rp_read_encryption_key_size *rp;
1062 		struct net_buf *buf;
1063 		struct net_buf *rsp;
1064 		u8_t key_size;
1065 
1066 		buf = bt_hci_cmd_create(BT_HCI_OP_READ_ENCRYPTION_KEY_SIZE,
1067 					sizeof(*cp));
1068 		if (!buf) {
1069 			return 0;
1070 		}
1071 
1072 		cp = net_buf_add(buf, sizeof(*cp));
1073 		cp->handle = sys_cpu_to_le16(conn->handle);
1074 
1075 		if (bt_hci_cmd_send_sync(BT_HCI_OP_READ_ENCRYPTION_KEY_SIZE,
1076 					buf, &rsp)) {
1077 			return 0;
1078 		}
1079 
1080 		rp = (void *)rsp->data;
1081 
1082 		key_size = rp->status ? 0 : rp->key_size;
1083 
1084 		net_buf_unref(rsp);
1085 
1086 		return key_size;
1087 	}
1088 
1089 	if (IS_ENABLED(CONFIG_BT_SMP)) {
1090 		return conn->le.keys ? conn->le.keys->enc_size : 0;
1091 	}
1092 
1093 	return 0;
1094 }
1095 
1096 void bt_conn_security_changed(struct bt_conn *conn, enum bt_security_err err)
1097 {
1098 	struct bt_conn_cb *cb;
1099 
1100 	for (cb = callback_list; cb; cb = cb->_next) {
1101 		if (cb->security_changed) {
1102 			cb->security_changed(conn, conn->sec_level, err);
1103 		}
1104 	}
1105 #if IS_ENABLED(CONFIG_BT_KEYS_OVERWRITE_OLDEST)
1106 	if (!err && conn->sec_level >= BT_SECURITY_L2) {
1107 		bt_keys_update_usage(conn->id, bt_conn_get_dst(conn));
1108 	}
1109 #endif
1110 }
1111 
1112 static int start_security(struct bt_conn *conn)
1113 {
1114 #if defined(CONFIG_BT_BREDR)
1115 	if (conn->type == BT_CONN_TYPE_BR) {
1116 		if (atomic_test_bit(conn->flags, BT_CONN_BR_PAIRING)) {
1117 			return -EBUSY;
1118 		}
1119 
1120 		if (conn->required_sec_level > BT_SECURITY_L3) {
1121 			return -ENOTSUP;
1122 		}
1123 
1124 		if (bt_conn_get_io_capa() == BT_IO_NO_INPUT_OUTPUT &&
1125 		    conn->required_sec_level > BT_SECURITY_L2) {
1126 			return -EINVAL;
1127 		}
1128 
1129 		return conn_auth(conn);
1130 	}
1131 #endif /* CONFIG_BT_BREDR */
1132 
1133 	if (IS_ENABLED(CONFIG_BT_SMP)) {
1134 		return bt_smp_start_security(conn);
1135 	}
1136 
1137 	return -EINVAL;
1138 }
1139 
1140 int bt_conn_set_security(struct bt_conn *conn, bt_security_t sec)
1141 {
1142 	int err;
1143 
1144 	if (conn->state != BT_CONN_CONNECTED) {
1145 		return -ENOTCONN;
1146 	}
1147 
1148 	if (IS_ENABLED(CONFIG_BT_SMP_SC_ONLY) &&
1149 	    sec < BT_SECURITY_L4) {
1150 		return -EOPNOTSUPP;
1151 	}
1152 
1153 	if (IS_ENABLED(CONFIG_BT_SMP_OOB_LEGACY_PAIR_ONLY) &&
1154 	    sec > BT_SECURITY_L3) {
1155 		return -EOPNOTSUPP;
1156 	}
1157 
1158 	/* nothing to do */
1159 	if (conn->sec_level >= sec || conn->required_sec_level >= sec) {
1160 		return 0;
1161 	}
1162 
1163 	atomic_set_bit_to(conn->flags, BT_CONN_FORCE_PAIR,
1164 			  sec & BT_SECURITY_FORCE_PAIR);
1165 	conn->required_sec_level = sec & ~BT_SECURITY_FORCE_PAIR;
1166 
1167 	err = start_security(conn);
1168 
1169 	/* reset required security level in case of error */
1170 	if (err) {
1171 		conn->required_sec_level = conn->sec_level;
1172 	}
1173 
1174 	return err;
1175 }
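
/* Typical application usage (illustrative sketch): elevate an established
 * link to encrypted (BT_SECURITY_L2). On completion the security_changed()
 * callbacks registered via bt_conn_cb_register() are invoked.
 *
 *	int err = bt_conn_set_security(conn, BT_SECURITY_L2);
 *	if (err) {
 *		BT_WARN("Failed to start security (err %d)", err);
 *	}
 */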
1176 
1177 bt_security_t bt_conn_get_security(struct bt_conn *conn)
1178 {
1179 	return conn->sec_level;
1180 }
1181 #else
1182 bt_security_t bt_conn_get_security(struct bt_conn *conn)
1183 {
1184 	return BT_SECURITY_L1;
1185 }
1186 #endif /* CONFIG_BT_SMP */
1187 
1188 void bt_conn_cb_register(struct bt_conn_cb *cb)
1189 {
1190 	cb->_next = callback_list;
1191 	callback_list = cb;
1192 }
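
/* Registration sketch (illustrative, application side). The struct is linked
 * into callback_list rather than copied, so it must have static storage
 * duration; the handler names and exact signatures are assumptions based on
 * how the callbacks are invoked above.
 *
 *	static void app_connected(struct bt_conn *conn, u8_t err)
 *	{
 *		BT_DBG("connected, err %u", err);
 *	}
 *
 *	static void app_disconnected(struct bt_conn *conn, u8_t reason)
 *	{
 *		BT_DBG("disconnected, reason %u", reason);
 *	}
 *
 *	static struct bt_conn_cb conn_cb = {
 *		.connected = app_connected,
 *		.disconnected = app_disconnected,
 *	};
 *
 *	bt_conn_cb_register(&conn_cb);
 */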
1193 
1194 static void bt_conn_reset_rx_state(struct bt_conn *conn)
1195 {
1196 	if (!conn->rx_len) {
1197 		return;
1198 	}
1199 
1200 	net_buf_unref(conn->rx);
1201 	conn->rx = NULL;
1202 	conn->rx_len = 0U;
1203 }
1204 
1205 void bt_conn_recv(struct bt_conn *conn, struct net_buf *buf, u8_t flags)
1206 {
1207 	struct bt_l2cap_hdr *hdr;
1208 	u16_t len;
1209 
1210 	/* Make sure we notify any pending TX callbacks before processing
1211 	 * new data for this connection.
1212 	 */
1213 	tx_notify(conn);
1214 
1215 	BT_DBG("handle %u len %u flags %02x", conn->handle, buf->len, flags);
1216 
1217 	/* Check packet boundary flags */
1218 	switch (flags) {
1219 	case BT_ACL_START:
1220 		hdr = (void *)buf->data;
1221 		len = sys_le16_to_cpu(hdr->len);
1222 
1223 		BT_DBG("First, len %u final %u", buf->len, len);
1224 
1225 		if (conn->rx_len) {
1226 			BT_ERR("Unexpected first L2CAP frame");
1227 			bt_conn_reset_rx_state(conn);
1228 		}
1229 
1230 		conn->rx_len = (sizeof(*hdr) + len) - buf->len;
1231 		BT_DBG("rx_len %u", conn->rx_len);
1232 		if (conn->rx_len) {
1233 			conn->rx = buf;
1234 			return;
1235 		}
1236 
1237 		break;
1238 	case BT_ACL_CONT:
1239 		if (!conn->rx_len) {
1240 			BT_ERR("Unexpected L2CAP continuation");
1241 			bt_conn_reset_rx_state(conn);
1242 			net_buf_unref(buf);
1243 			return;
1244 		}
1245 
1246 		if (buf->len > conn->rx_len) {
1247 			BT_ERR("L2CAP data overflow");
1248 			bt_conn_reset_rx_state(conn);
1249 			net_buf_unref(buf);
1250 			return;
1251 		}
1252 
1253 		BT_DBG("Cont, len %u rx_len %u", buf->len, conn->rx_len);
1254 
1255 		if (buf->len > net_buf_tailroom(conn->rx)) {
1256 			BT_ERR("Not enough buffer space for L2CAP data");
1257 			bt_conn_reset_rx_state(conn);
1258 			net_buf_unref(buf);
1259 			return;
1260 		}
1261 
1262 		net_buf_add_mem(conn->rx, buf->data, buf->len);
1263 		conn->rx_len -= buf->len;
1264 		net_buf_unref(buf);
1265 
1266 		if (conn->rx_len) {
1267 			return;
1268 		}
1269 
1270 		buf = conn->rx;
1271 		conn->rx = NULL;
1272 		conn->rx_len = 0U;
1273 
1274 		break;
1275 	default:
1276 		/* BT_ACL_START_NO_FLUSH and BT_ACL_COMPLETE are not allowed on
1277 		 * LE-U from Controller to Host.
1278 		 * Only BT_ACL_POINT_TO_POINT is supported.
1279 		 */
1280 		BT_ERR("Unexpected ACL flags (0x%02x)", flags);
1281 		bt_conn_reset_rx_state(conn);
1282 		net_buf_unref(buf);
1283 		return;
1284 	}
1285 
1286 	hdr = (void *)buf->data;
1287 	len = sys_le16_to_cpu(hdr->len);
1288 
1289 	if (sizeof(*hdr) + len != buf->len) {
1290 		BT_ERR("ACL len mismatch (%u != %u)", len, buf->len);
1291 		net_buf_unref(buf);
1292 		return;
1293 	}
1294 
1295 	BT_DBG("Successfully parsed %u byte L2CAP packet", buf->len);
1296 
1297 	bt_l2cap_recv(conn, buf);
1298 }
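
/* Worked example of the reassembly arithmetic above (numbers are
 * illustrative): for an L2CAP PDU with hdr->len = 100 whose BT_ACL_START
 * fragment carries the 4-byte basic L2CAP header plus 23 payload bytes
 * (buf->len = 27), rx_len = (4 + 100) - 27 = 77, so 77 more bytes are
 * expected in BT_ACL_CONT fragments before the PDU is passed to
 * bt_l2cap_recv().
 */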
1299 
1300 static struct bt_conn_tx *conn_tx_alloc(void)
1301 {
1302 	//sys_snode_t *node;
1303 	/* The TX context always gets freed in the system workqueue,
1304 	 * so if we're in the same workqueue but there are no immediate
1305 	 * contexts available, there's no chance we'll get one by waiting.
1306 	 */
1307 	//if (k_current_get() == &k_sys_work_q.thread) {
1308 	//	return k_fifo_get(&free_tx, K_NO_WAIT);
1309 	//}
1310 
1311 	if (IS_ENABLED(CONFIG_BT_DEBUG_CONN)) {
1312 		struct bt_conn_tx *tx = k_fifo_get(&free_tx, K_NO_WAIT);
1313 
1314 		if (tx) {
1315 			return tx;
1316 		}
1317 
1318 		BT_WARN("Unable to get an immediate free conn_tx");
1319 	}
1320 
1321 	return k_fifo_get(&free_tx, K_FOREVER);
1322 }
1323 
1324 int bt_conn_send_cb(struct bt_conn *conn, struct net_buf *buf,
1325 		    bt_conn_tx_cb_t cb, void *user_data)
1326 {
1327 	struct bt_conn_tx *tx;
1328 
1329 	BT_DBG("conn handle %u buf len %u cb %p user_data %p", conn->handle,
1330 	       buf->len, cb, user_data);
1331 
1332 	if (conn->state != BT_CONN_CONNECTED) {
1333 		BT_ERR("not connected!");
1334 		net_buf_unref(buf);
1335 		return -ENOTCONN;
1336 	}
1337 
1338 	if (cb) {
1339 		tx = conn_tx_alloc();
1340 		if (!tx) {
1341 			BT_ERR("Unable to allocate TX context");
1342 			net_buf_unref(buf);
1343 			return -ENOBUFS;
1344 		}
1345 
1346 		/* Verify that we're still connected after blocking */
1347 		if (conn->state != BT_CONN_CONNECTED) {
1348 			BT_WARN("Disconnected while allocating context");
1349 			net_buf_unref(buf);
1350 			tx_free(tx);
1351 			return -ENOTCONN;
1352 		}
1353 
1354 		tx->cb = cb;
1355 		tx->user_data = user_data;
1356 		tx->pending_no_cb = 0U;
1357 
1358 		tx_data(buf)->tx = tx;
1359 	} else {
1360 		tx_data(buf)->tx = NULL;
1361 	}
1362 
1363 	net_buf_put(&conn->tx_queue, buf);
1364 
1365 	return 0;
1366 }
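
/* Usage sketch (illustrative, higher-layer side): queue an ACL payload with
 * a completion callback. The buffer is assumed to come from a pool with
 * L2CAP/ACL headroom reserved (e.g. acl_tx_pool above); the callback is run
 * later from tx_notify() once the controller reports the packet completed.
 * On error the buffer has already been unreferenced by bt_conn_send_cb().
 *
 *	static void sent_cb(struct bt_conn *conn, void *user_data)
 *	{
 *		BT_DBG("packet sent on conn %p", conn);
 *	}
 *
 *	int err = bt_conn_send_cb(conn, buf, sent_cb, NULL);
 *	if (err) {
 *		BT_WARN("send failed (err %d)", err);
 *	}
 */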
1367 
1368 static bool send_frag(struct bt_conn *conn, struct net_buf *buf, u8_t flags,
1369 		      bool always_consume)
1370 {
1371 	struct bt_conn_tx *tx = tx_data(buf)->tx;
1372 	struct bt_hci_acl_hdr *hdr;
1373 	bt_u32_t *pending_no_cb;
1374 	unsigned int key;
1375 	int err;
1376 
1377 	BT_DBG("conn %p buf %p len %u flags 0x%02x", conn, buf, buf->len,
1378 	       flags);
1379 
1380 	/* Wait until the controller can accept ACL packets */
1381 	k_sem_take(bt_conn_get_pkts(conn), K_FOREVER);
1382 
1383 	/* Check for disconnection while waiting for pkts_sem */
1384 	if (conn->state != BT_CONN_CONNECTED) {
1385 		goto fail;
1386 	}
1387 
1388 	hdr = net_buf_push(buf, sizeof(*hdr));
1389 	hdr->handle = sys_cpu_to_le16(bt_acl_handle_pack(conn->handle, flags));
1390 	hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));
1391 
1392 	/* Add to pending, it must be done before bt_buf_set_type */
1393 	key = irq_lock();
1394 	if (tx) {
1395 		sys_slist_append(&conn->tx_pending, &tx->node);
1396 	} else {
1397 		struct bt_conn_tx *tail_tx;
1398 
1399 		tail_tx = (void *)sys_slist_peek_tail(&conn->tx_pending);
1400 		if (tail_tx) {
1401 			pending_no_cb = &tail_tx->pending_no_cb;
1402 		} else {
1403 			pending_no_cb = &conn->pending_no_cb;
1404 		}
1405 
1406 		(*pending_no_cb)++;
1407 	}
1408 	irq_unlock(key);
1409 
1410 	bt_buf_set_type(buf, BT_BUF_ACL_OUT);
1411 
1412 	err = bt_send(buf);
1413 	if (err) {
1414 		BT_ERR("Unable to send to driver (err %d)", err);
1415 		key = irq_lock();
1416 		/* Roll back the pending TX info */
1417 		if (tx) {
1418 			sys_slist_find_and_remove(&conn->tx_pending, &tx->node);
1419 		} else {
1420 			__ASSERT_NO_MSG(*pending_no_cb > 0);
1421 			(*pending_no_cb)--;
1422 		}
1423 		irq_unlock(key);
1424 		goto fail;
1425 	}
1426 
1427 	return true;
1428 
1429 fail:
1430 	k_sem_give(bt_conn_get_pkts(conn));
1431 	if (tx) {
1432 		tx_free(tx);
1433 	}
1434 
1435 	if (always_consume) {
1436 		net_buf_unref(buf);
1437 	}
1438 	return false;
1439 }
1440 
1441 static inline u16_t conn_mtu(struct bt_conn *conn)
1442 {
1443 #if defined(CONFIG_BT_BREDR)
1444 	if (conn->type == BT_CONN_TYPE_BR || !bt_dev.le.mtu) {
1445 		return bt_dev.br.mtu;
1446 	}
1447 #endif /* CONFIG_BT_BREDR */
1448 
1449 	return bt_dev.le.mtu;
1450 }
1451 
1452 static struct net_buf *create_frag(struct bt_conn *conn, struct net_buf *buf)
1453 {
1454 	struct net_buf *frag;
1455 	u16_t frag_len;
1456 
1457 	frag = bt_conn_create_frag(0);
1458 
1459 	if (conn->state != BT_CONN_CONNECTED) {
1460 		net_buf_unref(frag);
1461 		return NULL;
1462 	}
1463 
1464 	/* Fragments never have a TX completion callback */
1465 	tx_data(frag)->tx = NULL;
1466 
1467 	frag_len = MIN(conn_mtu(conn), net_buf_tailroom(frag));
1468 
1469 	net_buf_add_mem(frag, buf->data, frag_len);
1470 	net_buf_pull(buf, frag_len);
1471 
1472 	return frag;
1473 }
1474 
1475 static bool send_buf(struct bt_conn *conn, struct net_buf *buf)
1476 {
1477 	struct net_buf *frag;
1478 
1479 	BT_DBG("conn %p buf %p len %u", conn, buf, buf->len);
1480 
1481 	/* Send directly if the packet fits the ACL MTU */
1482 	if (buf->len <= conn_mtu(conn)) {
1483 		return send_frag(conn, buf, BT_ACL_START_NO_FLUSH, false);
1484 	}
1485 
1486 	/* Create & enqueue first fragment */
1487 	frag = create_frag(conn, buf);
1488 	if (!frag) {
1489 		return false;
1490 	}
1491 
1492 	if (!send_frag(conn, frag, BT_ACL_START_NO_FLUSH, true)) {
1493 		return false;
1494 	}
1495 
1496 	/*
1497 	 * Send the fragments. For the last one simply use the original
1498 	 * buffer (which works since we've used net_buf_pull on it).
1499 	 */
1500 	while (buf->len > conn_mtu(conn)) {
1501 		frag = create_frag(conn, buf);
1502 		if (!frag) {
1503 			return false;
1504 		}
1505 
1506 		if (!send_frag(conn, frag, BT_ACL_CONT, true)) {
1507 			return false;
1508 		}
1509 	}
1510 
1511 	return send_frag(conn, buf, BT_ACL_CONT, false);
1512 }
1513 
1514 static struct k_poll_signal conn_change =
1515 		K_POLL_SIGNAL_INITIALIZER(conn_change);
1516 
1517 static void conn_cleanup(struct bt_conn *conn)
1518 {
1519 	struct net_buf *buf;
1520 
1521 	/* Give back any allocated buffers */
1522 	while ((buf = net_buf_get(&conn->tx_queue, K_NO_WAIT))) {
1523 		if (tx_data(buf)->tx) {
1524 			tx_free(tx_data(buf)->tx);
1525 		}
1526 
1527 		net_buf_unref(buf);
1528 	}
1529 
1530 	__ASSERT(sys_slist_is_empty(&conn->tx_pending), "Pending TX packets");
1531 	__ASSERT_NO_MSG(conn->pending_no_cb == 0);
1532 
1533 	bt_conn_reset_rx_state(conn);
1534 
1535 	k_delayed_work_submit(&conn->update_work, K_NO_WAIT);
1536 }
1537 
1538 int bt_conn_prepare_events(struct k_poll_event events[])
1539 {
1540 	int i, ev_count = 0;
1541 
1542 //	BT_DBG("");
1543 
1544 	conn_change.signaled = 0U;
1545 	k_poll_event_init(&events[ev_count++], K_POLL_TYPE_SIGNAL,
1546 			  K_POLL_MODE_NOTIFY_ONLY, &conn_change);
1547 
1548 	for (i = 0; i < ARRAY_SIZE(conns); i++) {
1549 		struct bt_conn *conn = &conns[i];
1550 
1551 		if (!atomic_get(&conn->ref)) {
1552 			continue;
1553 		}
1554 
1555 		if (conn->state == BT_CONN_DISCONNECTED &&
1556 		    atomic_test_and_clear_bit(conn->flags, BT_CONN_CLEANUP)) {
1557 			conn_cleanup(conn);
1558 			continue;
1559 		}
1560 
1561 		if (conn->state != BT_CONN_CONNECTED) {
1562 			continue;
1563 		}
1564 
1565 		BT_DBG("Adding conn %p to poll list", conn);
1566 
1567 		k_poll_event_init(&events[ev_count],
1568 				  K_POLL_TYPE_FIFO_DATA_AVAILABLE,
1569 				  K_POLL_MODE_NOTIFY_ONLY,
1570 				  &conn->tx_queue);
1571 		events[ev_count++].tag = BT_EVENT_CONN_TX_QUEUE;
1572 	}
1573 
1574 	return ev_count;
1575 }
1576 
1577 void bt_conn_process_tx(struct bt_conn *conn)
1578 {
1579 	struct net_buf *buf;
1580 
1581 	BT_DBG("conn %p", conn);
1582 
1583 	if (conn->state == BT_CONN_DISCONNECTED &&
1584 	    atomic_test_and_clear_bit(conn->flags, BT_CONN_CLEANUP)) {
1585 		BT_DBG("handle %u disconnected - cleaning up", conn->handle);
1586 		conn_cleanup(conn);
1587 		return;
1588 	}
1589 
1590 	/* Get next ACL packet for connection */
1591 	buf = net_buf_get(&conn->tx_queue, K_NO_WAIT);
1592 	BT_ASSERT(buf);
1593 	if (!send_buf(conn, buf)) {
1594 		net_buf_unref(buf);
1595 	}
1596 }
1597 
1598 bool bt_conn_exists_le(u8_t id, const bt_addr_le_t *peer)
1599 {
1600 	struct bt_conn *conn = bt_conn_lookup_addr_le(id, peer);
1601 
1602 	if (conn) {
1603 		/* Connection object already exists.
1604 		 * If the connection state is not "disconnected", then the
1605 		 * connection was created but has not yet been disconnected.
1606 		 * If the connection state is "disconnected" then the connection
1607 		 * still has valid references. The last reference of the stack
1608 		 * is released after the disconnected callback.
1609 		 */
1610 		BT_WARN("Found valid connection in %s state",
1611 			state2str(conn->state));
1612 		bt_conn_unref(conn);
1613 		return true;
1614 	}
1615 
1616 	return false;
1617 }
1618 
1619 struct bt_conn *bt_conn_add_le(u8_t id, const bt_addr_le_t *peer)
1620 {
1621 	struct bt_conn *conn = conn_new();
1622 
1623 	if (!conn) {
1624 		return NULL;
1625 	}
1626 
1627 	conn->id = id;
1628 	bt_addr_le_copy(&conn->le.dst, peer);
1629 #if defined(CONFIG_BT_SMP)
1630 	conn->sec_level = BT_SECURITY_L1;
1631 	conn->required_sec_level = BT_SECURITY_L1;
1632 #endif /* CONFIG_BT_SMP */
1633 	conn->type = BT_CONN_TYPE_LE;
1634 	conn->le.interval_min = BT_GAP_INIT_CONN_INT_MIN;
1635 	conn->le.interval_max = BT_GAP_INIT_CONN_INT_MAX;
1636 
1637 	return conn;
1638 }
1639 
1640 static void process_unack_tx(struct bt_conn *conn)
1641 {
1642 	/* Return any unacknowledged packets */
1643 	while (1) {
1644 		struct bt_conn_tx *tx;
1645 		sys_snode_t *node;
1646 		unsigned int key;
1647 
1648 		key = irq_lock();
1649 
1650 		if (conn->pending_no_cb) {
1651 			conn->pending_no_cb--;
1652 			irq_unlock(key);
1653 			k_sem_give(bt_conn_get_pkts(conn));
1654 			continue;
1655 		}
1656 
1657 		node = sys_slist_get(&conn->tx_pending);
1658 		irq_unlock(key);
1659 
1660 		if (!node) {
1661 			break;
1662 		}
1663 
1664 		tx = CONTAINER_OF(node, struct bt_conn_tx, node);
1665 
1666 		key = irq_lock();
1667 		conn->pending_no_cb = tx->pending_no_cb;
1668 		tx->pending_no_cb = 0U;
1669 		irq_unlock(key);
1670 
1671 		tx_free(tx);
1672 
1673 		k_sem_give(bt_conn_get_pkts(conn));
1674 	}
1675 }
1676 
1677 void bt_conn_set_state(struct bt_conn *conn, bt_conn_state_t state)
1678 {
1679 	bt_conn_state_t old_state;
1680 
1681 	BT_DBG("%s -> %s", state2str(conn->state), state2str(state));
1682 
1683 	if (conn->state == state) {
1684 		BT_WARN("no transition %s", state2str(state));
1685 		return;
1686 	}
1687 
1688 	old_state = conn->state;
1689 	conn->state = state;
1690 
1691 	/* Actions needed for exiting the old state */
1692 	switch (old_state) {
1693 	case BT_CONN_DISCONNECTED:
1694 		/* Take a reference for the first state transition after
1695 		 * bt_conn_add_le() and keep it until reaching DISCONNECTED
1696 		 * again.
1697 		 */
1698 		bt_conn_ref(conn);
1699 		break;
1700 	case BT_CONN_CONNECT:
1701 		if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
1702 		    conn->type == BT_CONN_TYPE_LE) {
1703 			BT_DBG("k_delayed_work_cancel(&conn->update_work)");
1704 			k_delayed_work_cancel(&conn->update_work);
1705 		}
1706 		break;
1707 	default:
1708 		break;
1709 	}
1710 
1711 	/* Actions needed for entering the new state */
1712 	switch (conn->state) {
1713 	case BT_CONN_CONNECTED:
1714 		if (conn->type == BT_CONN_TYPE_SCO) {
1715 			/* TODO: Notify sco connected */
1716 			break;
1717 		}
1718 
1719 		bt_dev.le.mtu = bt_dev.le.mtu_init;
1720 		k_fifo_init(&conn->tx_queue);
1721 		k_poll_signal_raise(&conn_change, 0);
1722 
1723 		sys_slist_init(&conn->channels);
1724 
1725 		bt_l2cap_connected(conn);
1726 		notify_connected(conn);
1727 		break;
1728 	case BT_CONN_DISCONNECTED:
1729 		if (conn->type == BT_CONN_TYPE_SCO) {
1730 			/* TODO: Notify sco disconnected */
1731 			bt_conn_unref(conn);
1732 			break;
1733 		}
1734 
1735 		/* Notify disconnection and clean up the TX path for the
1736 		 * states where it was running (CONNECTED or in the middle
1737 		 * of DISCONNECT).
1738 		 */
1739 		switch (old_state) {
1740 		case BT_CONN_CONNECTED:
1741 		case BT_CONN_DISCONNECT:
1742 			process_unack_tx(conn);
1743 			tx_notify(conn);
1744 			bt_conn_del(conn);
1745 			bt_l2cap_disconnected(conn);
1746 			notify_disconnected(conn);
1747 
1748 			/* Cancel Connection Update if it is pending */
1749 			if (conn->type == BT_CONN_TYPE_LE) {
1750 				BT_DBG("k_delayed_work_cancel(&conn->update_work)");
1751 				k_delayed_work_cancel(&conn->update_work);
1752 			}
1753 
1754 			atomic_set_bit(conn->flags, BT_CONN_CLEANUP);
1755 			k_poll_signal_raise(&conn_change, 0);
1756 
1757 			/* The last ref will be dropped during cleanup */
1758 			break;
1759 		case BT_CONN_CONNECT:
1760 			/* LE Create Connection command failed. This might be
1761 			 * directly from the API, don't notify application in
1762 			 * this case.
1763 			 */
1764 			if (conn->err) {
1765 				notify_connected(conn);
1766 			}
1767 
1768 			bt_conn_unref(conn);
1769 			break;
1770 		case BT_CONN_CONNECT_SCAN:
1771 			/* this indicates LE Create Connection with peer address
1772 			 * has been stopped. This could either be triggered by
1773 			 * the application through bt_conn_disconnect or by
1774 			 * timeout set by bt_conn_le_create_param.timeout.
1775 			 */
1776 			if (conn->err) {
1777 				notify_connected(conn);
1778 			}
1779 
1780 			bt_conn_unref(conn);
1781 			break;
1782 		case BT_CONN_CONNECT_DIR_ADV:
1783 			/* this indicates directed advertising has stopped */
1784 			if (conn->err) {
1785 				notify_connected(conn);
1786 			}
1787 
1788 			bt_conn_unref(conn);
1789 			break;
1790 		case BT_CONN_CONNECT_AUTO:
1791 			/* this indicates LE Create Connection with filter
1792 			 * policy has been stopped. This can only be triggered
1793 			 * by the application, so don't notify.
1794 			 */
1795 			bt_conn_unref(conn);
1796 			break;
1797 		case BT_CONN_CONNECT_ADV:
1798 			/* This can only happen when application stops the
1799 			 * advertiser, conn->err is never set in this case.
1800 			 */
1801 			bt_conn_unref(conn);
1802 			break;
1803 		case BT_CONN_DISCONNECTED:
1804 			/* Cannot happen, no transition. */
1805 			break;
1806 		}
1807 		break;
1808 	case BT_CONN_CONNECT_AUTO:
1809 		break;
1810 	case BT_CONN_CONNECT_ADV:
1811 		break;
1812 	case BT_CONN_CONNECT_SCAN:
1813 		break;
1814 	case BT_CONN_CONNECT_DIR_ADV:
1815 		break;
1816 	case BT_CONN_CONNECT:
1817 		if (conn->type == BT_CONN_TYPE_SCO) {
1818 			break;
1819 		}
1820 		/*
1821 		 * Timer is needed only for LE. For other link types controller
1822 		 * will handle connection timeout.
1823 		 */
1824 		if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
1825 		    conn->type == BT_CONN_TYPE_LE) {
1826 			k_delayed_work_submit(&conn->update_work,
1827 				K_MSEC(10 * bt_dev.create_param.timeout));
1828 		}
1829 
1830 		break;
1831 	case BT_CONN_DISCONNECT:
1832 		break;
1833 	default:
1834 		BT_WARN("no valid (%u) state was set", state);
1835 
1836 		break;
1837 	}
1838 }
1839 
1840 struct bt_conn *bt_conn_lookup_handle(u16_t handle)
1841 {
1842 	int i;
1843 
1844 	for (i = 0; i < ARRAY_SIZE(conns); i++) {
1845 		if (!atomic_get(&conns[i].ref)) {
1846 			continue;
1847 		}
1848 
1849 		/* We only care about connections with a valid handle */
1850 		if (conns[i].state != BT_CONN_CONNECTED &&
1851 		    conns[i].state != BT_CONN_DISCONNECT) {
1852 			continue;
1853 		}
1854 
1855 		if (conns[i].handle == handle) {
1856 			return bt_conn_ref(&conns[i]);
1857 		}
1858 	}
1859 
1860 #if defined(CONFIG_BT_BREDR)
1861 	for (i = 0; i < ARRAY_SIZE(sco_conns); i++) {
1862 		if (!atomic_get(&sco_conns[i].ref)) {
1863 			continue;
1864 		}
1865 
1866 		/* We only care about connections with a valid handle */
1867 		if (sco_conns[i].state != BT_CONN_CONNECTED &&
1868 		    sco_conns[i].state != BT_CONN_DISCONNECT) {
1869 			continue;
1870 		}
1871 
1872 		if (sco_conns[i].handle == handle) {
1873 			return bt_conn_ref(&sco_conns[i]);
1874 		}
1875 	}
1876 #endif
1877 
1878 	return NULL;
1879 }
1880 
1881 bool bt_conn_is_peer_addr_le(const struct bt_conn *conn, u8_t id,
1882 			     const bt_addr_le_t *peer)
1883 {
1884 	if (id != conn->id) {
1885 		return false;
1886 	}
1887 
1888 	/* Check against conn dst address as it may be the identity address */
1889 	if (!bt_addr_le_cmp(peer, &conn->le.dst)) {
1890 		return true;
1891 	}
1892 
1893 	/* Check against initial connection address */
1894 	if (conn->role == BT_HCI_ROLE_MASTER) {
1895 		return bt_addr_le_cmp(peer, &conn->le.resp_addr) == 0;
1896 	}
1897 
1898 	return bt_addr_le_cmp(peer, &conn->le.init_addr) == 0;
1899 }
1900 
1901 struct bt_conn *bt_conn_lookup_addr_le(u8_t id, const bt_addr_le_t *peer)
1902 {
1903 	int i;
1904 
1905 	for (i = 0; i < ARRAY_SIZE(conns); i++) {
1906 		if (!atomic_get(&conns[i].ref)) {
1907 			continue;
1908 		}
1909 
1910 		if (conns[i].type != BT_CONN_TYPE_LE) {
1911 			continue;
1912 		}
1913 
1914 		if (bt_conn_is_peer_addr_le(&conns[i], id, peer)) {
1915 			return bt_conn_ref(&conns[i]);
1916 		}
1917 	}
1918 
1919 	return NULL;
1920 }
1921 
1922 struct bt_conn *bt_conn_lookup_state_le(u8_t id, const bt_addr_le_t *peer,
1923 					const bt_conn_state_t state)
1924 {
1925 	int i;
1926 
1927 	for (i = 0; i < ARRAY_SIZE(conns); i++) {
1928 		if (!atomic_get(&conns[i].ref)) {
1929 			continue;
1930 		}
1931 
1932 		if (conns[i].type != BT_CONN_TYPE_LE) {
1933 			continue;
1934 		}
1935 
1936 		if (peer && !bt_conn_is_peer_addr_le(&conns[i], id, peer)) {
1937 			continue;
1938 		}
1939 
1940 		if (conns[i].state == state && conns[i].id == id) {
1941 			return bt_conn_ref(&conns[i]);
1942 		}
1943 	}
1944 
1945 	return NULL;
1946 }
1947 
1948 void bt_conn_foreach(int type, void (*func)(struct bt_conn *conn, void *data),
1949 		     void *data)
1950 {
1951 	int i;
1952 
1953 	for (i = 0; i < ARRAY_SIZE(conns); i++) {
1954 		if (!atomic_get(&conns[i].ref)) {
1955 			continue;
1956 		}
1957 
1958 		if (!(conns[i].type & type)) {
1959 			continue;
1960 		}
1961 
1962 		func(&conns[i], data);
1963 	}
1964 #if defined(CONFIG_BT_BREDR)
1965 	if (type & BT_CONN_TYPE_SCO) {
1966 		for (i = 0; i < ARRAY_SIZE(sco_conns); i++) {
1967 			if (!atomic_get(&sco_conns[i].ref)) {
1968 				continue;
1969 			}
1970 
1971 			func(&sco_conns[i], data);
1972 		}
1973 	}
1974 #endif /* defined(CONFIG_BT_BREDR) */
1975 }
1976 
1977 struct bt_conn *bt_conn_ref(struct bt_conn *conn)
1978 {
1979 	atomic_val_t old = atomic_inc(&conn->ref);
1980 
1981 	BT_DBG("%s: handle %u ref %u -> %u", __func__, conn->handle, old,
1982 	       atomic_get(&conn->ref));
1983 	(void)old;
1984 
1985 	return conn;
1986 }
1987 
1988 void bt_conn_unref(struct bt_conn *conn)
1989 {
1990 	if (atomic_get(&conn->ref) == 0)
1991 		return;
1992 
1993 	atomic_val_t old = atomic_dec(&conn->ref);
1994 	(void)old;
1995 
1996 	BT_DBG("%s: handle %u ref %u -> %u", __func__, conn->handle, old,
1997 	       atomic_get(&conn->ref));
1998 }
1999 
2000 void bt_conn_del(struct bt_conn *conn)
2001 {
2002 	atomic_val_t old = atomic_set(&conn->ref, 0);
2003 	(void)old;
2004 
2005 	BT_DBG("%s: handle %u ref %u -> %u", __func__, conn->handle, old,
2006 	       atomic_get(&conn->ref));
2007 }
2008 
bt_conn_get_dst(const struct bt_conn * conn)2009 const bt_addr_le_t *bt_conn_get_dst(const struct bt_conn *conn)
2010 {
2011 	return &conn->le.dst;
2012 }
2013 
bt_conn_get_info(const struct bt_conn * conn,struct bt_conn_info * info)2014 int bt_conn_get_info(const struct bt_conn *conn, struct bt_conn_info *info)
2015 {
2016 	info->type = conn->type;
2017 	info->role = conn->role;
2018 	info->id = conn->id;
2019 
2020 	switch (conn->type) {
2021 	case BT_CONN_TYPE_LE:
2022 		info->le.dst = &conn->le.dst;
2023 		info->le.src = &bt_dev.id_addr[conn->id];
2024 		if (conn->role == BT_HCI_ROLE_MASTER) {
2025 			info->le.local = &conn->le.init_addr;
2026 			info->le.remote = &conn->le.resp_addr;
2027 		} else {
2028 			info->le.local = &conn->le.resp_addr;
2029 			info->le.remote = &conn->le.init_addr;
2030 		}
2031 		info->le.interval = conn->le.interval;
2032 		info->le.latency = conn->le.latency;
2033 		info->le.timeout = conn->le.timeout;
2034 #if defined(CONFIG_BT_USER_PHY_UPDATE)
2035 		info->le.phy = &conn->le.phy;
2036 #endif
2037 #if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
2038 		info->le.data_len = &conn->le.data_len;
2039 #endif
2040 		return 0;
2041 #if defined(CONFIG_BT_BREDR)
2042 	case BT_CONN_TYPE_BR:
2043 		info->br.dst = &conn->br.dst;
2044 		return 0;
2045 #endif
2046 	}
2047 
2048 	return -EINVAL;
2049 }
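
/* Illustrative sketch of reading connection info, e.g. from a connected
 * callback; error handling is elided and the conn pointer is assumed to be
 * valid:
 *
 *	struct bt_conn_info info;
 *
 *	if (!bt_conn_get_info(conn, &info) && info.type == BT_CONN_TYPE_LE) {
 *		BT_DBG("interval %u latency %u timeout %u",
 *		       info.le.interval, info.le.latency, info.le.timeout);
 *	}
 */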
2050 
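/* Remote feature (and, when CONFIG_BT_REMOTE_VERSION is enabled, version)
 * information is exchanged automatically after connection establishment;
 * until that exchange completes this helper returns -EBUSY and the caller
 * should retry later.
 */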
bt_conn_get_remote_info(struct bt_conn * conn,struct bt_conn_remote_info * remote_info)2051 int bt_conn_get_remote_info(struct bt_conn *conn,
2052 			    struct bt_conn_remote_info *remote_info)
2053 {
2054 	if (!atomic_test_bit(conn->flags, BT_CONN_AUTO_FEATURE_EXCH) ||
2055 	    (IS_ENABLED(CONFIG_BT_REMOTE_VERSION) &&
2056 	     !atomic_test_bit(conn->flags, BT_CONN_AUTO_VERSION_INFO))) {
2057 		return -EBUSY;
2058 	}
2059 
2060 	remote_info->type = conn->type;
2061 #if defined(CONFIG_BT_REMOTE_VERSION)
2062 	/* The conn->rv values will be just zeroes if the operation failed */
2063 	remote_info->version = conn->rv.version;
2064 	remote_info->manufacturer = conn->rv.manufacturer;
2065 	remote_info->subversion = conn->rv.subversion;
2066 #else
2067 	remote_info->version = 0;
2068 	remote_info->manufacturer = 0;
2069 	remote_info->subversion = 0;
2070 #endif
2071 
2072 	switch (conn->type) {
2073 	case BT_CONN_TYPE_LE:
2074 		remote_info->le.features = conn->le.features;
2075 		return 0;
2076 #if defined(CONFIG_BT_BREDR)
2077 	case BT_CONN_TYPE_BR:
2078 		/* TODO: Make sure the HCI commands to read BR features and
2079 		 * extended features have finished. */
2080 		return -ENOTSUP;
2081 #endif
2082 	default:
2083 		return -EINVAL;
2084 	}
2085 }
2086 
conn_disconnect(struct bt_conn * conn,u8_t reason)2087 static int conn_disconnect(struct bt_conn *conn, u8_t reason)
2088 {
2089 	int err;
2090 
2091 	err = bt_hci_disconnect(conn->handle, reason);
2092 	if (err) {
2093 		return err;
2094 	}
2095 
2096 	bt_conn_set_state(conn, BT_CONN_DISCONNECT);
2097 
2098 	return 0;
2099 }
2100 
bt_conn_le_param_update(struct bt_conn * conn,const struct bt_le_conn_param * param)2101 int bt_conn_le_param_update(struct bt_conn *conn,
2102 			    const struct bt_le_conn_param *param)
2103 {
2104 	BT_DBG("conn %p features 0x%02x params (%d-%d %d %d)", conn,
2105 	       conn->le.features[0], param->interval_min,
2106 	       param->interval_max, param->latency, param->timeout);
2107 
2108 	/* Check if there's a need to update conn params */
2109 	if (conn->le.interval >= param->interval_min &&
2110 	    conn->le.interval <= param->interval_max &&
2111 	    conn->le.latency == param->latency &&
2112 	    conn->le.timeout == param->timeout) {
2113 		atomic_clear_bit(conn->flags, BT_CONN_SLAVE_PARAM_SET);
2114 		return -EALREADY;
2115 	}
2116 
2117 	if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
2118 	    conn->role == BT_CONN_ROLE_MASTER) {
2119 		return send_conn_le_param_update(conn, param);
2120 	}
2121 
2122 	if (IS_ENABLED(CONFIG_BT_PERIPHERAL)) {
2123 		/* if slave conn param update timer expired just send request */
2124 		if (atomic_test_bit(conn->flags, BT_CONN_SLAVE_PARAM_UPDATE)) {
2125 			return send_conn_le_param_update(conn, param);
2126 		}
2127 
2128 		/* store new conn params to be used by update timer */
2129 		conn->le.interval_min = param->interval_min;
2130 		conn->le.interval_max = param->interval_max;
2131 		conn->le.pending_latency = param->latency;
2132 		conn->le.pending_timeout = param->timeout;
2133 		atomic_set_bit(conn->flags, BT_CONN_SLAVE_PARAM_SET);
2134 	}
2135 
2136 	return 0;
2137 }
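
/* Illustrative use of bt_conn_le_param_update(); the values below are
 * examples only (interval in 1.25 ms units, latency in connection events,
 * timeout in 10 ms units):
 *
 *	struct bt_le_conn_param param = {
 *		.interval_min = 24,	// 30 ms
 *		.interval_max = 40,	// 50 ms
 *		.latency = 0,
 *		.timeout = 400,		// 4 s
 *	};
 *
 *	err = bt_conn_le_param_update(conn, &param);
 */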
2138 
2139 #if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
bt_conn_le_data_len_update(struct bt_conn * conn,const struct bt_conn_le_data_len_param * param)2140 int bt_conn_le_data_len_update(struct bt_conn *conn,
2141 			       const struct bt_conn_le_data_len_param *param)
2142 {
2143 	if (conn->le.data_len.tx_max_len == param->tx_max_len &&
2144 	    conn->le.data_len.tx_max_time == param->tx_max_time) {
2145 		return -EALREADY;
2146 	}
2147 
2148 	if (IS_ENABLED(CONFIG_BT_AUTO_DATA_LEN_UPDATE) &&
2149 	    !atomic_test_bit(conn->flags, BT_CONN_AUTO_DATA_LEN_COMPLETE)) {
2150 		return -EAGAIN;
2151 	}
2152 
2153 	return bt_le_set_data_len(conn, param->tx_max_len, param->tx_max_time);
2154 }
2155 #endif
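
/* Illustrative data length update request; 251 bytes / 2120 us are the
 * maximum values allowed by the spec and are used here only as an example:
 *
 *	struct bt_conn_le_data_len_param param = {
 *		.tx_max_len = 251,
 *		.tx_max_time = 2120,
 *	};
 *
 *	err = bt_conn_le_data_len_update(conn, &param);
 */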
2156 
2157 #if defined(CONFIG_BT_USER_PHY_UPDATE)
bt_conn_le_phy_update(struct bt_conn * conn,const struct bt_conn_le_phy_param * param)2158 int bt_conn_le_phy_update(struct bt_conn *conn,
2159 			  const struct bt_conn_le_phy_param *param)
2160 {
2161 	if (conn->le.phy.tx_phy == param->pref_tx_phy &&
2162 	    conn->le.phy.rx_phy == param->pref_rx_phy) {
2163 		return -EALREADY;
2164 	}
2165 
2166 	if (IS_ENABLED(CONFIG_BT_AUTO_PHY_UPDATE) &&
2167 	    !atomic_test_bit(conn->flags, BT_CONN_AUTO_PHY_COMPLETE)) {
2168 		return -EAGAIN;
2169 	}
2170 
2171 	return bt_le_set_phy(conn, param->pref_tx_phy, param->pref_rx_phy);
2172 }
2173 #endif
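
/* Illustrative PHY update request; BT_GAP_LE_PHY_2M follows the upstream
 * Zephyr constant name and may be spelled differently in this port:
 *
 *	struct bt_conn_le_phy_param param = {
 *		.pref_tx_phy = BT_GAP_LE_PHY_2M,
 *		.pref_rx_phy = BT_GAP_LE_PHY_2M,
 *	};
 *
 *	err = bt_conn_le_phy_update(conn, &param);
 */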
2174 
bt_conn_disconnect(struct bt_conn * conn,u8_t reason)2175 int bt_conn_disconnect(struct bt_conn *conn, u8_t reason)
2176 {
2177 	/* Disconnection is initiated by us, so auto connection shall
2178 	 * be disabled. Otherwise the passive scan would be enabled
2179 	 * and we could send LE Create Connection as soon as the remote
2180 	 * starts advertising.
2181 	 */
2182 #if !defined(CONFIG_BT_WHITELIST)
2183 	if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
2184 	    conn->type == BT_CONN_TYPE_LE) {
2185 		bt_le_set_auto_conn(&conn->le.dst, NULL);
2186 	}
2187 #endif /* !defined(CONFIG_BT_WHITELIST) */
2188 
2189 	switch (conn->state) {
2190 	case BT_CONN_CONNECT_SCAN:
2191 		conn->err = reason;
2192 		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
2193 		if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
2194 			bt_le_scan_update(false);
2195 		}
2196 		return 0;
2197 	case BT_CONN_CONNECT_DIR_ADV:
2198 		BT_WARN("Deprecated: Use bt_le_adv_stop instead");
2199 		conn->err = reason;
2200 		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
2201 		if (IS_ENABLED(CONFIG_BT_PERIPHERAL)) {
2202 			/* The user should unref the connection object when
2203 			 * receiving the error in the connection callback.
2204 			 */
2205 			return bt_le_adv_stop();
2206 		}
2207 		return 0;
2208 	case BT_CONN_CONNECT:
2209 #if defined(CONFIG_BT_BREDR)
2210 		if (conn->type == BT_CONN_TYPE_BR) {
2211 			return bt_hci_connect_br_cancel(conn);
2212 		}
2213 #endif /* CONFIG_BT_BREDR */
2214 
2215 		if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
2216 			k_delayed_work_cancel(&conn->update_work);
2217 			return bt_le_create_conn_cancel();
2218 		}
2219 
2220 		return 0;
2221 	case BT_CONN_CONNECTED:
2222 		return conn_disconnect(conn, reason);
2223 	case BT_CONN_DISCONNECT:
2224 		return 0;
2225 	case BT_CONN_DISCONNECTED:
2226 	default:
2227 		return -ENOTCONN;
2228 	}
2229 }
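
/* Typical application-initiated disconnect; BT_HCI_ERR_REMOTE_USER_TERM_CONN
 * (0x13) is the usual reason for a local, user-requested termination:
 *
 *	err = bt_conn_disconnect(conn, BT_HCI_ERR_REMOTE_USER_TERM_CONN);
 *	if (err) {
 *		BT_WARN("Disconnect failed (err %d)", err);
 *	}
 */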
2230 
2231 #if defined(CONFIG_BT_CENTRAL)
bt_conn_set_param_le(struct bt_conn * conn,const struct bt_le_conn_param * param)2232 static void bt_conn_set_param_le(struct bt_conn *conn,
2233 				 const struct bt_le_conn_param *param)
2234 {
2235 	conn->le.interval_min = param->interval_min;
2236 	conn->le.interval_max = param->interval_max;
2237 	conn->le.latency = param->latency;
2238 	conn->le.timeout = param->timeout;
2239 }
2240 
create_param_validate(const struct bt_conn_le_create_param * param)2241 static bool create_param_validate(const struct bt_conn_le_create_param *param)
2242 {
2243 #if defined(CONFIG_BT_PRIVACY)
2244 	/* Initiation timeout cannot be greater than the RPA timeout */
2245 	const bt_u32_t timeout_max = (MSEC_PER_SEC / 10) * CONFIG_BT_RPA_TIMEOUT;
2246 
2247 	if (param->timeout > timeout_max) {
2248 		return false;
2249 	}
2250 #endif
2251 
2252 	return true;
2253 }
2254 
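/* Copy the application's create parameters into bt_dev and apply defaults:
 * a zero timeout falls back to CONFIG_BT_CREATE_CONN_TIMEOUT (converted
 * from seconds to 10 ms units), and zero coded-PHY interval/window fall
 * back to the 1M PHY interval/window.
 */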
create_param_setup(const struct bt_conn_le_create_param * param)2255 static void create_param_setup(const struct bt_conn_le_create_param *param)
2256 {
2257 	bt_dev.create_param = *param;
2258 
2259 	bt_dev.create_param.timeout =
2260 		(bt_dev.create_param.timeout != 0) ?
2261 		bt_dev.create_param.timeout :
2262 		(MSEC_PER_SEC / 10) * CONFIG_BT_CREATE_CONN_TIMEOUT;
2263 
2264 	bt_dev.create_param.interval_coded =
2265 		(bt_dev.create_param.interval_coded != 0) ?
2266 		bt_dev.create_param.interval_coded :
2267 		bt_dev.create_param.interval;
2268 
2269 	bt_dev.create_param.window_coded =
2270 		(bt_dev.create_param.window_coded != 0) ?
2271 		bt_dev.create_param.window_coded :
2272 		bt_dev.create_param.window;
2273 }
2274 
2275 #if defined(CONFIG_BT_WHITELIST)
bt_conn_le_create_auto(const struct bt_conn_le_create_param * create_param,const struct bt_le_conn_param * param)2276 int bt_conn_le_create_auto(const struct bt_conn_le_create_param *create_param,
2277 			   const struct bt_le_conn_param *param)
2278 {
2279 	struct bt_conn *conn;
2280 	int err;
2281 
2282 	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
2283 		return -EAGAIN;
2284 	}
2285 
2286 	if (!bt_le_conn_params_valid(param)) {
2287 		return -EINVAL;
2288 	}
2289 
2290 	conn = bt_conn_lookup_state_le(BT_ID_DEFAULT, BT_ADDR_LE_NONE,
2291 				       BT_CONN_CONNECT_AUTO);
2292 	if (conn) {
2293 		bt_conn_unref(conn);
2294 		return -EALREADY;
2295 	}
2296 
2297 	/* Scanning, whether to connect or an explicit scan, was started by
2298 	 * the application and should not be stopped here.
2299 	 */
2300 	if (atomic_test_bit(bt_dev.flags, BT_DEV_SCANNING)) {
2301 		return -EINVAL;
2302 	}
2303 
2304 	if (atomic_test_bit(bt_dev.flags, BT_DEV_INITIATING)) {
2305 		return -EINVAL;
2306 	}
2307 
2308 	if (!bt_le_scan_random_addr_check()) {
2309 		return -EINVAL;
2310 	}
2311 
2312 	conn = bt_conn_add_le(BT_ID_DEFAULT, BT_ADDR_LE_NONE);
2313 	if (!conn) {
2314 		return -ENOMEM;
2315 	}
2316 
2317 	bt_conn_set_param_le(conn, param);
2318 	create_param_setup(create_param);
2319 
2320 	atomic_set_bit(conn->flags, BT_CONN_AUTO_CONNECT);
2321 	bt_conn_set_state(conn, BT_CONN_CONNECT_AUTO);
2322 
2323 	err = bt_le_create_conn(conn);
2324 	if (err) {
2325 		BT_ERR("Failed to start whitelist scan");
2326 		conn->err = 0;
2327 		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
2328 		bt_conn_unref(conn);
2329 		return err;
2330 	}
2331 
2332 	/* Since we don't give the application a reference to manage in
2333 	 * this case, we need to release this reference here.
2334 	 */
2335 	bt_conn_unref(conn);
2336 	return 0;
2337 }
2338 
bt_conn_create_auto_stop(void)2339 int bt_conn_create_auto_stop(void)
2340 {
2341 	struct bt_conn *conn;
2342 	int err;
2343 
2344 	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
2345 		return -EINVAL;
2346 	}
2347 
2348 	conn = bt_conn_lookup_state_le(BT_ID_DEFAULT, BT_ADDR_LE_NONE,
2349 				       BT_CONN_CONNECT_AUTO);
2350 	if (!conn) {
2351 		return -EINVAL;
2352 	}
2353 
2354 	if (!atomic_test_bit(bt_dev.flags, BT_DEV_INITIATING)) {
2355 		bt_conn_unref(conn);
2356 		return -EINVAL;
	}
2357 
2358 	bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
2359 	bt_conn_unref(conn);
2360 
2361 	err = bt_le_create_conn_cancel();
2362 	if (err) {
2363 		BT_ERR("Failed to stop initiator");
2364 		return err;
2365 	}
2366 
2367 	return 0;
2368 }
2369 #endif /* defined(CONFIG_BT_WHITELIST) */
2370 
bt_conn_le_create(const bt_addr_le_t * peer,const struct bt_conn_le_create_param * create_param,const struct bt_le_conn_param * conn_param,struct bt_conn ** ret_conn)2371 int bt_conn_le_create(const bt_addr_le_t *peer,
2372 		      const struct bt_conn_le_create_param *create_param,
2373 		      const struct bt_le_conn_param *conn_param,
2374 		      struct bt_conn **ret_conn)
2375 {
2376 	struct bt_conn *conn;
2377 	bt_addr_le_t dst;
2378 	int err;
2379 
2380 	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
2381 		return -EAGAIN;
2382 	}
2383 
2384 	if (!bt_le_conn_params_valid(conn_param)) {
2385 		return -EINVAL;
2386 	}
2387 
2388 	if (!create_param_validate(create_param)) {
2389 		return -EINVAL;
2390 	}
2391 
2392 	if (atomic_test_bit(bt_dev.flags, BT_DEV_EXPLICIT_SCAN)) {
2393 		return -EINVAL;
2394 	}
2395 
2396 	if (atomic_test_bit(bt_dev.flags, BT_DEV_INITIATING)) {
2397 		return -EALREADY;
2398 	}
2399 
2400 	if (!bt_le_scan_random_addr_check()) {
2401 		return -EINVAL;
2402 	}
2403 
2404 	if (bt_conn_exists_le(BT_ID_DEFAULT, peer)) {
2405 		return -EINVAL;
2406 	}
2407 
2408 	if (peer->type == BT_ADDR_LE_PUBLIC_ID ||
2409 	    peer->type == BT_ADDR_LE_RANDOM_ID) {
2410 		bt_addr_le_copy(&dst, peer);
2411 		dst.type -= BT_ADDR_LE_PUBLIC_ID;
2412 	} else {
2413 		bt_addr_le_copy(&dst, bt_lookup_id_addr(BT_ID_DEFAULT, peer));
2414 	}
2415 
2416 	/* Only default identity supported for now */
2417 	conn = bt_conn_add_le(BT_ID_DEFAULT, &dst);
2418 	if (!conn) {
2419 		return -ENOMEM;
2420 	}
2421 
2422 	bt_conn_set_param_le(conn, conn_param);
2423 	create_param_setup(create_param);
2424 
2425 #if defined(CONFIG_BT_SMP)
2426 	if (!bt_dev.le.rl_size || bt_dev.le.rl_entries > bt_dev.le.rl_size) {
2427 		bt_conn_set_state(conn, BT_CONN_CONNECT_SCAN);
2428 
2429 		err = bt_le_scan_update(true);
2430 		if (err) {
2431 			bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
2432 			bt_conn_unref(conn);
2433 
2434 			return err;
2435 		}
2436 
2437 		*ret_conn = conn;
2438 		return 0;
2439 	}
2440 #endif
2441 
2442 	bt_conn_set_state(conn, BT_CONN_CONNECT);
2443 
2444 	err = bt_le_create_conn(conn);
2445 	if (err) {
2446 		conn->err = 0;
2447 		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
2448 		bt_conn_unref(conn);
2449 
2450 		bt_le_scan_update(false);
2451 		return err;
2452 	}
2453 
2454 	*ret_conn = conn;
2455 	return 0;
2456 }
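
/* Illustrative central-role connection creation; BT_CONN_LE_CREATE_CONN and
 * BT_LE_CONN_PARAM_DEFAULT follow the upstream Zephyr convenience macros and
 * are assumed to be available in this port:
 *
 *	struct bt_conn *conn;
 *
 *	err = bt_conn_le_create(&peer_addr, BT_CONN_LE_CREATE_CONN,
 *				BT_LE_CONN_PARAM_DEFAULT, &conn);
 *	if (!err) {
 *		// the application now owns this reference
 *		bt_conn_unref(conn);
 *	}
 */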
2457 
2458 #if !defined(CONFIG_BT_WHITELIST)
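/* Enabling auto-connect (param != NULL) takes an extra reference on the
 * connection object so it survives while background scanning is active;
 * disabling it (param == NULL) drops that reference again.
 */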
bt_le_set_auto_conn(const bt_addr_le_t * addr,const struct bt_le_conn_param * param)2459 int bt_le_set_auto_conn(const bt_addr_le_t *addr,
2460 			const struct bt_le_conn_param *param)
2461 {
2462 	struct bt_conn *conn;
2463 
2464 	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
2465 		return -EAGAIN;
2466 	}
2467 
2468 	if (param && !bt_le_conn_params_valid(param)) {
2469 		return -EINVAL;
2470 	}
2471 
2472 	if (!bt_le_scan_random_addr_check()) {
2473 		return -EINVAL;
2474 	}
2475 
2476 	/* Only default identity is supported */
2477 	conn = bt_conn_lookup_addr_le(BT_ID_DEFAULT, addr);
2478 	if (!conn) {
2479 		conn = bt_conn_add_le(BT_ID_DEFAULT, addr);
2480 		if (!conn) {
2481 			return -ENOMEM;
2482 		}
2483 	}
2484 
2485 	if (param) {
2486 		bt_conn_set_param_le(conn, param);
2487 
2488 		if (!atomic_test_and_set_bit(conn->flags,
2489 					     BT_CONN_AUTO_CONNECT)) {
2490 			bt_conn_ref(conn);
2491 		}
2492 	} else {
2493 		if (atomic_test_and_clear_bit(conn->flags,
2494 					      BT_CONN_AUTO_CONNECT)) {
2495 			bt_conn_unref(conn);
2496 			if (conn->state == BT_CONN_CONNECT_SCAN) {
2497 				bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
2498 			}
2499 		}
2500 	}
2501 
2502 	if (conn->state == BT_CONN_DISCONNECTED &&
2503 	    atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
2504 		if (param) {
2505 			bt_conn_set_state(conn, BT_CONN_CONNECT_SCAN);
2506 		}
2507 		bt_le_scan_update(false);
2508 	}
2509 
2510 	bt_conn_unref(conn);
2511 
2512 	return 0;
2513 }
2514 #endif /* !defined(CONFIG_BT_WHITELIST) */
2515 #endif /* CONFIG_BT_CENTRAL */
2516 
bt_conn_le_conn_update(struct bt_conn * conn,const struct bt_le_conn_param * param)2517 int bt_conn_le_conn_update(struct bt_conn *conn,
2518 			   const struct bt_le_conn_param *param)
2519 {
2520 #if !defined(CONFIG_BT_USE_HCI_API)
2521 	struct hci_cp_le_conn_update *conn_update;
2522 	struct net_buf *buf;
2523 
2524 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_CONN_UPDATE,
2525 				sizeof(*conn_update));
2526 	if (!buf) {
2527 		return -ENOBUFS;
2528 	}
2529 
2530 	conn_update = net_buf_add(buf, sizeof(*conn_update));
2531 	(void)memset(conn_update, 0, sizeof(*conn_update));
2532 	conn_update->handle = sys_cpu_to_le16(conn->handle);
2533 	conn_update->conn_interval_min = sys_cpu_to_le16(param->interval_min);
2534 	conn_update->conn_interval_max = sys_cpu_to_le16(param->interval_max);
2535 	conn_update->conn_latency = sys_cpu_to_le16(param->latency);
2536 	conn_update->supervision_timeout = sys_cpu_to_le16(param->timeout);
2537 
2538 	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_CONN_UPDATE, buf, NULL);
2539 #else
2540 	return hci_api_le_conn_updata(conn->handle, param->interval_min,
2541 				      param->interval_max, param->latency,
2542 				      param->timeout, 0, 0);
2545 #endif
2546 }
2547 
2548 #if defined(CONFIG_NET_BUF_LOG)
bt_conn_create_frag_timeout_debug(size_t reserve,k_timeout_t timeout,const char * func,int line)2549 struct net_buf *bt_conn_create_frag_timeout_debug(size_t reserve,
2550 						  k_timeout_t timeout,
2551 						  const char *func, int line)
2552 #else
2553 struct net_buf *bt_conn_create_frag_timeout(size_t reserve, k_timeout_t timeout)
2554 #endif
2555 {
2556 	struct net_buf_pool *pool = NULL;
2557 
2558 #if CONFIG_BT_L2CAP_TX_FRAG_COUNT > 0
2559 	pool = &frag_pool;
2560 #endif
2561 
2562 #if defined(CONFIG_NET_BUF_LOG)
2563 	return bt_conn_create_pdu_timeout_debug(pool, reserve, timeout,
2564 						func, line);
2565 #else
2566 	return bt_conn_create_pdu_timeout(pool, reserve, timeout);
2567 #endif /* CONFIG_NET_BUF_LOG */
2568 }
2569 
2570 #if defined(CONFIG_NET_BUF_LOG)
bt_conn_create_pdu_timeout_debug(struct net_buf_pool * pool,size_t reserve,k_timeout_t timeout,const char * func,int line)2571 struct net_buf *bt_conn_create_pdu_timeout_debug(struct net_buf_pool *pool,
2572 						 size_t reserve,
2573 						 k_timeout_t timeout,
2574 						 const char *func, int line)
2575 #else
2576 struct net_buf *bt_conn_create_pdu_timeout(struct net_buf_pool *pool,
2577 					   size_t reserve, k_timeout_t timeout)
2578 #endif
2579 {
2580 	struct net_buf *buf;
2581 
2582 	/*
2583 	 * PDU must not be allocated from ISR as we block with 'K_FOREVER'
2584 	 * during the allocation
2585 	 */
2586 	__ASSERT_NO_MSG(!k_is_in_isr());
2587 
2588 	if (!pool) {
2589 		pool = &acl_tx_pool;
2590 	}
2591 
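	/* With connection debugging enabled, try a non-blocking allocation
	 * first so that pool exhaustion gets logged, then fall back to the
	 * caller-supplied timeout.
	 */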
2592 	if (IS_ENABLED(CONFIG_BT_DEBUG_CONN)) {
2593 #if defined(CONFIG_NET_BUF_LOG)
2594 		buf = net_buf_alloc_fixed_debug(pool, K_NO_WAIT, func, line);
2595 #else
2596 		buf = net_buf_alloc(pool, K_NO_WAIT);
2597 #endif
2598 		if (!buf) {
2599 			BT_WARN("Unable to allocate buffer with K_NO_WAIT");
2600 #if defined(CONFIG_NET_BUF_LOG)
2601 			buf = net_buf_alloc_fixed_debug(pool, timeout, func,
2602 							line);
2603 #else
2604 			buf = net_buf_alloc(pool, timeout);
2605 #endif
2606 		}
2607 	} else {
2608 #if defined(CONFIG_NET_BUF_LOG)
2609 		buf = net_buf_alloc_fixed_debug(pool, timeout, func, line);
2611 #else
2612 		buf = net_buf_alloc(pool, timeout);
2613 #endif
2614 	}
2615 
2616 	if (!buf) {
2617 		BT_WARN("Unable to allocate buffer within timeout");
2618 		return NULL;
2619 	}
2620 
2621 	reserve += sizeof(struct bt_hci_acl_hdr) + BT_BUF_RESERVE;
2622 	net_buf_reserve(buf, reserve);
2623 
2624 	return buf;
2625 }
2626 
2627 #if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR)
bt_conn_auth_cb_register(const struct bt_conn_auth_cb * cb)2628 int bt_conn_auth_cb_register(const struct bt_conn_auth_cb *cb)
2629 {
2630 	if (!cb) {
2631 		bt_auth = NULL;
2632 		return 0;
2633 	}
2634 
2635 	if (bt_auth) {
2636 		return -EALREADY;
2637 	}
2638 
2639 	/* The cancel callback must always be provided if the app provides
2640 	 * interactive callbacks.
2641 	 */
2642 	if (!cb->cancel &&
2643 	    (cb->passkey_display || cb->passkey_entry || cb->passkey_confirm ||
2644 #if defined(CONFIG_BT_BREDR)
2645 	     cb->pincode_entry ||
2646 #endif
2647 	     cb->pairing_confirm)) {
2648 		return -EINVAL;
2649 	}
2650 
2651 	bt_auth = cb;
2652 	return 0;
2653 }
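
/* Illustrative registration of authentication callbacks; the handler names
 * (app_passkey_display, app_auth_cancel) are placeholders for application
 * code:
 *
 *	static const struct bt_conn_auth_cb auth_cb = {
 *		.passkey_display = app_passkey_display,
 *		.cancel = app_auth_cancel,
 *	};
 *
 *	err = bt_conn_auth_cb_register(&auth_cb);
 */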
2654 
bt_conn_auth_passkey_entry(struct bt_conn * conn,unsigned int passkey)2655 int bt_conn_auth_passkey_entry(struct bt_conn *conn, unsigned int passkey)
2656 {
2657 	if (!bt_auth) {
2658 		return -EINVAL;
2659 	}
2660 
2661 	if (IS_ENABLED(CONFIG_BT_SMP) && conn->type == BT_CONN_TYPE_LE) {
2662 		bt_smp_auth_passkey_entry(conn, passkey);
2663 		return 0;
2664 	}
2665 
2666 #if defined(CONFIG_BT_BREDR)
2667 	if (conn->type == BT_CONN_TYPE_BR) {
2668 		/* User entered passkey, reset user state. */
2669 		if (!atomic_test_and_clear_bit(conn->flags, BT_CONN_USER)) {
2670 			return -EPERM;
2671 		}
2672 
2673 		if (conn->br.pairing_method == PASSKEY_INPUT) {
2674 			return ssp_passkey_reply(conn, passkey);
2675 		}
2676 	}
2677 #endif /* CONFIG_BT_BREDR */
2678 
2679 	return -EINVAL;
2680 }
2681 
bt_conn_auth_passkey_confirm(struct bt_conn * conn)2682 int bt_conn_auth_passkey_confirm(struct bt_conn *conn)
2683 {
2684 	if (!bt_auth) {
2685 		return -EINVAL;
2686 	}
2687 
2688 	if (IS_ENABLED(CONFIG_BT_SMP) &&
2689 	    conn->type == BT_CONN_TYPE_LE) {
2690 		return bt_smp_auth_passkey_confirm(conn);
2691 	}
2692 
2693 #if defined(CONFIG_BT_BREDR)
2694 	if (conn->type == BT_CONN_TYPE_BR) {
2695 		/* User confirmed the passkey value, reset user state. */
2696 		if (!atomic_test_and_clear_bit(conn->flags, BT_CONN_USER)) {
2697 			return -EPERM;
2698 		}
2699 
2700 		return ssp_confirm_reply(conn);
2701 	}
2702 #endif /* CONFIG_BT_BREDR */
2703 
2704 	return -EINVAL;
2705 }
2706 
bt_conn_auth_cancel(struct bt_conn * conn)2707 int bt_conn_auth_cancel(struct bt_conn *conn)
2708 {
2709 	if (!bt_auth) {
2710 		return -EINVAL;
2711 	}
2712 
2713 	if (IS_ENABLED(CONFIG_BT_SMP) && conn->type == BT_CONN_TYPE_LE) {
2714 		return bt_smp_auth_cancel(conn);
2715 	}
2716 
2717 #if defined(CONFIG_BT_BREDR)
2718 	if (conn->type == BT_CONN_TYPE_BR) {
2719 		/* User cancelled the authentication, reset user state. */
2720 		if (!atomic_test_and_clear_bit(conn->flags, BT_CONN_USER)) {
2721 			return -EPERM;
2722 		}
2723 
2724 		switch (conn->br.pairing_method) {
2725 		case JUST_WORKS:
2726 		case PASSKEY_CONFIRM:
2727 			return ssp_confirm_neg_reply(conn);
2728 		case PASSKEY_INPUT:
2729 			return ssp_passkey_neg_reply(conn);
2730 		case PASSKEY_DISPLAY:
2731 			return bt_conn_disconnect(conn,
2732 						  BT_HCI_ERR_AUTH_FAIL);
2733 		case LEGACY:
2734 			return pin_code_neg_reply(&conn->br.dst);
2735 		default:
2736 			break;
2737 		}
2738 	}
2739 #endif /* CONFIG_BT_BREDR */
2740 
2741 	return -EINVAL;
2742 }
2743 
bt_conn_auth_pairing_confirm(struct bt_conn * conn)2744 int bt_conn_auth_pairing_confirm(struct bt_conn *conn)
2745 {
2746 	if (!bt_auth) {
2747 		return -EINVAL;
2748 	}
2749 
2750 	switch (conn->type) {
2751 #if defined(CONFIG_BT_SMP)
2752 	case BT_CONN_TYPE_LE:
2753 		return bt_smp_auth_pairing_confirm(conn);
2754 #endif /* CONFIG_BT_SMP */
2755 #if defined(CONFIG_BT_BREDR)
2756 	case BT_CONN_TYPE_BR:
2757 		return ssp_confirm_reply(conn);
2758 #endif /* CONFIG_BT_BREDR */
2759 	default:
2760 		return -EINVAL;
2761 	}
2762 }
2763 #endif /* CONFIG_BT_SMP || CONFIG_BT_BREDR */
2764 
bt_conn_index(struct bt_conn * conn)2765 u8_t bt_conn_index(struct bt_conn *conn)
2766 {
2767 	u8_t index = conn - conns;
2768 
2769 	__ASSERT(index < CONFIG_BT_MAX_CONN, "Invalid bt_conn pointer");
2770 	return index;
2771 }
2772 
bt_conn_lookup_index(u8_t index)2773 struct bt_conn *bt_conn_lookup_index(u8_t index)
2774 {
2775 	struct bt_conn *conn;
2776 
2777 	if (index >= ARRAY_SIZE(conns)) {
2778 		return NULL;
2779 	}
2780 
2781 	conn = &conns[index];
2782 
2783 	if (!atomic_get(&conn->ref)) {
2784 		return NULL;
2785 	}
2786 
2787 	return bt_conn_ref(conn);
2788 }
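
/* Note: bt_conn_lookup_index() also returns the connection with a reference
 * taken; the caller must release it with bt_conn_unref().
 */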
2789 
bt_conn_init(void)2790 int bt_conn_init(void)
2791 {
2792 	int err, i;
2793 
2794 	k_fifo_init(&free_tx);
2795 
2796 	for (i = 0; i < ARRAY_SIZE(conn_tx); i++) {
2797 		k_fifo_put(&free_tx, &conn_tx[i]);
2798 	}
2799 
2800 	bt_att_init();
2801 
2802 	err = bt_smp_init();
2803 	if (err) {
2804 		return err;
2805 	}
2806 
2807 	bt_l2cap_init();
2808 
2809 	NET_BUF_POOL_INIT(acl_tx_pool);
2810 #if CONFIG_BT_L2CAP_TX_FRAG_COUNT > 0
	NET_BUF_POOL_INIT(frag_pool);
#endif
2811 
2812 	/* Initialize background scan */
2813 	if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
2814 		for (i = 0; i < ARRAY_SIZE(conns); i++) {
2815 			struct bt_conn *conn = &conns[i];
2816 
2817 			if (!atomic_get(&conn->ref)) {
2818 				continue;
2819 			}
2820 
2821 #if !defined(CONFIG_BT_WHITELIST)
2822 			if (atomic_test_bit(conn->flags,
2823 					    BT_CONN_AUTO_CONNECT)) {
2824 				/* Only the default identity is supported */
2825 				conn->id = BT_ID_DEFAULT;
2826 				bt_conn_set_state(conn, BT_CONN_CONNECT_SCAN);
2827 			}
2828 #endif /* !defined(CONFIG_BT_WHITELIST) */
2829 		}
2830 	}
2831 
2832 	return 0;
2833 }
2834