1 /*
2 * Copyright (c) 2020 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6 #include <string.h>
7 #include <zephyr/bluetooth/mesh.h>
8 #include <common/bt_str.h>
9 #include "net.h"
10 #include "access.h"
11 #include "transport.h"
12 #include "lpn.h"
13 #include "blob.h"
14
15 #define LOG_LEVEL CONFIG_BT_MESH_MODEL_LOG_LEVEL
16 #include <zephyr/logging/log.h>
17 LOG_MODULE_REGISTER(bt_mesh_blob_srv);
18
19 #define MTU_SIZE_MAX (BT_MESH_RX_SDU_MAX - BT_MESH_MIC_SHORT)
20
21 /* The Receive BLOB Timeout Timer */
22 #define SERVER_TIMEOUT_SECS(srv) (10 * (1 + (srv)->state.timeout_base))
23 /* The initial timer value used by an instance of the Pull BLOB State machine - T_BPI */
24 #define REPORT_TIMER_TIMEOUT K_SECONDS(CONFIG_BT_MESH_BLOB_REPORT_TIMEOUT)
25
26 BUILD_ASSERT(BLOB_BLOCK_SIZE_LOG_MIN <= BLOB_BLOCK_SIZE_LOG_MAX,
27 "The must be at least one number between the min and "
28 "max block size that is the power of two.");
29
30 BUILD_ASSERT((BLOB_XFER_STATUS_MSG_MAXLEN + BT_MESH_MODEL_OP_LEN(BT_MESH_BLOB_OP_XFER_STATUS) +
31 BT_MESH_MIC_SHORT) <= BT_MESH_TX_SDU_MAX,
32 "The BLOB Transfer Status message does not fit into the maximum outgoing SDU size.");
33
34 BUILD_ASSERT((BLOB_BLOCK_REPORT_STATUS_MSG_MAXLEN +
35 BT_MESH_MODEL_OP_LEN(BT_MESH_BLOB_OP_BLOCK_REPORT) + BT_MESH_MIC_SHORT)
36 <= BT_MESH_TX_SDU_MAX,
37 "The BLOB Partial Block Report message does not fit into the maximum outgoing SDU "
38 "size.");
39
40 BUILD_ASSERT((BLOB_BLOCK_STATUS_MSG_MAXLEN + BT_MESH_MODEL_OP_LEN(BT_MESH_BLOB_OP_BLOCK_STATUS) +
41 BT_MESH_MIC_SHORT) <= BT_MESH_TX_SDU_MAX,
42 "The BLOB Block Status message does not fit into the maximum outgoing SDU size.");
43
44 static void cancel(struct bt_mesh_blob_srv *srv);
45 static void suspend(struct bt_mesh_blob_srv *srv);
46
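/* Number of blocks in the transfer: the BLOB size divided by the block size
 * (1 << block_size_log), rounded up.
 */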
47 static inline uint32_t block_count_get(const struct bt_mesh_blob_srv *srv)
48 {
49 return DIV_ROUND_UP(srv->state.xfer.size,
50 (1U << srv->state.xfer.block_size_log));
51 }
52
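/* Largest chunk payload that fits in a single Chunk Transfer message with the
 * negotiated MTU: the MTU minus the opcode and the 2-byte chunk number field,
 * capped by the local receive buffer size (BLOB_RX_CHUNK_SIZE).
 */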
53 static inline uint32_t max_chunk_size(const struct bt_mesh_blob_srv *srv)
54 {
55 return MIN((srv->state.mtu_size - 2 - BT_MESH_MODEL_OP_LEN(BT_MESH_BLOB_OP_CHUNK)),
56 BLOB_RX_CHUNK_SIZE);
57 }
58
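/* Upper bound on the number of chunks per block: limited by how many bits of
 * the missing-chunks bitfield fit in a Block Status message (8 bits per byte
 * after the 6 bytes subtracted here, presumably the opcode plus the fixed
 * status, block number and chunk size fields), and by the compile-time chunk
 * count limit.
 */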
59 static inline uint32_t max_chunk_count(const struct bt_mesh_blob_srv *srv)
60 {
61 return MIN(8 * (srv->state.mtu_size - 6),
62 CONFIG_BT_MESH_BLOB_CHUNK_COUNT_MAX);
63 }
64
65 static inline uint32_t missing_chunks(const struct bt_mesh_blob_block *block)
66 {
67 int i;
68 uint32_t count = 0;
69
70 for (i = 0; i < ARRAY_SIZE(block->missing); ++i) {
71 count += POPCOUNT(block->missing[i]);
72 }
73
74 return count;
75 }
76
77 static void store_state(const struct bt_mesh_blob_srv *srv)
78 {
79 if (!IS_ENABLED(CONFIG_BT_SETTINGS)) {
80 return;
81 }
82
83 /* Convert bit count to byte count: */
84 uint32_t block_len = DIV_ROUND_UP(block_count_get(srv), 8);
85
86 bt_mesh_model_data_store(
87 srv->mod, false, NULL, &srv->state,
88 offsetof(struct bt_mesh_blob_srv_state, blocks) + block_len);
89 }
90
91 static void erase_state(struct bt_mesh_blob_srv *srv)
92 {
93 if (!IS_ENABLED(CONFIG_BT_SETTINGS)) {
94 return;
95 }
96
97 bt_mesh_model_data_store(srv->mod, false, NULL, NULL, 0);
98 }
99
100 static int io_open(struct bt_mesh_blob_srv *srv)
101 {
102 if (!srv->io->open) {
103 return 0;
104 }
105
106 return srv->io->open(srv->io, &srv->state.xfer, BT_MESH_BLOB_WRITE);
107 }
108
109 static void io_close(struct bt_mesh_blob_srv *srv)
110 {
111 if (!srv->io->close) {
112 return;
113 }
114
115 srv->io->close(srv->io, &srv->state.xfer);
116 }
117
118 static void reset_timer(struct bt_mesh_blob_srv *srv)
119 {
120 uint32_t timeout_secs =
121 srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL ?
122 MAX(SERVER_TIMEOUT_SECS(srv),
123 CONFIG_BT_MESH_BLOB_REPORT_TIMEOUT + 1) :
124 SERVER_TIMEOUT_SECS(srv);
125 k_work_reschedule(&srv->rx_timeout, K_SECONDS(timeout_secs));
126 }
127
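/* Chunk indexes in the Pull mode reports are packed with a UTF-8 style
 * variable-length encoding: one byte for indexes below 0x80, two bytes below
 * 0x800, three bytes otherwise. For example, index 0x150 encodes as the two
 * bytes 0xc5, 0x90.
 */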
128 static void buf_chunk_index_add(struct net_buf_simple *buf, uint16_t chunk)
129 {
130 /* utf-8 encoded: */
131 if (chunk < 0x80) {
132 net_buf_simple_add_u8(buf, chunk);
133 } else if (chunk < 0x800) { /* two-byte form covers indexes up to 0x7ff */
134 net_buf_simple_add_u8(buf, 0xc0 | chunk >> 6);
135 net_buf_simple_add_u8(buf, 0x80 | (chunk & BIT_MASK(6)));
136 } else {
137 net_buf_simple_add_u8(buf, 0xe0 | chunk >> 12);
138 net_buf_simple_add_u8(buf, 0x80 | ((chunk >> 6) & BIT_MASK(6)));
139 net_buf_simple_add_u8(buf, 0x80 | (chunk & BIT_MASK(6)));
140 }
141 }
142
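/* Maximum number of missing chunks to request in a single Pull mode report,
 * capped by the configured request count and, on an LPN, by what the Friend
 * queue can hold.
 */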
143 static int pull_req_max(const struct bt_mesh_blob_srv *srv)
144 {
145 int count = CONFIG_BT_MESH_BLOB_SRV_PULL_REQ_COUNT;
146
147 #if defined(CONFIG_BT_MESH_LOW_POWER)
148 /* No point in requesting more than the friend node can hold: */
149 if (bt_mesh_lpn_established()) {
150 uint32_t segments_per_chunk = DIV_ROUND_UP(
151 BLOB_CHUNK_SDU_LEN(srv->state.xfer.chunk_size),
152 BT_MESH_APP_SEG_SDU_MAX);
153
154 /* It is possible that the friend node cannot hold all segments of a chunk. In
155 * that case, request at least 1 chunk, as requesting 0 would be invalid.
156 */
157 count = MAX(1, MIN(CONFIG_BT_MESH_BLOB_SRV_PULL_REQ_COUNT,
158 bt_mesh.lpn.queue_size / segments_per_chunk));
159
160 }
161 #endif
162
163 return MIN(count, missing_chunks(&srv->block));
164 }
165
166 static void report_sent(int err, void *cb_data)
167 {
168 struct bt_mesh_blob_srv *srv = cb_data;
169
170 LOG_DBG("");
171
172 if (IS_ENABLED(CONFIG_BT_MESH_LOW_POWER) && bt_mesh_lpn_established()) {
173 bt_mesh_lpn_poll();
174 }
175
176 if (k_work_delayable_is_pending(&srv->rx_timeout)) {
177 k_work_reschedule(&srv->pull.report, REPORT_TIMER_TIMEOUT);
178 }
179 }
180
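/* Send a BLOB Partial Block Report listing up to pull_req_max() missing chunk
 * indexes to the client (Pull mode). The report timer is rearmed from the send
 * callback as long as the transfer has not timed out.
 */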
181 static void block_report(struct bt_mesh_blob_srv *srv)
182 {
183 static const struct bt_mesh_send_cb report_cb = { .end = report_sent };
184 struct bt_mesh_msg_ctx ctx = {
185 .app_idx = srv->state.app_idx,
186 .send_ttl = srv->state.ttl,
187 .addr = srv->state.cli,
188 };
189 int count;
190 int i;
191
192 LOG_DBG("rx BLOB Timeout Timer: %i", k_work_delayable_is_pending(&srv->rx_timeout));
193
194 BT_MESH_MODEL_BUF_DEFINE(buf, BT_MESH_BLOB_OP_BLOCK_REPORT,
195 BLOB_BLOCK_REPORT_STATUS_MSG_MAXLEN);
196 bt_mesh_model_msg_init(&buf, BT_MESH_BLOB_OP_BLOCK_REPORT);
197
198 count = pull_req_max(srv);
199
200 for (i = 0; i < srv->block.chunk_count && count; ++i) {
201 if (blob_chunk_missing_get(srv->block.missing, i)) {
202 buf_chunk_index_add(&buf, i);
203 count--;
204 }
205 }
206
207 (void)bt_mesh_model_send(srv->mod, &ctx, &buf, &report_cb, srv);
208 }
209
210 static void phase_set(struct bt_mesh_blob_srv *srv,
211 enum bt_mesh_blob_xfer_phase phase)
212 {
213 srv->phase = phase;
214 LOG_DBG("Phase: %u", phase);
215 }
216
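/* Abort the transfer: reset the transfer state, stop both timers, close the
 * stream, erase any persisted state and notify the application that the
 * transfer ended unsuccessfully.
 */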
217 static void cancel(struct bt_mesh_blob_srv *srv)
218 {
219 /* TODO: Could this state be preserved instead somehow? Wiping the
220 * entire transfer state is a bit overkill
221 */
222 phase_set(srv, BT_MESH_BLOB_XFER_PHASE_INACTIVE);
223 srv->state.xfer.mode = BT_MESH_BLOB_XFER_MODE_NONE;
224 srv->state.ttl = BT_MESH_TTL_DEFAULT;
225 srv->block.number = 0xffff;
226 memset(srv->block.missing, 0, sizeof(srv->block.missing));
227 srv->state.xfer.chunk_size = 0xffff;
228 k_work_cancel_delayable(&srv->rx_timeout);
229 k_work_cancel_delayable(&srv->pull.report);
230 io_close(srv);
231 erase_state(srv);
232
233 if (srv->cb && srv->cb->end) {
234 srv->cb->end(srv, srv->state.xfer.id, false);
235 }
236 }
237
238 static void suspend(struct bt_mesh_blob_srv *srv)
239 {
240 LOG_DBG("");
241 k_work_cancel_delayable(&srv->rx_timeout);
242 k_work_cancel_delayable(&srv->pull.report);
243 phase_set(srv, BT_MESH_BLOB_XFER_PHASE_SUSPENDED);
244 if (srv->cb && srv->cb->suspended) {
245 srv->cb->suspended(srv);
246 }
247 }
248
249 static void resume(struct bt_mesh_blob_srv *srv)
250 {
251 LOG_DBG("Resuming");
252
253 phase_set(srv, BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_BLOCK);
254 reset_timer(srv);
255 }
256
257 static void end(struct bt_mesh_blob_srv *srv)
258 {
259 phase_set(srv, BT_MESH_BLOB_XFER_PHASE_COMPLETE);
260 k_work_cancel_delayable(&srv->rx_timeout);
261 k_work_cancel_delayable(&srv->pull.report);
262 io_close(srv);
263 erase_state(srv);
264
265 if (srv->cb && srv->cb->end) {
266 srv->cb->end(srv, srv->state.xfer.id, true);
267 }
268 }
269
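/* The bits in state.blocks are set for blocks that have not been received yet,
 * so the transfer is complete once every bit has been cleared.
 */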
270 static bool all_blocks_received(struct bt_mesh_blob_srv *srv)
271 {
272 for (int i = 0; i < ARRAY_SIZE(srv->state.blocks); ++i) {
273 if (srv->state.blocks[i]) {
274 return false;
275 }
276 }
277
278 return true;
279 }
280
281 static bool pull_mode_xfer_complete(struct bt_mesh_blob_srv *srv)
282 {
283 return srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL &&
284 srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_CHUNK &&
285 all_blocks_received(srv);
286 }
287
288 static void timeout(struct k_work *work)
289 {
290 struct bt_mesh_blob_srv *srv =
291 CONTAINER_OF(work, struct bt_mesh_blob_srv, rx_timeout.work);
292
293 LOG_DBG("");
294
295 if (srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START) {
296 cancel(srv);
297 } else if (pull_mode_xfer_complete(srv)) {
298 end(srv);
299 } else {
300 suspend(srv);
301 }
302 }
303
304 static void report_timeout(struct k_work *work)
305 {
306 struct bt_mesh_blob_srv *srv =
307 CONTAINER_OF(work, struct bt_mesh_blob_srv, pull.report.work);
308
309 LOG_DBG("");
310
311 if (srv->phase != BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_BLOCK &&
312 srv->phase != BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_CHUNK) {
313 return;
314 }
315
316 block_report(srv);
317 }
318
319 /*******************************************************************************
320 * Message handling
321 ******************************************************************************/
322
323 static void xfer_status_rsp(struct bt_mesh_blob_srv *srv,
324 struct bt_mesh_msg_ctx *ctx,
325 enum bt_mesh_blob_status status)
326 {
327 BT_MESH_MODEL_BUF_DEFINE(buf, BT_MESH_BLOB_OP_XFER_STATUS,
328 BLOB_XFER_STATUS_MSG_MAXLEN);
329 bt_mesh_model_msg_init(&buf, BT_MESH_BLOB_OP_XFER_STATUS);
330
331 net_buf_simple_add_u8(&buf, ((status & BIT_MASK(4)) |
332 (srv->state.xfer.mode << 6)));
333 net_buf_simple_add_u8(&buf, srv->phase);
334
335 if (srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
336 goto send;
337 }
338
339 net_buf_simple_add_le64(&buf, srv->state.xfer.id);
340
341 if (srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START) {
342 goto send;
343 }
344
345 net_buf_simple_add_le32(&buf, srv->state.xfer.size);
346 net_buf_simple_add_u8(&buf, srv->state.xfer.block_size_log);
347 net_buf_simple_add_le16(&buf, srv->state.mtu_size);
348 net_buf_simple_add_mem(&buf, srv->state.blocks,
349 DIV_ROUND_UP(block_count_get(srv), 8));
350
351 send:
352 ctx->send_ttl = srv->state.ttl;
353 (void)bt_mesh_model_send(srv->mod, ctx, &buf, NULL, NULL);
354 }
355
356 static void block_status_rsp(struct bt_mesh_blob_srv *srv,
357 struct bt_mesh_msg_ctx *ctx,
358 enum bt_mesh_blob_status status)
359 {
360 enum bt_mesh_blob_chunks_missing format;
361 uint32_t missing;
362 int i;
363
364 BT_MESH_MODEL_BUF_DEFINE(buf, BT_MESH_BLOB_OP_BLOCK_STATUS,
365 BLOB_BLOCK_STATUS_MSG_MAXLEN);
366 bt_mesh_model_msg_init(&buf, BT_MESH_BLOB_OP_BLOCK_STATUS);
367
368 if (srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE ||
369 srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START) {
370 missing = srv->block.chunk_count;
371 } else if (srv->phase == BT_MESH_BLOB_XFER_PHASE_COMPLETE) {
372 missing = 0U;
373 } else {
374 missing = missing_chunks(&srv->block);
375 }
376
377 if (srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL) {
378 format = BT_MESH_BLOB_CHUNKS_MISSING_ENCODED;
379 } else if (missing == srv->block.chunk_count) {
380 format = BT_MESH_BLOB_CHUNKS_MISSING_ALL;
381 } else if (missing == 0) {
382 format = BT_MESH_BLOB_CHUNKS_MISSING_NONE;
383 } else {
384 format = BT_MESH_BLOB_CHUNKS_MISSING_SOME;
385 }
386
387 LOG_DBG("Status: %u, missing: %u/%u", status, missing, srv->block.chunk_count);
388
389 net_buf_simple_add_u8(&buf, (status & BIT_MASK(4)) | (format << 6));
390 net_buf_simple_add_le16(&buf, srv->block.number);
391 net_buf_simple_add_le16(&buf, srv->state.xfer.chunk_size);
392
393 if (format == BT_MESH_BLOB_CHUNKS_MISSING_SOME) {
394 net_buf_simple_add_mem(&buf, srv->block.missing,
395 DIV_ROUND_UP(srv->block.chunk_count,
396 8));
397
398 LOG_DBG("Bits: %s",
399 bt_hex(srv->block.missing,
400 DIV_ROUND_UP(srv->block.chunk_count, 8)));
401
402 } else if (format == BT_MESH_BLOB_CHUNKS_MISSING_ENCODED) {
403 int count = pull_req_max(srv);
404
405 for (i = 0; (i < srv->block.chunk_count) && count; ++i) {
406 if (blob_chunk_missing_get(srv->block.missing, i)) {
407 LOG_DBG("Missing %u", i);
408 buf_chunk_index_add(&buf, i);
409 count--;
410 }
411 }
412 }
413
414 if (srv->phase != BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
415 ctx->send_ttl = srv->state.ttl;
416 }
417
418 (void)bt_mesh_model_send(srv->mod, ctx, &buf, NULL, NULL);
419 }
420
421 static int handle_xfer_get(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
422 struct net_buf_simple *buf)
423 {
424 struct bt_mesh_blob_srv *srv = mod->rt->user_data;
425
426 LOG_DBG("");
427
428 if (pull_mode_xfer_complete(srv)) {
429 /* The client requested the transfer status. If we are in Pull mode and all
430 * blocks have been received, move the transfer to the Complete phase here so
431 * that the client receives the correct state.
432 */
433 end(srv);
434 }
435
436 xfer_status_rsp(srv, ctx, BT_MESH_BLOB_SUCCESS);
437
438 return 0;
439 }
440
441 static int handle_xfer_start(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
442 struct net_buf_simple *buf)
443 {
444 struct bt_mesh_blob_srv *srv = mod->rt->user_data;
445 enum bt_mesh_blob_status status;
446 enum bt_mesh_blob_xfer_mode mode;
447 uint64_t id;
448 size_t size;
449 uint8_t block_size_log;
450 uint32_t block_count;
451 uint16_t mtu_size;
452 int err;
453
454 mode = (net_buf_simple_pull_u8(buf) >> 6);
455 id = net_buf_simple_pull_le64(buf);
456 size = net_buf_simple_pull_le32(buf);
457 block_size_log = net_buf_simple_pull_u8(buf);
458 mtu_size = net_buf_simple_pull_le16(buf);
459
460 LOG_DBG("\n\tsize: %u block size: %u\n\tmtu_size: %u\n\tmode: %s",
461 size, (1U << block_size_log), mtu_size,
462 mode == BT_MESH_BLOB_XFER_MODE_PUSH ? "push" : "pull");
463
464 if (mode != BT_MESH_BLOB_XFER_MODE_PULL &&
465 mode != BT_MESH_BLOB_XFER_MODE_PUSH) {
466 LOG_WRN("Invalid mode 0x%x", mode);
467 return -EINVAL;
468 }
469
470 if (srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
471 status = BT_MESH_BLOB_ERR_WRONG_PHASE;
472 LOG_WRN("Uninitialized");
473 goto rsp;
474 }
475
476 if (srv->state.xfer.id != id) {
477 status = BT_MESH_BLOB_ERR_WRONG_BLOB_ID;
478 /* bt_hex uses a static array for the resulting hex string, so it cannot be
479 * called twice in the same logging statement.
480 */
481 LOG_WRN("Invalid ID: %s", bt_hex(&id, sizeof(uint64_t)));
482 LOG_WRN("Expected ID: %s", bt_hex(&srv->state.xfer.id, sizeof(uint64_t)));
483 goto rsp;
484 }
485
486 if (srv->phase != BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START) {
487 if (srv->state.xfer.mode != mode ||
488 srv->state.xfer.size != size ||
489 srv->state.xfer.block_size_log != block_size_log ||
490 srv->state.mtu_size > mtu_size) {
491 status = BT_MESH_BLOB_ERR_WRONG_PHASE;
492 LOG_WRN("Busy");
493 goto rsp;
494 }
495
496 if (srv->phase == BT_MESH_BLOB_XFER_PHASE_SUSPENDED) {
497 resume(srv);
498 store_state(srv);
499 } else {
500 LOG_DBG("Duplicate");
501 }
502
503 status = BT_MESH_BLOB_SUCCESS;
504 goto rsp;
505 }
506
507 if (size > CONFIG_BT_MESH_BLOB_SIZE_MAX) {
508 LOG_WRN("Too large");
509 status = BT_MESH_BLOB_ERR_BLOB_TOO_LARGE;
510 goto rsp;
511 }
512
513 if (((1U << block_size_log) < CONFIG_BT_MESH_BLOB_BLOCK_SIZE_MIN) ||
514 ((1U << block_size_log) > CONFIG_BT_MESH_BLOB_BLOCK_SIZE_MAX)) {
515 LOG_WRN("Invalid block size: %u", block_size_log);
516 status = BT_MESH_BLOB_ERR_INVALID_BLOCK_SIZE;
517 goto rsp;
518 }
519
520 srv->state.cli = ctx->addr;
521 srv->state.app_idx = ctx->app_idx;
522 srv->state.mtu_size = MIN(mtu_size, MTU_SIZE_MAX);
523 srv->state.xfer.id = id;
524 srv->state.xfer.size = size;
525 srv->state.xfer.mode = mode;
526 srv->state.xfer.block_size_log = block_size_log;
527 srv->state.xfer.chunk_size = 0xffff;
528 srv->block.number = 0xffff;
529
530 block_count = block_count_get(srv);
531 if (block_count > BT_MESH_BLOB_BLOCKS_MAX) {
532 LOG_WRN("Invalid block count (%u)", block_count);
533 status = BT_MESH_BLOB_ERR_INVALID_PARAM;
534 cancel(srv);
535 goto rsp;
536 }
537
538 memset(srv->state.blocks, 0, sizeof(srv->state.blocks));
539 for (int i = 0; i < block_count; i++) {
540 atomic_set_bit(srv->state.blocks, i);
541 }
542
543 err = io_open(srv);
544 if (err) {
545 LOG_ERR("Couldn't open stream (err: %d)", err);
546 status = BT_MESH_BLOB_ERR_INTERNAL;
547 cancel(srv);
548 goto rsp;
549 }
550
551 if (srv->cb && srv->cb->start) {
552 err = srv->cb->start(srv, ctx, &srv->state.xfer);
553 if (err) {
554 LOG_ERR("Couldn't start transfer (err: %d)", err);
555 status = BT_MESH_BLOB_ERR_INTERNAL;
556 cancel(srv);
557 goto rsp;
558 }
559 }
560
561 reset_timer(srv);
562 phase_set(srv, BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_BLOCK);
563 store_state(srv);
564 status = BT_MESH_BLOB_SUCCESS;
565
566 rsp:
567 xfer_status_rsp(srv, ctx, status);
568
569 return 0;
570 }
571
572 static int handle_xfer_cancel(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
573 struct net_buf_simple *buf)
574 {
575 enum bt_mesh_blob_status status = BT_MESH_BLOB_SUCCESS;
576 struct bt_mesh_blob_srv *srv = mod->rt->user_data;
577 uint64_t id;
578
579 id = net_buf_simple_pull_le64(buf);
580
581 LOG_DBG("%u", (uint32_t)id);
582
583 if (srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
584 goto rsp;
585 }
586
587 if (srv->state.xfer.id != id) {
588 status = BT_MESH_BLOB_ERR_WRONG_BLOB_ID;
589 goto rsp;
590 }
591
592 cancel(srv);
593
594 rsp:
595 xfer_status_rsp(srv, ctx, status);
596
597 return 0;
598 }
599
600 static int handle_block_get(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
601 struct net_buf_simple *buf)
602 {
603 enum bt_mesh_blob_status status;
604 struct bt_mesh_blob_srv *srv = mod->rt->user_data;
605
606 switch (srv->phase) {
607 case BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_BLOCK:
608 case BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_CHUNK:
609 case BT_MESH_BLOB_XFER_PHASE_COMPLETE:
610 status = BT_MESH_BLOB_SUCCESS;
611 break;
612 case BT_MESH_BLOB_XFER_PHASE_SUSPENDED:
613 status = BT_MESH_BLOB_ERR_INFO_UNAVAILABLE;
614 break;
615 case BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START:
616 case BT_MESH_BLOB_XFER_PHASE_INACTIVE:
617 status = BT_MESH_BLOB_ERR_WRONG_PHASE;
618 break;
619 default:
620 status = BT_MESH_BLOB_ERR_INTERNAL;
621 break;
622 }
623
624 LOG_DBG("");
625
626 block_status_rsp(srv, ctx, status);
627
628 return 0;
629 }
630
631 static int handle_block_start(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
632 struct net_buf_simple *buf)
633 {
634 struct bt_mesh_blob_srv *srv = mod->rt->user_data;
635 enum bt_mesh_blob_status status;
636 uint16_t block_number, chunk_size;
637 int err;
638
639 block_number = net_buf_simple_pull_le16(buf);
640 chunk_size = net_buf_simple_pull_le16(buf);
641
642 if (srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START ||
643 srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
644 status = BT_MESH_BLOB_ERR_WRONG_PHASE;
645 goto rsp;
646 }
647
648 reset_timer(srv);
649
650 if (srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_CHUNK) {
651 if (block_number != srv->block.number ||
652 chunk_size != srv->state.xfer.chunk_size) {
653 status = BT_MESH_BLOB_ERR_WRONG_PHASE;
654 } else {
655 status = BT_MESH_BLOB_SUCCESS;
656 }
657
658 goto rsp;
659 }
660
661 if (block_number >= block_count_get(srv)) {
662 status = BT_MESH_BLOB_ERR_INVALID_BLOCK_NUM;
663 goto rsp;
664 }
665
666 if (!chunk_size || chunk_size > max_chunk_size(srv) ||
667 (DIV_ROUND_UP((1 << srv->state.xfer.block_size_log), chunk_size) >
668 max_chunk_count(srv))) {
669 LOG_WRN("Invalid chunk size: (chunk size: %u, max: %u, block log: %u, count: %u)",
670 chunk_size, max_chunk_size(srv),
671 srv->state.xfer.block_size_log,
672 max_chunk_count(srv));
673 status = BT_MESH_BLOB_ERR_INVALID_CHUNK_SIZE;
674 goto rsp;
675 }
676
677 srv->block.size = blob_block_size(
678 srv->state.xfer.size, srv->state.xfer.block_size_log, block_number);
679 srv->block.number = block_number;
680 srv->block.chunk_count = DIV_ROUND_UP(srv->block.size, chunk_size);
681 srv->state.xfer.chunk_size = chunk_size;
682 srv->block.offset = block_number * (1UL << srv->state.xfer.block_size_log);
683
684 if (srv->phase == BT_MESH_BLOB_XFER_PHASE_COMPLETE ||
685 !atomic_test_bit(srv->state.blocks, block_number)) {
686 memset(srv->block.missing, 0, sizeof(srv->block.missing));
687 status = BT_MESH_BLOB_SUCCESS;
688 goto rsp;
689 }
690
691 if (srv->phase == BT_MESH_BLOB_XFER_PHASE_SUSPENDED && srv->cb &&
692 srv->cb->resume) {
693 srv->cb->resume(srv);
694 }
695
696 phase_set(srv, BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_CHUNK);
697 blob_chunk_missing_set_all(&srv->block);
698
699 LOG_DBG("%u: (%u/%u)\n\tsize: %u\n\tchunk size: %u\n\tchunk count: %u",
700 srv->block.number, srv->block.number + 1, block_count_get(srv),
701 srv->block.size, chunk_size, srv->block.chunk_count);
702
703 if (srv->io->block_start) {
704 err = srv->io->block_start(srv->io, &srv->state.xfer,
705 &srv->block);
706 if (err) {
707 cancel(srv);
708 status = BT_MESH_BLOB_ERR_INTERNAL;
709 goto rsp;
710 }
711 }
712
713 if (srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL) {
714 /* Wait for the client to send the first chunk */
715 k_work_reschedule(&srv->pull.report, REPORT_TIMER_TIMEOUT);
716 }
717
718 status = BT_MESH_BLOB_SUCCESS;
719
720 rsp:
721 block_status_rsp(srv, ctx, status);
722
723 return 0;
724 }
725
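/* Handle an incoming BLOB Chunk Transfer message: validate the chunk index and
 * size (every chunk is chunk_size bytes except possibly the last one in the
 * block), write it to the stream, and close out the block or the whole
 * transfer once nothing is missing anymore.
 */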
726 static int handle_chunk(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
727 struct net_buf_simple *buf)
728 {
729 struct bt_mesh_blob_srv *srv = mod->rt->user_data;
730 struct bt_mesh_blob_chunk chunk;
731 size_t expected_size = 0;
732 uint16_t idx;
733 int err;
734
735 idx = net_buf_simple_pull_le16(buf);
736 chunk.size = buf->len;
737 chunk.data = net_buf_simple_pull_mem(buf, chunk.size);
738 chunk.offset = idx * srv->state.xfer.chunk_size;
739
740 if (srv->phase != BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_CHUNK ||
741 idx >= srv->block.chunk_count) {
742 LOG_ERR("Invalid phase or index (%u %u)", srv->phase,
743 idx);
744 return -EINVAL;
745 }
746
747 if (idx == srv->block.chunk_count - 1) {
748 expected_size = srv->block.size % srv->state.xfer.chunk_size;
749 }
750
751 if (expected_size == 0) {
752 expected_size = srv->state.xfer.chunk_size;
753 }
754
755 if (chunk.size != expected_size) {
756 LOG_ERR("Unexpected size: %u != %u", expected_size, chunk.size);
757 return -EINVAL;
758 }
759
760 LOG_DBG("%u/%u (%u bytes)", idx + 1, srv->block.chunk_count,
761 chunk.size);
762
763 reset_timer(srv);
764 if (srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL) {
765 k_work_reschedule(&srv->pull.report, REPORT_TIMER_TIMEOUT);
766 }
767
768 if (!blob_chunk_missing_get(srv->block.missing, idx)) {
769 LOG_DBG("Duplicate chunk %u", idx);
770 return -EALREADY;
771 }
772
773 err = srv->io->wr(srv->io, &srv->state.xfer, &srv->block, &chunk);
774 if (err) {
775 return err;
776 }
777
778 blob_chunk_missing_set(srv->block.missing, idx, false);
779 if (missing_chunks(&srv->block)) {
780 return 0;
781 }
782
783 if (srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL) {
784 block_report(srv);
785 }
786
787 if (srv->io->block_end) {
788 srv->io->block_end(srv->io, &srv->state.xfer, &srv->block);
789 }
790
791 atomic_clear_bit(srv->state.blocks, srv->block.number);
792
793 if (!all_blocks_received(srv)) {
794 phase_set(srv, BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_BLOCK);
795 store_state(srv);
796 return 0;
797 }
798
799 if (srv->state.xfer.mode == BT_MESH_BLOB_XFER_MODE_PULL) {
800 /* By spec (section 5.2.4), the BLOB Server stops sending BLOB Partial Block Report
801 * messages "If the current block is the last block, then the server determines that
802 * the client knows the transfer is complete. For example, a higher-layer model may
803 * indicate that the client considers the transfer complete."
804 *
805 * We don't have any way for a higher-layer model to indicate that the transfer is
806 * complete. Therefore we need to keep sending Partial Block Report messages until
807 * the client sends a BLOB Transfer Get message or the Block Timer expires.
808 */
809 return 0;
810 }
811
812 end(srv);
813 return 0;
814 }
815
816 static int handle_info_get(const struct bt_mesh_model *mod, struct bt_mesh_msg_ctx *ctx,
817 struct net_buf_simple *buf)
818 {
819 struct bt_mesh_blob_srv *srv = mod->rt->user_data;
820
821 LOG_DBG("");
822
823 BT_MESH_MODEL_BUF_DEFINE(rsp, BT_MESH_BLOB_OP_INFO_STATUS, 15);
824 bt_mesh_model_msg_init(&rsp, BT_MESH_BLOB_OP_INFO_STATUS);
825 net_buf_simple_add_u8(&rsp, BLOB_BLOCK_SIZE_LOG_MIN);
826 net_buf_simple_add_u8(&rsp, BLOB_BLOCK_SIZE_LOG_MAX);
827 net_buf_simple_add_le16(&rsp, CONFIG_BT_MESH_BLOB_CHUNK_COUNT_MAX);
828
829 #if defined(CONFIG_BT_MESH_LOW_POWER)
830 /* If friendship is established, the reported chunk size is limited by the friend's
831 * queue size:
832 * Chunk size = (Queue size * Segment size) - (Opcode (1) + Chunk Number (2) + 8-byte MIC (max))
833 */
834 if (bt_mesh_lpn_established() && bt_mesh.lpn.queue_size > 0) {
835 uint16_t chunk_size = (bt_mesh.lpn.queue_size * 12) - 11;
836
837 chunk_size = MIN(chunk_size, BLOB_RX_CHUNK_SIZE);
838 net_buf_simple_add_le16(&rsp, chunk_size);
839 if (bt_mesh.lpn.queue_size <= 2) {
840 LOG_WRN("FndQ too small %u", bt_mesh.lpn.queue_size);
841 }
842 } else {
843 net_buf_simple_add_le16(&rsp, BLOB_RX_CHUNK_SIZE);
844 }
845 #else
846 net_buf_simple_add_le16(&rsp, BLOB_RX_CHUNK_SIZE);
847 #endif
848
849 net_buf_simple_add_le32(&rsp, CONFIG_BT_MESH_BLOB_SIZE_MAX);
850 net_buf_simple_add_le16(&rsp, MTU_SIZE_MAX);
851 net_buf_simple_add_u8(&rsp, BT_MESH_BLOB_XFER_MODE_ALL);
852
853 if (srv->phase != BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
854 ctx->send_ttl = srv->state.ttl;
855 }
856
857 (void)bt_mesh_model_send(srv->mod, ctx, &rsp, NULL, NULL);
858
859 return 0;
860 }
861
862 const struct bt_mesh_model_op _bt_mesh_blob_srv_op[] = {
863 { BT_MESH_BLOB_OP_XFER_GET, BT_MESH_LEN_EXACT(0), handle_xfer_get },
864 { BT_MESH_BLOB_OP_XFER_START, BT_MESH_LEN_EXACT(16), handle_xfer_start },
865 { BT_MESH_BLOB_OP_XFER_CANCEL, BT_MESH_LEN_EXACT(8), handle_xfer_cancel },
866 { BT_MESH_BLOB_OP_BLOCK_GET, BT_MESH_LEN_EXACT(0), handle_block_get },
867 { BT_MESH_BLOB_OP_BLOCK_START, BT_MESH_LEN_EXACT(4), handle_block_start },
868 { BT_MESH_BLOB_OP_CHUNK, BT_MESH_LEN_MIN(2), handle_chunk },
869 { BT_MESH_BLOB_OP_INFO_GET, BT_MESH_LEN_EXACT(0), handle_info_get },
870 BT_MESH_MODEL_OP_END,
871 };
872
873 static int blob_srv_init(const struct bt_mesh_model *mod)
874 {
875 struct bt_mesh_blob_srv *srv = mod->rt->user_data;
876
877 srv->mod = mod;
878 srv->state.ttl = BT_MESH_TTL_DEFAULT;
879 srv->block.number = 0xffff;
880 srv->state.xfer.chunk_size = 0xffff;
881 k_work_init_delayable(&srv->rx_timeout, timeout);
882 k_work_init_delayable(&srv->pull.report, report_timeout);
883
884 return 0;
885 }
886
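/* Restore the transfer state from persistent storage after a reboot. Only the
 * state structure is stored, so the block and chunk bookkeeping is reset and
 * the phase is derived from the recovered state below.
 */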
887 static int blob_srv_settings_set(const struct bt_mesh_model *mod, const char *name,
888 size_t len_rd, settings_read_cb read_cb,
889 void *cb_arg)
890 {
891 struct bt_mesh_blob_srv *srv = mod->rt->user_data;
892 ssize_t len;
893
894 if (len_rd < offsetof(struct bt_mesh_blob_srv_state, blocks)) {
895 return -EINVAL;
896 }
897
898 len = read_cb(cb_arg, &srv->state, sizeof(srv->state));
899 if (len < 0) {
900 return len;
901 }
902
903 srv->block.number = 0xffff;
904 srv->state.xfer.chunk_size = 0xffff;
905
906 if (block_count_get(srv) > BT_MESH_BLOB_BLOCKS_MAX) {
907 LOG_WRN("Loaded block count too high (%u, max: %u)",
908 block_count_get(srv), BT_MESH_BLOB_BLOCKS_MAX);
909 return 0;
910 }
911
912 /* If the device restarted before the server handled `XFER_START`, restore the state
913 * to the BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START phase so that `XFER_START` can be
914 * accepted as it would have been before the reboot.
915 */
916 if (srv->state.cli == BT_MESH_ADDR_UNASSIGNED) {
917 LOG_DBG("Transfer (id=%llu) waiting for start", srv->state.xfer.id);
918 phase_set(srv, BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START);
919 } else {
920 phase_set(srv, BT_MESH_BLOB_XFER_PHASE_SUSPENDED);
921
922 LOG_DBG("Recovered transfer from 0x%04x (%llu)", srv->state.cli,
923 srv->state.xfer.id);
924 }
925
926 return 0;
927 }
928
929 static int blob_srv_start(const struct bt_mesh_model *mod)
930 {
931 struct bt_mesh_blob_srv *srv = mod->rt->user_data;
932 int err = -ENOTSUP;
933
934 if (srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE) {
935 return 0;
936 }
937
938 if (srv->cb && srv->cb->recover) {
939 srv->io = NULL;
940 err = srv->cb->recover(srv, &srv->state.xfer, &srv->io);
941 if (!err && srv->io) {
942 err = io_open(srv);
943 }
944 }
945
946 if (err || !srv->io) {
947 LOG_WRN("Abandoning transfer.");
948 phase_set(srv, BT_MESH_BLOB_XFER_PHASE_INACTIVE);
949 srv->state.xfer.mode = BT_MESH_BLOB_XFER_MODE_NONE;
950 srv->state.ttl = BT_MESH_TTL_DEFAULT;
951 erase_state(srv);
952 }
953
954 return 0;
955 }
956
957 static void blob_srv_reset(const struct bt_mesh_model *mod)
958 {
959 struct bt_mesh_blob_srv *srv = mod->rt->user_data;
960
961 phase_set(srv, BT_MESH_BLOB_XFER_PHASE_INACTIVE);
962 srv->state.xfer.mode = BT_MESH_BLOB_XFER_MODE_NONE;
963 k_work_cancel_delayable(&srv->rx_timeout);
964 k_work_cancel_delayable(&srv->pull.report);
965 erase_state(srv);
966 }
967
968 const struct bt_mesh_model_cb _bt_mesh_blob_srv_cb = {
969 .init = blob_srv_init,
970 .settings_set = blob_srv_settings_set,
971 .start = blob_srv_start,
972 .reset = blob_srv_reset,
973 };
974
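/* Example: how a higher-layer model might prepare the server for an expected
 * transfer. This is an illustrative sketch only; it assumes the
 * bt_mesh_blob_io_flash helper and a "slot1_partition" flash area, neither of
 * which is required by this API.
 *
 *	static struct bt_mesh_blob_io_flash blob_flash_stream;
 *
 *	static int prepare_blob_rx(struct bt_mesh_blob_srv *srv, uint64_t blob_id)
 *	{
 *		int err;
 *
 *		err = bt_mesh_blob_io_flash_init(&blob_flash_stream,
 *						 FIXED_PARTITION_ID(slot1_partition), 0);
 *		if (err) {
 *			return err;
 *		}
 *
 *		return bt_mesh_blob_srv_recv(srv, blob_id, &blob_flash_stream.io,
 *					     BT_MESH_TTL_DEFAULT, 0);
 *	}
 */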
975 int bt_mesh_blob_srv_recv(struct bt_mesh_blob_srv *srv, uint64_t id,
976 const struct bt_mesh_blob_io *io, uint8_t ttl,
977 uint16_t timeout_base)
978 {
979 if (bt_mesh_blob_srv_is_busy(srv)) {
980 return -EBUSY;
981 }
982
983 if (!io || !io->wr) {
984 return -EINVAL;
985 }
986
987 srv->state.xfer.id = id;
988 srv->state.ttl = ttl;
989 srv->state.timeout_base = timeout_base;
990 srv->io = io;
991 srv->block.number = 0xffff;
992 srv->state.xfer.chunk_size = 0xffff;
993 phase_set(srv, BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START);
994 store_state(srv);
995
996 return 0;
997 }
998
999 int bt_mesh_blob_srv_cancel(struct bt_mesh_blob_srv *srv)
1000 {
1001 if (!bt_mesh_blob_srv_is_busy(srv)) {
1002 return -EALREADY;
1003 }
1004
1005 cancel(srv);
1006
1007 return 0;
1008 }
1009
1010 bool bt_mesh_blob_srv_is_busy(const struct bt_mesh_blob_srv *srv)
1011 {
1012 return srv->phase != BT_MESH_BLOB_XFER_PHASE_INACTIVE &&
1013 srv->phase != BT_MESH_BLOB_XFER_PHASE_SUSPENDED &&
1014 srv->phase != BT_MESH_BLOB_XFER_PHASE_COMPLETE;
1015 }
1016
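/* Progress is reported as the percentage of blocks received so far, counted as
 * the blocks whose bits in state.blocks have been cleared.
 */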
1017 uint8_t bt_mesh_blob_srv_progress(const struct bt_mesh_blob_srv *srv)
1018 {
1019 uint32_t total;
1020 uint32_t received;
1021
1022 if (srv->phase == BT_MESH_BLOB_XFER_PHASE_INACTIVE ||
1023 srv->phase == BT_MESH_BLOB_XFER_PHASE_WAITING_FOR_START) {
1024 return 0;
1025 }
1026
1027 total = block_count_get(srv);
1028
1029 received = 0;
1030 for (int i = 0; i < total; ++i) {
1031 if (!atomic_test_bit(srv->state.blocks, i)) {
1032 received++;
1033 }
1034 }
1035
1036 return (100U * received) / total;
1037 }
1038