1 /*
2  * Copyright (c) 2023 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 /*
8  * ICBMsg backend.
9  *
10  * This is an IPC service backend that dynamically allocates buffers for data storage
11  * and uses ICMsg to send references to them.
12  *
13  * Shared memory organization
14  * --------------------------
15  *
16  * Single channel (RX or TX) of the shared memory is divided into two areas: ICMsg area
17  * followed by Blocks area. ICMsg is used to send and receive short 3-byte messages.
18  * Blocks area is evenly divided into aligned blocks. Blocks are used to allocate
19  * buffers containing actual data. Data buffers can span multiple blocks. The first block
20  * starts with the size of the following data.
21  *
22  *  +------------+-------------+
23  *  | ICMsg area | Blocks area |
24  *  +------------+-------------+
25  *       _______/               \_________________________________________
26  *      /                                                                 \
27  *      +-----------+-----------+-----------+-----------+-   -+-----------+
28  *      |  Block 0  |  Block 1  |  Block 2  |  Block 3  | ... | Block N-1 |
29  *      +-----------+-----------+-----------+-----------+-   -+-----------+
30  *            _____/                                     \_____
31  *           /                                                 \
32  *           +------+--------------------------------+---------+
33  *           | size | data_buffer[size] ...          | padding |
34  *           +------+--------------------------------+---------+
35  *
 * The sender tracks the reserved blocks in a bitarray and is responsible
 * for allocating and releasing the blocks. The receiver only tells the sender that it
 * does not need a specific buffer anymore.
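 *
 * For example (illustrative numbers only, assuming a 4-byte size field): with
 * 32-byte blocks, a 50-byte data buffer needs 54 bytes in total, so it occupies
 * two consecutive blocks and the remaining 10 bytes of the second block are padding.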
39  *
40  * Control messages
41  * ----------------
42  *
43  * ICMsg is used to send and receive small 3-byte control messages.
44  *
45  *  - Send data
46  *    | MSG_DATA | endpoint address | block index |
 *    This message is used to send a data buffer to a specific endpoint.
48  *
49  *  - Release data
50  *    | MSG_RELEASE_DATA | 0 | block index |
 *    This message is a response to the "Send data" message and it is used to inform
 *    that a specific buffer is not used anymore and can be released. The endpoint
 *    address does not matter here, so it is zero.
54  *
55  *  - Bound endpoint
56  *    | MSG_BOUND | endpoint address | block index |
57  *    This message starts the bounding of the endpoint. The buffer contains a
58  *    null-terminated endpoint name.
59  *
60  *  - Release bound endpoint
61  *    | MSG_RELEASE_BOUND | endpoint address | block index |
 *    This message is a response to the "Bound endpoint" message and it is used to inform
 *    that a specific buffer (starting at "block index") is not used anymore and that
 *    the endpoint is bound and can now receive data.
65  *
66  * Bounding endpoints
67  * ------------------
68  *
 * When ICMsg is bound and the user registers an endpoint on the initiator side, the
 * backend sends "Bound endpoint". The endpoint address is assigned by the initiator.
 * When the follower gets the message and the user on the follower side has also
 * registered the same endpoint, the backend calls the "bound" callback and sends back
 * "Release bound endpoint". The follower saves the endpoint address. The follower's
 * endpoint is then ready to send and receive data. When the initiator gets the
 * "Release bound endpoint" message or any data message, it calls the "bound" callback
 * and is ready to send data.
76  */
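
/*
 * Illustrative usage sketch (not part of this backend): how an application is
 * expected to interact with an ICBMsg instance through the generic IPC service
 * API. The devicetree node label "ipc0", the endpoint name and the function
 * names are assumptions made only for this example.
 *
 *	#include <zephyr/ipc/ipc_service.h>
 *
 *	static void ep_bound(void *priv)
 *	{
 *		// Bounding finished, sending is allowed from now on.
 *	}
 *
 *	static void ep_recv(const void *data, size_t len, void *priv)
 *	{
 *		// Handle the received data.
 *	}
 *
 *	static const struct ipc_ept_cfg ep_cfg = {
 *		.name = "example_ept",
 *		.cb = { .bound = ep_bound, .received = ep_recv },
 *	};
 *
 *	static struct ipc_ept ep;
 *
 *	static int example_init(void)
 *	{
 *		const struct device *ipc = DEVICE_DT_GET(DT_NODELABEL(ipc0));
 *
 *		ipc_service_open_instance(ipc);
 *		ipc_service_register_endpoint(ipc, &ep, &ep_cfg);
 *		// ...wait until ep_bound() is called before sending...
 *		return ipc_service_send(&ep, "hello", 5);
 *	}
 */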
77 
78 #undef _POSIX_C_SOURCE
79 #define _POSIX_C_SOURCE 200809L /* For strnlen() */
80 
81 #include <string.h>
82 
83 #include <zephyr/logging/log.h>
84 #include <zephyr/device.h>
85 #include <zephyr/sys/bitarray.h>
86 #include <zephyr/ipc/icmsg.h>
87 #include <zephyr/ipc/ipc_service_backend.h>
88 #include <zephyr/cache.h>
89 
90 #if defined(CONFIG_ARCH_POSIX)
91 #include <soc.h>
92 #define MAYBE_CONST
93 #else
94 #define MAYBE_CONST const
95 #endif
96 
97 LOG_MODULE_REGISTER(ipc_icbmsg,
98 		    CONFIG_IPC_SERVICE_BACKEND_ICBMSG_LOG_LEVEL);
99 
100 #define DT_DRV_COMPAT zephyr_ipc_icbmsg
101 
102 /** Allowed number of endpoints. */
103 #define NUM_EPT CONFIG_IPC_SERVICE_BACKEND_ICBMSG_NUM_EP
104 
105 /** Special endpoint address indicating invalid (or empty) entry. */
106 #define EPT_ADDR_INVALID 0xFF
107 
108 /** Special value for empty entry in bound message waiting table. */
109 #define WAITING_BOUND_MSG_EMPTY 0xFFFF
110 
111 /** Size of the header (size field) of the block. */
112 #define BLOCK_HEADER_SIZE (sizeof(struct block_header))
113 
/** Flag indicating that ICMsg was bound for this instance. */
115 #define CONTROL_BOUNDED BIT(31)
116 
117 /** Registered endpoints count mask in flags. */
118 #define FLAG_EPT_COUNT_MASK 0xFFFF
119 
120 /** Workqueue stack size for bounding processing (this configuration is not optimized). */
121 #define EP_BOUND_WORK_Q_STACK_SIZE \
122 	(CONFIG_IPC_SERVICE_BACKEND_ICBMSG_EP_BOUND_WORK_Q_STACK_SIZE)
123 
124 /** Workqueue priority for bounding processing. */
125 #define EP_BOUND_WORK_Q_PRIORITY (CONFIG_SYSTEM_WORKQUEUE_PRIORITY)
126 
127 enum msg_type {
128 	MSG_DATA = 0,		/* Data message. */
129 	MSG_RELEASE_DATA,	/* Release data buffer message. */
130 	MSG_BOUND,		/* Endpoint bounding message. */
	MSG_RELEASE_BOUND,	/* Release endpoint bound message.
				 * This message is also an indicator for the receiving
				 * side that the endpoint bounding was fully processed
				 * on the sender side.
				 */
136 };
137 
138 enum ept_bounding_state {
	EPT_UNCONFIGURED = 0,	/* Endpoint is not configured (initial state). */
140 	EPT_CONFIGURED,		/* Endpoint is configured, waiting for work queue to
141 				 * start bounding process.
142 				 */
	EPT_BOUNDING,		/* Only on initiator. Bound message was sent,
				 * but the bound callback was not called yet, because
				 * we are waiting for any incoming messages.
				 */
147 	EPT_READY,		/* Bounding is done. Bound callback was called. */
148 };
149 
150 enum ept_rebound_state {
151 	EPT_NORMAL = 0,		/* No endpoint rebounding is needed. */
152 	EPT_DEREGISTERED,	/* Endpoint was deregistered. */
153 	EPT_REBOUNDING,		/* Rebounding was requested, waiting for work queue to
154 				 * start rebounding process.
155 				 */
156 };
157 
158 struct channel_config {
159 	uint8_t *blocks_ptr;	/* Address where the blocks start. */
160 	size_t block_size;	/* Size of one block. */
161 	size_t block_count;	/* Number of blocks. */
162 };
163 
164 struct icbmsg_config {
165 	struct icmsg_config_t control_config;	/* Configuration of the ICMsg. */
166 	struct channel_config rx;		/* RX channel config. */
167 	struct channel_config tx;		/* TX channel config. */
	sys_bitarray_t *tx_usage_bitmap;	/* Bit is set when the TX block is in use. */
	sys_bitarray_t *rx_hold_bitmap;		/* Bit is set if the buffer starting at
						 * this block should be kept after exit
						 * from the receive handler.
						 */
173 };
174 
175 struct ept_data {
176 	const struct ipc_ept_cfg *cfg;	/* Endpoint configuration. */
177 	atomic_t state;			/* Bounding state. */
178 	atomic_t rebound_state;		/* Rebounding state. */
179 	uint8_t addr;			/* Endpoint address. */
180 };
181 
182 struct backend_data {
183 	const struct icbmsg_config *conf;/* Backend instance config. */
184 	struct icmsg_data_t control_data;/* ICMsg data. */
185 #ifdef CONFIG_MULTITHREADING
186 	struct k_mutex mutex;		/* Mutex to protect: ICMsg send call and
187 					 * waiting_bound field.
188 					 */
189 	struct k_work ep_bound_work;	/* Work item for bounding processing. */
190 	struct k_sem block_wait_sem;	/* Semaphore for waiting for free blocks. */
191 #endif
192 	struct ept_data ept[NUM_EPT];	/* Array of registered endpoints. */
193 	uint8_t ept_map[NUM_EPT];	/* Array that maps endpoint address to index. */
194 	uint16_t waiting_bound[NUM_EPT];/* The bound messages waiting to be registered. */
195 	atomic_t flags;			/* Flags on higher bits, number of registered
196 					 * endpoints on lower.
197 					 */
198 	bool is_initiator;		/* This side has an initiator role. */
199 };
200 
201 struct block_header {
	volatile size_t size;	/* Size of the data field. It must be volatile, because
				 * when this value is read and validated for security
				 * reasons, the compiler must not generate code that
				 * reads it again after the validation.
				 */
207 };
208 
209 struct block_content {
210 	struct block_header header;
211 	uint8_t data[];		/* Buffer data. */
212 };
213 
214 struct control_message {
215 	uint8_t msg_type;	/* Message type. */
216 	uint8_t ept_addr;	/* Endpoint address or zero for MSG_RELEASE_DATA. */
217 	uint8_t block_index;	/* Block index to send or release. */
218 };
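
/*
 * Example (illustrative): a "Send data" message for endpoint 2 that uses the
 * buffer starting at block 5 is encoded as the three bytes
 * { MSG_DATA, 2, 5 }, i.e. { 0x00, 0x02, 0x05 }.
 */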
219 
220 BUILD_ASSERT(NUM_EPT <= EPT_ADDR_INVALID, "Too many endpoints");
221 
222 #ifdef CONFIG_MULTITHREADING
223 /* Work queue for bounding processing. */
224 static struct k_work_q ep_bound_work_q;
225 #endif
226 
227 /**
228  * Calculate pointer to block from its index and channel configuration (RX or TX).
229  * No validation is performed.
230  */
static struct block_content *block_from_index(const struct channel_config *ch_conf,
					      size_t block_index)
233 {
234 	return (struct block_content *)(ch_conf->blocks_ptr +
235 					block_index * ch_conf->block_size);
236 }
237 
238 /**
239  * Calculate pointer to data buffer from block index and channel configuration (RX or TX).
 * Also validate the index and optionally the buffer size allocated on this block.
241  *
242  * @param[in]  ch_conf		The channel
243  * @param[in]  block_index	Block index
244  * @param[out] size		Size of the buffer allocated on the block if not NULL.
245  *				The size is also checked if it fits in the blocks area.
246  *				If it is NULL, no size validation is performed.
247  * @param[in]  invalidate_cache	If size is not NULL, invalidates cache for entire buffer
248  *				(all blocks). Otherwise, it is ignored.
249  * @return	Pointer to data buffer or NULL if validation failed.
250  */
static uint8_t *buffer_from_index_validate(const struct channel_config *ch_conf,
					   size_t block_index, size_t *size,
					   bool invalidate_cache)
254 {
255 	size_t allocable_size;
256 	size_t buffer_size;
257 	uint8_t *end_ptr;
258 	struct block_content *block;
259 
260 	if (block_index >= ch_conf->block_count) {
261 		LOG_ERR("Block index invalid");
262 		return NULL;
263 	}
264 
265 	block = block_from_index(ch_conf, block_index);
266 
267 	if (size != NULL) {
268 		if (invalidate_cache) {
269 			sys_cache_data_invd_range(block, BLOCK_HEADER_SIZE);
270 			__sync_synchronize();
271 		}
272 		allocable_size = ch_conf->block_count * ch_conf->block_size;
273 		end_ptr = ch_conf->blocks_ptr + allocable_size;
274 		buffer_size = block->header.size;
275 
276 		if ((buffer_size > allocable_size - BLOCK_HEADER_SIZE) ||
277 		    (&block->data[buffer_size] > end_ptr)) {
278 			LOG_ERR("Block corrupted");
279 			return NULL;
280 		}
281 
282 		*size = buffer_size;
283 		if (invalidate_cache) {
284 			sys_cache_data_invd_range(block->data, buffer_size);
285 			__sync_synchronize();
286 		}
287 	}
288 
289 	return block->data;
290 }
291 
292 /**
293  * Calculate block index based on data buffer pointer and validate it.
294  *
295  * @param[in]  ch_conf		The channel
296  * @param[in]  buffer		Pointer to data buffer
297  * @param[out] size		Size of the allocated buffer if not NULL.
298  *				The size is also checked if it fits in the blocks area.
299  *				If it is NULL, no size validation is performed.
300  * @return		Block index or negative error code
301  * @retval -EINVAL	The buffer is not correct
302  */
static int buffer_to_index_validate(const struct channel_config *ch_conf,
				    const uint8_t *buffer, size_t *size)
305 {
306 	size_t block_index;
307 	uint8_t *expected;
308 
309 	block_index = (buffer - ch_conf->blocks_ptr) / ch_conf->block_size;
310 
311 	expected = buffer_from_index_validate(ch_conf, block_index, size, false);
312 
313 	if (expected == NULL || expected != buffer) {
314 		LOG_ERR("Pointer invalid");
315 		return -EINVAL;
316 	}
317 
318 	return block_index;
319 }
320 
321 /**
322  * Allocate buffer for transmission
323  *
324  * @param[in,out] size	Required size of the buffer. If set to zero, the first available block will
325  *			be allocated, together with all contiguous free blocks that follow it.
326  *			On success, size will contain the actually allocated size, which will be
327  *			at least the requested size.
328  * @param[out] buffer	Pointer to the newly allocated buffer.
329  * @param[in] timeout	Timeout.
330  *
331  * @return		Positive index of the first allocated block or negative error.
332  * @retval -ENOMEM	If requested size is bigger than entire allocable space, or
333  *			the timeout was K_NO_WAIT and there was not enough space.
334  * @retval -EAGAIN	If timeout occurred.
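 *
 * Worked example (illustrative, assuming 32-byte blocks and a 4-byte block
 * header): a request for *size = 100 needs 104 bytes, i.e. 4 blocks, and on
 * success *size is updated to 4 * 32 - 4 = 124.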
335  */
static int alloc_tx_buffer(struct backend_data *dev_data, uint32_t *size,
			   uint8_t **buffer, k_timeout_t timeout)
338 {
339 	const struct icbmsg_config *conf = dev_data->conf;
340 	size_t total_size = *size + BLOCK_HEADER_SIZE;
341 	size_t num_blocks = DIV_ROUND_UP(total_size, conf->tx.block_size);
342 	struct block_content *block;
343 #ifdef CONFIG_MULTITHREADING
344 	bool sem_taken = false;
345 #endif
346 	size_t tx_block_index;
347 	size_t next_bit;
348 	int prev_bit_val;
349 	int r;
350 
351 #ifdef CONFIG_MULTITHREADING
352 	do {
353 		/* Try to allocate specified number of blocks. */
354 		r = sys_bitarray_alloc(conf->tx_usage_bitmap, num_blocks,
355 				       &tx_block_index);
356 		if (r == -ENOSPC && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
			/* Wait for a release if there is not enough space and exit
			 * the loop on timeout.
			 */
360 			r = k_sem_take(&dev_data->block_wait_sem, timeout);
361 			if (r < 0) {
362 				break;
363 			}
364 			sem_taken = true;
365 		} else {
366 			/* Exit loop if space was allocated or other error occurred. */
367 			break;
368 		}
369 	} while (true);
370 
	/* If the semaphore was taken, give it back, because this thread did not
	 * necessarily take all available space, so another thread may need it.
	 */
374 	if (sem_taken) {
375 		k_sem_give(&dev_data->block_wait_sem);
376 	}
377 #else
378 	/* Try to allocate specified number of blocks. */
379 	r = sys_bitarray_alloc(conf->tx_usage_bitmap, num_blocks, &tx_block_index);
380 #endif
381 
382 	if (r < 0) {
383 		if (r != -ENOSPC && r != -EAGAIN) {
384 			LOG_ERR("Failed to allocate buffer, err: %d", r);
385 			/* Only -EINVAL is allowed in this place. Any other code
386 			 * indicates something wrong with the logic.
387 			 */
388 			__ASSERT_NO_MSG(r == -EINVAL);
389 		}
390 
391 		if (r == -ENOSPC || r == -EINVAL) {
			/* IPC service requires -ENOMEM error in case of no memory. */
393 			r = -ENOMEM;
394 		}
395 		return r;
396 	}
397 
	/* If size is 0, try to allocate more blocks after the already allocated ones. */
399 	if (*size == 0) {
400 		prev_bit_val = 0;
401 		for (next_bit = tx_block_index + 1; next_bit < conf->tx.block_count;
402 		     next_bit++) {
403 			r = sys_bitarray_test_and_set_bit(conf->tx_usage_bitmap, next_bit,
404 							  &prev_bit_val);
			/* Setting the bit should always succeed. */
406 			__ASSERT_NO_MSG(r == 0);
407 			if (prev_bit_val) {
408 				break;
409 			}
410 		}
411 		num_blocks = next_bit - tx_block_index;
412 	}
413 
414 	/* Get block pointer and adjust size to actually allocated space. */
415 	*size = conf->tx.block_size * num_blocks - BLOCK_HEADER_SIZE;
416 	block = block_from_index(&conf->tx, tx_block_index);
417 	block->header.size = *size;
418 	*buffer = block->data;
419 	return tx_block_index;
420 }
421 
422 /**
423  * Release all or part of the blocks occupied by the buffer.
424  *
425  * @param[in] tx_block_index	First block index to release, no validation is performed,
426  *				so caller is responsible for passing valid index.
427  * @param[in] size		Size of data buffer, no validation is performed,
428  *				so caller is responsible for passing valid size.
429  * @param[in] new_size		If less than zero, release all blocks, otherwise reduce
430  *				size to this value and update size in block header.
431  *
432  * @returns		Positive block index where the buffer starts or negative error.
433  * @retval -EINVAL	If invalid buffer was provided or size is greater than already
434  *			allocated size.
435  */
static int release_tx_blocks(struct backend_data *dev_data, size_t tx_block_index,
			     size_t size, int new_size)
438 {
439 	const struct icbmsg_config *conf = dev_data->conf;
440 	struct block_content *block;
441 	size_t num_blocks;
442 	size_t total_size;
443 	size_t new_total_size;
444 	size_t new_num_blocks;
445 	size_t release_index;
446 	int r;
447 
448 	/* Calculate number of blocks. */
449 	total_size = size + BLOCK_HEADER_SIZE;
450 	num_blocks = DIV_ROUND_UP(total_size, conf->tx.block_size);
451 
452 	if (new_size >= 0) {
453 		/* Calculate and validate new values. */
454 		new_total_size = new_size + BLOCK_HEADER_SIZE;
455 		new_num_blocks = DIV_ROUND_UP(new_total_size, conf->tx.block_size);
456 		if (new_num_blocks > num_blocks) {
457 			LOG_ERR("Requested %d blocks, allocated %d", new_num_blocks,
458 				num_blocks);
459 			return -EINVAL;
460 		}
461 		/* Update actual buffer size and number of blocks to release. */
462 		block = block_from_index(&conf->tx, tx_block_index);
463 		block->header.size = new_size;
464 		release_index = tx_block_index + new_num_blocks;
465 		num_blocks = num_blocks - new_num_blocks;
466 	} else {
467 		/* If size is negative, release all blocks. */
468 		release_index = tx_block_index;
469 	}
470 
471 	if (num_blocks > 0) {
472 		/* Free bits in the bitmap. */
473 		r = sys_bitarray_free(conf->tx_usage_bitmap, num_blocks,
474 				      release_index);
475 		if (r < 0) {
476 			LOG_ERR("Cannot free bits, err %d", r);
477 			return r;
478 		}
479 
480 #ifdef CONFIG_MULTITHREADING
481 		/* Wake up all waiting threads. */
482 		k_sem_give(&dev_data->block_wait_sem);
483 #endif
484 	}
485 
486 	return tx_block_index;
487 }
488 
489 /**
490  * Release all or part of the blocks occupied by the buffer.
491  *
492  * @param[in] buffer	Buffer to release.
493  * @param[in] new_size	If less than zero, release all blocks, otherwise reduce size to
494  *			this value and update size in block header.
495  *
496  * @returns		Positive block index where the buffer starts or negative error.
497  * @retval -EINVAL	If invalid buffer was provided or size is greater than already
498  *			allocated size.
499  */
static int release_tx_buffer(struct backend_data *dev_data, const uint8_t *buffer,
			     int new_size)
502 {
503 	const struct icbmsg_config *conf = dev_data->conf;
504 	size_t size = 0;
505 	int tx_block_index;
506 
507 	tx_block_index = buffer_to_index_validate(&conf->tx, buffer, &size);
508 	if (tx_block_index < 0) {
509 		return tx_block_index;
510 	}
511 
512 	return release_tx_blocks(dev_data, tx_block_index, size, new_size);
513 }
514 
515 /**
 * Send a control message over ICMsg with the mutex locked. The mutex must be locked,
 * because ICMsg may return an error on concurrent invocations even when there is
 * enough space in the queue.
519  */
static int send_control_message(struct backend_data *dev_data, enum msg_type msg_type,
				uint8_t ept_addr, uint8_t block_index)
522 {
523 	const struct icbmsg_config *conf = dev_data->conf;
524 	const struct control_message message = {
525 		.msg_type = (uint8_t)msg_type,
526 		.ept_addr = ept_addr,
527 		.block_index = block_index,
528 	};
529 	int r;
530 
531 #ifdef CONFIG_MULTITHREADING
532 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
533 #endif
534 	r = icmsg_send(&conf->control_config, &dev_data->control_data, &message,
535 		       sizeof(message));
536 #ifdef CONFIG_MULTITHREADING
537 	k_mutex_unlock(&dev_data->mutex);
538 #endif
539 	if (r < sizeof(message)) {
540 		LOG_ERR("Cannot send over ICMsg, err %d", r);
541 	}
542 	return r;
543 }
544 
545 /**
546  * Release received buffer. This function will just send release control message.
547  *
548  * @param[in] buffer	Buffer to release.
549  * @param[in] msg_type	Message type: MSG_RELEASE_BOUND or MSG_RELEASE_DATA.
550  * @param[in] ept_addr	Endpoint address or zero for MSG_RELEASE_DATA.
551  *
552  * @return	zero or ICMsg send error.
553  */
static int send_release(struct backend_data *dev_data, const uint8_t *buffer,
			enum msg_type msg_type, uint8_t ept_addr)
556 {
557 	const struct icbmsg_config *conf = dev_data->conf;
558 	int rx_block_index;
559 
560 	rx_block_index = buffer_to_index_validate(&conf->rx, buffer, NULL);
561 	if (rx_block_index < 0) {
562 		return rx_block_index;
563 	}
564 
565 	return send_control_message(dev_data, msg_type, ept_addr, rx_block_index);
566 }
567 
568 /**
569  * Send data contained in specified block. It will adjust data size and flush cache
570  * if necessary. If sending failed, allocated blocks will be released.
571  *
572  * @param[in] msg_type		Message type: MSG_BOUND or MSG_DATA.
 * @param[in] ept_addr		Endpoint address.
574  * @param[in] tx_block_index	Index of first block containing data, it is not validated,
575  *				so caller is responsible for passing only valid index.
576  * @param[in] size		Actual size of the data, can be smaller than allocated,
577  *				but it cannot change number of required blocks.
578  *
579  * @return			number of bytes sent in the message or negative error code.
580  */
static int send_block(struct backend_data *dev_data, enum msg_type msg_type,
		      uint8_t ept_addr, size_t tx_block_index, size_t size)
583 {
584 	struct block_content *block;
585 	int r;
586 
587 	block = block_from_index(&dev_data->conf->tx, tx_block_index);
588 
589 	block->header.size = size;
590 	__sync_synchronize();
591 	sys_cache_data_flush_range(block, size + BLOCK_HEADER_SIZE);
592 
593 	r = send_control_message(dev_data, msg_type, ept_addr, tx_block_index);
594 	if (r < 0) {
595 		release_tx_blocks(dev_data, tx_block_index, size, -1);
596 	}
597 
598 	return r;
599 }
600 
601 /**
 * Find the endpoint that was registered with a name that matches the name
 * contained in the endpoint bound message received from the remote.
604  *
605  * @param[in] name	Endpoint name, it must be in a received block.
606  *
607  * @return	Found endpoint index or -ENOENT if not found.
608  */
static int find_ept_by_name(struct backend_data *dev_data, const char *name)
610 {
611 	const struct channel_config *rx_conf = &dev_data->conf->rx;
612 	const char *buffer_end = (const char *)rx_conf->blocks_ptr +
613 				 rx_conf->block_count * rx_conf->block_size;
614 	struct ept_data *ept;
615 	size_t name_size;
616 	size_t i;
617 
618 	/* Requested name is in shared memory, so we have to assume that it
619 	 * can be corrupted. Extra care must be taken to avoid out of
620 	 * bounds reads.
621 	 */
622 	name_size = strnlen(name, buffer_end - name - 1) + 1;
623 
624 	for (i = 0; i < NUM_EPT; i++) {
625 		ept = &dev_data->ept[i];
626 		if (atomic_get(&ept->state) == EPT_CONFIGURED &&
627 		    strncmp(ept->cfg->name, name, name_size) == 0) {
628 			return i;
629 		}
630 	}
631 
632 	return -ENOENT;
633 }
634 
635 /**
 * Find the registered endpoint that matches the given "bound endpoint" message.
 * When found, the "release bound endpoint" message is sent.
638  *
639  * @param[in] rx_block_index	Block containing the "bound endpoint" message.
640  * @param[in] ept_addr		Endpoint address.
641  *
642  * @return	negative error code or non-negative search result.
643  * @retval 0	match not found.
644  * @retval 1	match found and processing was successful.
645  */
static int match_bound_msg(struct backend_data *dev_data, size_t rx_block_index,
			   uint8_t ept_addr)
648 {
649 	const struct icbmsg_config *conf = dev_data->conf;
650 	struct block_content *block;
651 	uint8_t *buffer;
652 	int ept_index;
653 	struct ept_data *ept;
654 	int r;
655 	bool valid_state;
656 
657 	/* Find endpoint that matches requested name. */
658 	block = block_from_index(&conf->rx, rx_block_index);
659 	buffer = block->data;
660 	ept_index = find_ept_by_name(dev_data, buffer);
661 	if (ept_index < 0) {
662 		return 0;
663 	}
664 
665 	/* Set endpoint address and mapping. Move it to "ready" state. */
666 	ept = &dev_data->ept[ept_index];
667 	ept->addr = ept_addr;
668 	dev_data->ept_map[ept->addr] = ept_index;
669 	valid_state = atomic_cas(&ept->state, EPT_CONFIGURED, EPT_READY);
670 	if (!valid_state) {
671 		LOG_ERR("Unexpected bounding from remote on endpoint %d", ept_addr);
672 		return -EINVAL;
673 	}
674 
675 	/* Endpoint is ready to send messages, so call bound callback. */
676 	if (ept->cfg->cb.bound != NULL) {
677 		ept->cfg->cb.bound(ept->cfg->priv);
678 	}
679 
680 	/* Release the bound message and inform remote that we are ready to receive. */
681 	r = send_release(dev_data, buffer, MSG_RELEASE_BOUND, ept_addr);
682 	if (r < 0) {
683 		return r;
684 	}
685 
686 	return 1;
687 }
688 
689 /**
690  * Send bound message on specified endpoint.
691  *
692  * @param[in] ept	Endpoint to use.
693  *
694  * @return		non-negative value in case of success or negative error code.
695  */
static int send_bound_message(struct backend_data *dev_data, struct ept_data *ept)
697 {
698 	size_t msg_len;
699 	uint32_t alloc_size;
700 	uint8_t *buffer;
701 	int r;
702 
703 	msg_len = strlen(ept->cfg->name) + 1;
704 	alloc_size = msg_len;
705 	r = alloc_tx_buffer(dev_data, &alloc_size, &buffer, K_FOREVER);
706 	if (r >= 0) {
707 		strcpy(buffer, ept->cfg->name);
708 		r = send_block(dev_data, MSG_BOUND, ept->addr, r, msg_len);
709 	}
710 
711 	return r;
712 }
713 
714 #ifdef CONFIG_MULTITHREADING
715 /**
716  * Put endpoint bound processing into system workqueue.
717  */
static void schedule_ept_bound_process(struct backend_data *dev_data)
719 {
720 	k_work_submit_to_queue(&ep_bound_work_q, &dev_data->ep_bound_work);
721 }
722 #endif
723 
724 /**
 * Work handler that is responsible for starting the bounding when ICMsg is bound.
726  */
727 #ifdef CONFIG_MULTITHREADING
static void ept_bound_process(struct k_work *item)
729 #else
730 static void ept_bound_process(struct backend_data *dev_data)
731 #endif
732 {
733 #ifdef CONFIG_MULTITHREADING
734 	struct backend_data *dev_data = CONTAINER_OF(item, struct backend_data,
735 						     ep_bound_work);
736 #endif
737 	struct ept_data *ept = NULL;
738 	size_t i;
739 	int r = 0;
740 	bool matching_state;
741 
	/* Skip processing if ICMsg was not bound yet. */
743 	if (!(atomic_get(&dev_data->flags) & CONTROL_BOUNDED)) {
744 		return;
745 	}
746 
747 	if (dev_data->is_initiator) {
748 		/* Initiator just sends bound message after endpoint was registered. */
749 		for (i = 0; i < NUM_EPT; i++) {
750 			ept = &dev_data->ept[i];
751 			matching_state = atomic_cas(&ept->state, EPT_CONFIGURED,
752 						    EPT_BOUNDING);
753 			if (matching_state) {
754 				r = send_bound_message(dev_data, ept);
755 				if (r < 0) {
756 					atomic_set(&ept->state, EPT_UNCONFIGURED);
757 					LOG_ERR("Failed to send bound, err %d", r);
758 				}
759 			}
760 		}
761 	} else {
762 		/* Walk over all waiting bound messages and match to local endpoints. */
763 #ifdef CONFIG_MULTITHREADING
764 		k_mutex_lock(&dev_data->mutex, K_FOREVER);
765 #endif
766 		for (i = 0; i < NUM_EPT; i++) {
767 			if (dev_data->waiting_bound[i] != WAITING_BOUND_MSG_EMPTY) {
768 #ifdef CONFIG_MULTITHREADING
769 				k_mutex_unlock(&dev_data->mutex);
770 #endif
771 				r = match_bound_msg(dev_data,
772 						    dev_data->waiting_bound[i], i);
773 #ifdef CONFIG_MULTITHREADING
774 				k_mutex_lock(&dev_data->mutex, K_FOREVER);
775 #endif
776 				if (r != 0) {
777 					dev_data->waiting_bound[i] =
778 						WAITING_BOUND_MSG_EMPTY;
779 					if (r < 0) {
780 						LOG_ERR("Failed bound, err %d", r);
781 					}
782 				}
783 			}
784 		}
785 #ifdef CONFIG_MULTITHREADING
786 		k_mutex_unlock(&dev_data->mutex);
787 #endif
788 	}
789 
790 	/* Check if any endpoint is ready to rebound and call the callback if it is. */
791 	for (i = 0; i < NUM_EPT; i++) {
792 		ept = &dev_data->ept[i];
793 		matching_state = atomic_cas(&ept->rebound_state, EPT_REBOUNDING,
794 						EPT_NORMAL);
795 		if (matching_state) {
796 			if (ept->cfg->cb.bound != NULL) {
797 				ept->cfg->cb.bound(ept->cfg->priv);
798 			}
799 		}
800 	}
801 }
802 
803 /**
 * Get the endpoint from the endpoint address. Also validate that the address is correct
 * and the endpoint is in a correct state for receiving. If the bound callback was not
 * called yet, call it.
807  */
static struct ept_data *get_ept_and_rx_validate(struct backend_data *dev_data,
						uint8_t ept_addr)
810 {
811 	struct ept_data *ept;
812 	enum ept_bounding_state state;
813 
814 	if (ept_addr >= NUM_EPT || dev_data->ept_map[ept_addr] >= NUM_EPT) {
815 		LOG_ERR("Received invalid endpoint addr %d", ept_addr);
816 		return NULL;
817 	}
818 
819 	ept = &dev_data->ept[dev_data->ept_map[ept_addr]];
820 
821 	state = atomic_get(&ept->state);
822 
823 	if (state == EPT_READY) {
824 		/* Ready state, ensure that it is not deregistered nor rebounding. */
825 		if (atomic_get(&ept->rebound_state) != EPT_NORMAL) {
826 			return NULL;
827 		}
828 	} else if (state == EPT_BOUNDING) {
829 		/* Endpoint bound callback was not called yet - call it. */
830 		atomic_set(&ept->state, EPT_READY);
831 		if (ept->cfg->cb.bound != NULL) {
832 			ept->cfg->cb.bound(ept->cfg->priv);
833 		}
834 	} else {
835 		LOG_ERR("Invalid state %d of receiving endpoint %d", state, ept->addr);
836 		return NULL;
837 	}
838 
839 	return ept;
840 }
841 
842 /**
843  * Data message received.
844  */
static int received_data(struct backend_data *dev_data, size_t rx_block_index,
			 uint8_t ept_addr)
847 {
848 	const struct icbmsg_config *conf = dev_data->conf;
849 	uint8_t *buffer;
850 	struct ept_data *ept;
851 	size_t size;
852 	int bit_val;
853 
854 	/* Validate. */
855 	buffer = buffer_from_index_validate(&conf->rx, rx_block_index, &size, true);
856 	ept = get_ept_and_rx_validate(dev_data, ept_addr);
857 	if (buffer == NULL || ept == NULL) {
858 		LOG_ERR("Received invalid block index %d or addr %d", rx_block_index,
859 			ept_addr);
860 		return -EINVAL;
861 	}
862 
	/* Clear the bit. If cleared, this block will not be held after the callback. */
864 	sys_bitarray_clear_bit(conf->rx_hold_bitmap, rx_block_index);
865 
866 	/* Call the endpoint callback. It can set the hold bit. */
867 	ept->cfg->cb.received(buffer, size, ept->cfg->priv);
868 
869 	/* If the bit is still cleared, request release of the buffer. */
870 	sys_bitarray_test_bit(conf->rx_hold_bitmap, rx_block_index, &bit_val);
871 	if (!bit_val) {
872 		send_release(dev_data, buffer, MSG_RELEASE_DATA, 0);
873 	}
874 
875 	return 0;
876 }
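
/*
 * Illustrative sketch (application side, not part of this backend): the received
 * callback may keep the incoming buffer past its own return by holding it and
 * releasing it later with the IPC service no-copy API. Names are assumptions made
 * only for this example, and .priv is assumed to point at the endpoint object.
 *
 *	static void ep_recv(const void *data, size_t len, void *priv)
 *	{
 *		struct ipc_ept *ep = priv;
 *
 *		// Sets the hold bit, so the block is not released on return.
 *		ipc_service_hold_rx_buffer(ep, (void *)data);
 *		// ...queue the buffer for deferred processing...
 *	}
 *
 *	// Later, when the deferred processing is done:
 *	ipc_service_release_rx_buffer(ep, buffer);
 */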
877 
878 /**
879  * Release data message received.
880  */
static int received_release_data(struct backend_data *dev_data, size_t tx_block_index)
882 {
883 	const struct icbmsg_config *conf = dev_data->conf;
884 	uint8_t *buffer;
885 	size_t size;
886 	int r;
887 
888 	/* Validate. */
889 	buffer = buffer_from_index_validate(&conf->tx, tx_block_index, &size, false);
890 	if (buffer == NULL) {
891 		LOG_ERR("Received invalid block index %d", tx_block_index);
892 		return -EINVAL;
893 	}
894 
895 	/* Release. */
896 	r = release_tx_blocks(dev_data, tx_block_index, size, -1);
897 	if (r < 0) {
898 		return r;
899 	}
900 
901 	return r;
902 }
903 
904 /**
905  * Bound endpoint message received.
906  */
static int received_bound(struct backend_data *dev_data, size_t rx_block_index,
			  uint8_t ept_addr)
909 {
910 	const struct icbmsg_config *conf = dev_data->conf;
911 	size_t size;
912 	uint8_t *buffer;
913 
914 	/* Validate */
915 	buffer = buffer_from_index_validate(&conf->rx, rx_block_index, &size, true);
916 	if (buffer == NULL) {
917 		LOG_ERR("Received invalid block index %d", rx_block_index);
918 		return -EINVAL;
919 	}
920 
921 	/* Put message to waiting array. */
922 #ifdef CONFIG_MULTITHREADING
923 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
924 #endif
925 	dev_data->waiting_bound[ept_addr] = rx_block_index;
926 #ifdef CONFIG_MULTITHREADING
927 	k_mutex_unlock(&dev_data->mutex);
928 #endif
929 
930 #ifdef CONFIG_MULTITHREADING
931 	/* Schedule processing the message. */
932 	schedule_ept_bound_process(dev_data);
933 #else
934 	ept_bound_process(dev_data);
935 #endif
936 
937 	return 0;
938 }
939 
940 /**
941  * Callback called by ICMsg that handles message (data or endpoint bound) received
942  * from the remote.
943  *
944  * @param[in] data	Message received from the ICMsg.
945  * @param[in] len	Number of bytes of data.
946  * @param[in] priv	Opaque pointer to device instance.
947  */
static void control_received(const void *data, size_t len, void *priv)
949 {
950 	const struct device *instance = priv;
951 	struct backend_data *dev_data = instance->data;
952 	const struct control_message *message = (const struct control_message *)data;
953 	struct ept_data *ept;
954 	uint8_t ept_addr;
955 	int r = 0;
956 
957 	/* Allow messages longer than 3 bytes, e.g. for future protocol versions. */
958 	if (len < sizeof(struct control_message)) {
959 		r = -EINVAL;
960 		goto exit;
961 	}
962 
963 	ept_addr = message->ept_addr;
964 	if (ept_addr >= NUM_EPT) {
965 		r = -EINVAL;
966 		goto exit;
967 	}
968 
969 	switch (message->msg_type) {
970 	case MSG_RELEASE_DATA:
971 		r = received_release_data(dev_data, message->block_index);
972 		break;
973 	case MSG_RELEASE_BOUND:
974 		r = received_release_data(dev_data, message->block_index);
975 		if (r >= 0) {
976 			ept = get_ept_and_rx_validate(dev_data, ept_addr);
977 			if (ept == NULL) {
978 				r = -EINVAL;
979 			}
980 		}
981 		break;
982 	case MSG_BOUND:
983 		r = received_bound(dev_data, message->block_index, ept_addr);
984 		break;
985 	case MSG_DATA:
986 		r = received_data(dev_data, message->block_index, ept_addr);
987 		break;
988 	default:
		/* Silently ignore other message types. They can be used in a future
		 * protocol version.
		 */
992 		break;
993 	}
994 
995 exit:
996 	if (r < 0) {
997 		LOG_ERR("Failed to receive, err %d", r);
998 	}
999 }
1000 
1001 /**
1002  * Callback called when ICMsg is bound.
1003  */
static void control_bound(void *priv)
1005 {
1006 	const struct device *instance = priv;
1007 	struct backend_data *dev_data = instance->data;
1008 
	/* Set the flag that ICMsg is bound and endpoint bounding may now start. */
1010 	atomic_or(&dev_data->flags, CONTROL_BOUNDED);
1011 #ifdef CONFIG_MULTITHREADING
1012 	schedule_ept_bound_process(dev_data);
1013 #else
1014 	ept_bound_process(dev_data);
1015 #endif
1016 }
1017 
1018 /**
1019  * Open the backend instance callback.
1020  */
static int open(const struct device *instance)
1022 {
1023 	const struct icbmsg_config *conf = instance->config;
1024 	struct backend_data *dev_data = instance->data;
1025 
1026 	static const struct ipc_service_cb cb = {
1027 		.bound = control_bound,
1028 		.received = control_received,
1029 		.error = NULL,
1030 	};
1031 
1032 	LOG_DBG("Open instance 0x%08X, initiator=%d", (uint32_t)instance,
1033 		dev_data->is_initiator ? 1 : 0);
1034 	LOG_DBG("  TX %d blocks of %d bytes at 0x%08X, max allocable %d bytes",
1035 		(uint32_t)conf->tx.block_count,
1036 		(uint32_t)conf->tx.block_size,
1037 		(uint32_t)conf->tx.blocks_ptr,
1038 		(uint32_t)(conf->tx.block_size * conf->tx.block_count -
1039 			   BLOCK_HEADER_SIZE));
1040 	LOG_DBG("  RX %d blocks of %d bytes at 0x%08X, max allocable %d bytes",
1041 		(uint32_t)conf->rx.block_count,
1042 		(uint32_t)conf->rx.block_size,
1043 		(uint32_t)conf->rx.blocks_ptr,
1044 		(uint32_t)(conf->rx.block_size * conf->rx.block_count -
1045 			   BLOCK_HEADER_SIZE));
1046 
1047 	return icmsg_open(&conf->control_config, &dev_data->control_data, &cb,
1048 			  (void *)instance);
1049 }
1050 
1051 /**
1052  * Endpoint send callback function (with copy).
1053  */
static int send(const struct device *instance, void *token, const void *msg, size_t len)
1055 {
1056 	struct backend_data *dev_data = instance->data;
1057 	struct ept_data *ept = token;
1058 	uint32_t alloc_size;
1059 	uint8_t *buffer;
1060 	int r;
1061 
1062 	/* Allocate the buffer. */
1063 	alloc_size = len;
1064 	r = alloc_tx_buffer(dev_data, &alloc_size, &buffer, K_NO_WAIT);
1065 	if (r < 0) {
1066 		return r;
1067 	}
1068 
1069 	/* Copy data to allocated buffer. */
1070 	memcpy(buffer, msg, len);
1071 
1072 	/* Send data message. */
1073 	r = send_block(dev_data, MSG_DATA, ept->addr, r, len);
1074 	if (r < 0) {
1075 		return r;
1076 	}
1077 
1078 	return len;
1079 }
1080 
1081 /**
1082  * Backend endpoint registration callback.
1083  */
static int register_ept(const struct device *instance, void **token,
			const struct ipc_ept_cfg *cfg)
1086 {
1087 	struct backend_data *dev_data = instance->data;
1088 	struct ept_data *ept = NULL;
1089 	bool matching_state;
1090 	int ept_index;
1091 
1092 	/* Try to find endpoint to rebound */
1093 	for (ept_index = 0; ept_index < NUM_EPT; ept_index++) {
1094 		ept = &dev_data->ept[ept_index];
1095 		if (ept->cfg == cfg) {
1096 			matching_state = atomic_cas(&ept->rebound_state, EPT_DEREGISTERED,
1097 						   EPT_REBOUNDING);
1098 			if (!matching_state) {
1099 				return -EINVAL;
1100 			}
1101 #ifdef CONFIG_MULTITHREADING
1102 			schedule_ept_bound_process(dev_data);
1103 #else
1104 			ept_bound_process(dev_data);
1105 #endif
1106 			return 0;
1107 		}
1108 	}
1109 
1110 	/* Reserve new endpoint index. */
1111 	ept_index = atomic_inc(&dev_data->flags) & FLAG_EPT_COUNT_MASK;
1112 	if (ept_index >= NUM_EPT) {
1113 		LOG_ERR("Too many endpoints");
1114 		__ASSERT_NO_MSG(false);
1115 		return -ENOMEM;
1116 	}
1117 
1118 	/* Add new endpoint. */
1119 	ept = &dev_data->ept[ept_index];
1120 	ept->cfg = cfg;
1121 	if (dev_data->is_initiator) {
1122 		ept->addr = ept_index;
1123 		dev_data->ept_map[ept->addr] = ept->addr;
1124 	}
1125 	atomic_set(&ept->state, EPT_CONFIGURED);
1126 
	/* Keep the endpoint pointer in the token. */
1128 	*token = ept;
1129 
1130 #ifdef CONFIG_MULTITHREADING
	/* The rest of the bounding will be done in the endpoint bound workqueue. */
1132 	schedule_ept_bound_process(dev_data);
1133 #else
1134 	ept_bound_process(dev_data);
1135 #endif
1136 
1137 	return 0;
1138 }
1139 
1140 /**
1141  * Backend endpoint deregistration callback.
1142  */
static int deregister_ept(const struct device *instance, void *token)
1144 {
1145 	struct ept_data *ept = token;
1146 	bool matching_state;
1147 
1148 	matching_state = atomic_cas(&ept->rebound_state, EPT_NORMAL, EPT_DEREGISTERED);
1149 
1150 	if (!matching_state) {
1151 		return -EINVAL;
1152 	}
1153 
1154 	return 0;
1155 }
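
/*
 * Illustrative sketch (application side, not part of this backend): an endpoint
 * deregistered above can later be registered again with the very same
 * configuration object, which takes the rebound path in register_ept() instead
 * of a new bounding handshake. Names follow the usage sketch near the top of
 * this file and are assumptions made only for this example.
 *
 *	ipc_service_deregister_endpoint(&ep);
 *	// ...later, with the same ep_cfg object...
 *	ipc_service_register_endpoint(ipc, &ep, &ep_cfg);
 */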
1156 
1157 /**
1158  * Returns maximum TX buffer size.
1159  */
static int get_tx_buffer_size(const struct device *instance, void *token)
1161 {
1162 	const struct icbmsg_config *conf = instance->config;
1163 
1164 	return conf->tx.block_size * conf->tx.block_count - BLOCK_HEADER_SIZE;
1165 }
1166 
1167 /**
1168  * Endpoint TX buffer allocation callback for nocopy sending.
1169  */
static int get_tx_buffer(const struct device *instance, void *token, void **data,
			 uint32_t *user_len, k_timeout_t wait)
1172 {
1173 	struct backend_data *dev_data = instance->data;
1174 	int r;
1175 
1176 	r = alloc_tx_buffer(dev_data, user_len, (uint8_t **)data, wait);
1177 	if (r < 0) {
1178 		return r;
1179 	}
1180 	return 0;
1181 }
1182 
1183 /**
1184  * Endpoint TX buffer release callback for nocopy sending.
1185  */
static int drop_tx_buffer(const struct device *instance, void *token, const void *data)
1187 {
1188 	struct backend_data *dev_data = instance->data;
1189 	int r;
1190 
	r = release_tx_buffer(dev_data, data, -1);
1192 	if (r < 0) {
1193 		return r;
1194 	}
1195 
1196 	return 0;
1197 }
1198 
1199 /**
1200  * Endpoint nocopy sending.
1201  */
static int send_nocopy(const struct device *instance, void *token, const void *data,
		       size_t len)
1204 {
1205 	struct backend_data *dev_data = instance->data;
1206 	struct ept_data *ept = token;
1207 	int r;
1208 
1209 	/* Actual size may be smaller than requested, so shrink if possible. */
1210 	r = release_tx_buffer(dev_data, data, len);
1211 	if (r < 0) {
1212 		release_tx_buffer(dev_data, data, -1);
1213 		return r;
1214 	}
1215 
1216 	return send_block(dev_data, MSG_DATA, ept->addr, r, len);
1217 }
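
/*
 * Illustrative sketch (application side, not part of this backend): the no-copy
 * TX path pairs a buffer obtained from the backend with a no-copy send, or with
 * a drop if the application decides not to send it at all. Variable names
 * (ep, used_len) are assumptions made only for this example.
 *
 *	uint32_t size = 128;
 *	void *buf;
 *
 *	if (ipc_service_get_tx_buffer(&ep, &buf, &size, K_MSEC(100)) == 0) {
 *		// ...fill at most `size` bytes of buf...
 *		ipc_service_send_nocopy(&ep, buf, used_len);
 *		// If nothing is to be sent, release the buffer instead:
 *		// ipc_service_drop_tx_buffer(&ep, buf);
 *	}
 */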
1218 
1219 /**
1220  * Holding RX buffer for nocopy receiving.
1221  */
static int hold_rx_buffer(const struct device *instance, void *token, void *data)
1223 {
1224 	const struct icbmsg_config *conf = instance->config;
1225 	int rx_block_index;
1226 	uint8_t *buffer = data;
1227 
1228 	/* Calculate block index and set associated bit. */
1229 	rx_block_index = buffer_to_index_validate(&conf->rx, buffer, NULL);
1230 	__ASSERT_NO_MSG(rx_block_index >= 0);
1231 	return sys_bitarray_set_bit(conf->rx_hold_bitmap, rx_block_index);
1232 }
1233 
1234 /**
1235  * Release RX buffer that was previously held.
1236  */
static int release_rx_buffer(const struct device *instance, void *token, void *data)
1238 {
1239 	struct backend_data *dev_data = instance->data;
1240 	int r;
1241 
1242 	r = send_release(dev_data, (uint8_t *)data, MSG_RELEASE_DATA, 0);
1243 	if (r < 0) {
1244 		return r;
1245 	}
1246 	return 0;
1247 }
1248 
1249 /**
1250  * Backend device initialization.
1251  */
static int backend_init(const struct device *instance)
1253 {
1254 	MAYBE_CONST struct icbmsg_config *conf = (struct icbmsg_config *)instance->config;
1255 	struct backend_data *dev_data = instance->data;
1256 #ifdef CONFIG_MULTITHREADING
1257 	static K_THREAD_STACK_DEFINE(ep_bound_work_q_stack, EP_BOUND_WORK_Q_STACK_SIZE);
1258 	static bool is_work_q_started;
1259 	struct k_work_queue_config work_q_cfg = { .name = "icbmsg_workq" };
1260 
1261 #if defined(CONFIG_ARCH_POSIX)
1262 	native_emb_addr_remap((void **)&conf->tx.blocks_ptr);
1263 	native_emb_addr_remap((void **)&conf->rx.blocks_ptr);
1264 #endif
1265 
1266 	if (!is_work_q_started) {
1267 		k_work_queue_init(&ep_bound_work_q);
1268 		k_work_queue_start(&ep_bound_work_q, ep_bound_work_q_stack,
1269 				   K_THREAD_STACK_SIZEOF(ep_bound_work_q_stack),
1270 				   EP_BOUND_WORK_Q_PRIORITY, &work_q_cfg);
1271 
1272 		is_work_q_started = true;
1273 	}
1274 #endif
1275 
1276 	dev_data->conf = conf;
1277 	dev_data->is_initiator = (conf->rx.blocks_ptr < conf->tx.blocks_ptr);
1278 #ifdef CONFIG_MULTITHREADING
1279 	k_mutex_init(&dev_data->mutex);
1280 	k_work_init(&dev_data->ep_bound_work, ept_bound_process);
1281 	k_sem_init(&dev_data->block_wait_sem, 0, 1);
1282 #endif
1283 	memset(&dev_data->waiting_bound, 0xFF, sizeof(dev_data->waiting_bound));
1284 	memset(&dev_data->ept_map, EPT_ADDR_INVALID, sizeof(dev_data->ept_map));
1285 	return 0;
1286 }
1287 
1288 /**
1289  * IPC service backend callbacks.
1290  */
1291 const static struct ipc_service_backend backend_ops = {
1292 	.open_instance = open,
1293 	.close_instance = NULL, /* not implemented */
1294 	.send = send,
1295 	.register_endpoint = register_ept,
1296 	.deregister_endpoint = deregister_ept,
1297 	.get_tx_buffer_size = get_tx_buffer_size,
1298 	.get_tx_buffer = get_tx_buffer,
1299 	.drop_tx_buffer = drop_tx_buffer,
1300 	.send_nocopy = send_nocopy,
1301 	.hold_rx_buffer = hold_rx_buffer,
1302 	.release_rx_buffer = release_rx_buffer,
1303 };
1304 
1305 /**
1306  * Required block alignment.
1307  */
1308 #define BLOCK_ALIGNMENT sizeof(uint32_t)
1309 
1310 /**
1311  * Number of bytes per each ICMsg message. It is used to calculate size of ICMsg area.
1312  */
1313 #define BYTES_PER_ICMSG_MESSAGE (ROUND_UP(sizeof(struct control_message),		\
1314 					  sizeof(void *)) + PBUF_PACKET_LEN_SZ)
1315 
1316 /**
1317  * Maximum ICMsg overhead. It is used to calculate size of ICMsg area.
1318  */
1319 #define ICMSG_BUFFER_OVERHEAD(i)							\
1320 	(PBUF_HEADER_OVERHEAD(GET_CACHE_ALIGNMENT(i)) + 2 * BYTES_PER_ICMSG_MESSAGE)
1321 
1322 /**
1323  * Returns required data cache alignment for instance "i".
1324  */
1325 #define GET_CACHE_ALIGNMENT(i) \
1326 	MAX(BLOCK_ALIGNMENT, DT_INST_PROP_OR(i, dcache_alignment, 0))
1327 
1328 /**
 * Calculates the minimum size required for the ICMsg region for a specific number of
 * local and remote blocks. The minimum size ensures that the ICMsg queue will never
 * overflow, because it can hold a data message for each local block and a release
 * message for each remote block.
1333  */
1334 #define GET_ICMSG_MIN_SIZE(i, local_blocks, remote_blocks) ROUND_UP(			\
1335 	(ICMSG_BUFFER_OVERHEAD(i) + BYTES_PER_ICMSG_MESSAGE *				\
1336 	 (local_blocks + remote_blocks)), GET_CACHE_ALIGNMENT(i))
1337 
1338 /**
1339  * Calculate aligned block size by evenly dividing remaining space after removing
1340  * the space for ICMsg.
1341  */
1342 #define GET_BLOCK_SIZE(i, total_size, local_blocks, remote_blocks) ROUND_DOWN(		\
1343 	((total_size) - GET_ICMSG_MIN_SIZE(i, (local_blocks), (remote_blocks))) /	\
1344 	(local_blocks), BLOCK_ALIGNMENT)
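
/*
 * Worked example (illustrative numbers only): for a 4096-byte local region with
 * 16 local and 24 remote blocks, the ICMsg area must be able to queue one data
 * message per local block plus one release message per remote block (16 + 24
 * entries plus the ICMsg/pbuf overhead). The space left after that area is then
 * divided by 16 and rounded down to BLOCK_ALIGNMENT to get the local block size.
 */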
1345 
1346 /**
 * Calculate the offset where the blocks area starts, which is just after the ICMsg area.
1348  */
1349 #define GET_BLOCKS_OFFSET(i, total_size, local_blocks, remote_blocks)			\
1350 	((total_size) - GET_BLOCK_SIZE(i, (total_size), (local_blocks),			\
1351 				       (remote_blocks)) * (local_blocks))
1352 
1353 /**
1354  * Return shared memory start address aligned to block alignment and cache line.
1355  */
1356 #define GET_MEM_ADDR_INST(i, direction) \
1357 	ROUND_UP(DT_REG_ADDR(DT_INST_PHANDLE(i, direction##_region)),			\
1358 		 GET_CACHE_ALIGNMENT(i))
1359 
1360 /**
1361  * Return shared memory end address aligned to block alignment and cache line.
1362  */
1363 #define GET_MEM_END_INST(i, direction)							\
1364 	ROUND_DOWN(DT_REG_ADDR(DT_INST_PHANDLE(i, direction##_region)) +		\
1365 		   DT_REG_SIZE(DT_INST_PHANDLE(i, direction##_region)),			\
1366 		   GET_CACHE_ALIGNMENT(i))
1367 
1368 /**
1369  * Return shared memory size aligned to block alignment and cache line.
1370  */
1371 #define GET_MEM_SIZE_INST(i, direction) \
1372 	(GET_MEM_END_INST(i, direction) - GET_MEM_ADDR_INST(i, direction))
1373 
1374 /**
 * Returns the ICMsg area size for a specific instance and direction.
 * The 'loc' and 'rem' parameters tell the direction. They can be either "tx, rx"
 * or "rx, tx".
1378  */
1379 #define GET_ICMSG_SIZE_INST(i, loc, rem)					\
1380 	GET_BLOCKS_OFFSET(							\
1381 		i,								\
1382 		GET_MEM_SIZE_INST(i, loc),					\
1383 		DT_INST_PROP(i, loc##_blocks),					\
1384 		DT_INST_PROP(i, rem##_blocks))
1385 
1386 /**
1387  * Returns address where area for blocks starts for specific instance and direction.
 * The 'loc' and 'rem' parameters tell the direction. They can be either "tx, rx"
 * or "rx, tx".
1390  */
1391 #define GET_BLOCKS_ADDR_INST(i, loc, rem)					\
1392 	GET_MEM_ADDR_INST(i, loc) +						\
1393 	GET_BLOCKS_OFFSET(							\
1394 		i,								\
1395 		GET_MEM_SIZE_INST(i, loc),					\
1396 		DT_INST_PROP(i, loc##_blocks),					\
1397 		DT_INST_PROP(i, rem##_blocks))
1398 
1399 /**
1400  * Returns block size for specific instance and direction.
 * The 'loc' and 'rem' parameters tell the direction. They can be either "tx, rx"
 * or "rx, tx".
1403  */
1404 #define GET_BLOCK_SIZE_INST(i, loc, rem)					\
1405 	GET_BLOCK_SIZE(								\
1406 		i,								\
1407 		GET_MEM_SIZE_INST(i, loc),					\
1408 		DT_INST_PROP(i, loc##_blocks),					\
1409 		DT_INST_PROP(i, rem##_blocks))
1410 
1411 #define DEFINE_BACKEND_DEVICE(i)							\
1412 	SYS_BITARRAY_DEFINE_STATIC(tx_usage_bitmap_##i, DT_INST_PROP(i, tx_blocks));	\
1413 	SYS_BITARRAY_DEFINE_STATIC(rx_hold_bitmap_##i, DT_INST_PROP(i, rx_blocks));	\
1414 	PBUF_DEFINE(tx_icbmsg_pb_##i,							\
1415 			GET_MEM_ADDR_INST(i, tx),					\
1416 			GET_ICMSG_SIZE_INST(i, tx, rx),					\
1417 			GET_CACHE_ALIGNMENT(i), 0, 0);					\
1418 	PBUF_DEFINE(rx_icbmsg_pb_##i,							\
1419 			GET_MEM_ADDR_INST(i, rx),					\
1420 			GET_ICMSG_SIZE_INST(i, rx, tx),					\
1421 			GET_CACHE_ALIGNMENT(i), 0, 0);					\
1422 	static struct backend_data backend_data_##i = {					\
1423 		.control_data = {							\
1424 			.tx_pb = &tx_icbmsg_pb_##i,					\
1425 			.rx_pb = &rx_icbmsg_pb_##i,					\
1426 		}									\
1427 	};										\
1428 	static MAYBE_CONST struct icbmsg_config backend_config_##i =			\
1429 	{										\
1430 		.control_config = {							\
1431 			.mbox_tx = MBOX_DT_SPEC_INST_GET(i, tx),			\
1432 			.mbox_rx = MBOX_DT_SPEC_INST_GET(i, rx),			\
1433 			.unbound_mode = ICMSG_UNBOUND_MODE_DISABLE,			\
1434 		},									\
1435 		.tx = {									\
1436 			.blocks_ptr = (uint8_t *)GET_BLOCKS_ADDR_INST(i, tx, rx),	\
1437 			.block_count = DT_INST_PROP(i, tx_blocks),			\
1438 			.block_size = GET_BLOCK_SIZE_INST(i, tx, rx),			\
1439 		},									\
1440 		.rx = {									\
1441 			.blocks_ptr = (uint8_t *)GET_BLOCKS_ADDR_INST(i, rx, tx),	\
1442 			.block_count = DT_INST_PROP(i, rx_blocks),			\
1443 			.block_size = GET_BLOCK_SIZE_INST(i, rx, tx),			\
1444 		},									\
1445 		.tx_usage_bitmap = &tx_usage_bitmap_##i,				\
1446 		.rx_hold_bitmap = &rx_hold_bitmap_##i,					\
1447 	};										\
1448 	BUILD_ASSERT(IS_POWER_OF_TWO(GET_CACHE_ALIGNMENT(i)),				\
1449 		     "This module supports only power of two cache alignment");		\
1450 	BUILD_ASSERT((GET_BLOCK_SIZE_INST(i, tx, rx) >= BLOCK_ALIGNMENT) &&		\
1451 		     (GET_BLOCK_SIZE_INST(i, tx, rx) <					\
1452 		      GET_MEM_SIZE_INST(i, tx)),					\
1453 		     "TX region is too small for provided number of blocks");		\
1454 	BUILD_ASSERT((GET_BLOCK_SIZE_INST(i, rx, tx) >= BLOCK_ALIGNMENT) &&		\
1455 		     (GET_BLOCK_SIZE_INST(i, rx, tx) <					\
1456 		      GET_MEM_SIZE_INST(i, rx)),					\
1457 		     "RX region is too small for provided number of blocks");		\
1458 	BUILD_ASSERT(DT_INST_PROP(i, rx_blocks) <= 256, "Too many RX blocks");		\
1459 	BUILD_ASSERT(DT_INST_PROP(i, tx_blocks) <= 256, "Too many TX blocks");		\
1460 	DEVICE_DT_INST_DEFINE(i,							\
1461 			      &backend_init,						\
1462 			      NULL,							\
1463 			      &backend_data_##i,					\
1464 			      &backend_config_##i,					\
1465 			      POST_KERNEL,						\
1466 			      CONFIG_IPC_SERVICE_REG_BACKEND_PRIORITY,			\
1467 			      &backend_ops);
1468 
1469 DT_INST_FOREACH_STATUS_OKAY(DEFINE_BACKEND_DEVICE)
1470