1 /*
2  * Copyright (c) 2025 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include "modem_backend_uart_async.h"
8 
9 #include <zephyr/logging/log.h>
10 LOG_MODULE_REGISTER(modem_backend_uart_async_hwfc, CONFIG_MODEM_MODULES_LOG_LEVEL);
11 
12 #include <zephyr/kernel.h>
13 #include <string.h>
14 
/* Reference-counted receive buffer block, allocated from async->rx_slab.
 * The flexible array member `buf` carries the actual receive data; the
 * reference counter tracks outstanding users of the block (the initial
 * reference from rx_buf_alloc() plus one per queued RX event).
 */
struct rx_buf_t {
	atomic_t ref_counter;
	uint8_t buf[];
};
19 
/* Map a pointer into a block's data area back to the rx_buf_t header that
 * owns it. `buf` must point somewhere within the `buf[]` member of a block
 * allocated from async->rx_slab.
 */
static inline struct rx_buf_t *block_start_get(struct modem_backend_uart_async *async, uint8_t *buf)
{
	size_t offset;

	/* Offset of the data pointer into the slab arena, shifted back past the
	 * header so that it falls inside the owning block.
	 */
	offset = (size_t)buf - sizeof(struct rx_buf_t) - (size_t)async->rx_slab.buffer;

	/* Round down to the start of the containing block. */
	offset -= offset % async->rx_buf_size;

	return (struct rx_buf_t *)&async->rx_slab.buffer[offset];
}
30 
rx_buf_alloc(struct modem_backend_uart_async * async)31 static struct rx_buf_t *rx_buf_alloc(struct modem_backend_uart_async *async)
32 {
33 	struct rx_buf_t *buf;
34 
35 	if (k_mem_slab_alloc(&async->rx_slab, (void **) &buf, K_NO_WAIT)) {
36 		return NULL;
37 	}
38 	atomic_set(&buf->ref_counter, 1);
39 
40 	return buf;
41 }
42 
rx_buf_ref(struct modem_backend_uart_async * async,void * buf)43 static void rx_buf_ref(struct modem_backend_uart_async *async, void *buf)
44 {
45 	atomic_inc(&(block_start_get(async, buf)->ref_counter));
46 }
47 
rx_buf_unref(struct modem_backend_uart_async * async,void * buf)48 static void rx_buf_unref(struct modem_backend_uart_async *async, void *buf)
49 {
50 	struct rx_buf_t *uart_buf = block_start_get(async, buf);
51 	atomic_t ref_counter = atomic_dec(&uart_buf->ref_counter);
52 
53 	if (ref_counter == 1) {
54 		k_mem_slab_free(&async->rx_slab, (void *)uart_buf);
55 	}
56 }
57 
/* Bit positions within async.common.state. */
enum {
	/* Pipe is open; set in open(), cleared in close(). */
	MODEM_BACKEND_UART_ASYNC_STATE_OPEN_BIT,
	/* An asynchronous uart_tx() is in flight. */
	MODEM_BACKEND_UART_ASYNC_STATE_TRANSMIT_BIT,
	/* RX was disabled (e.g. out of buffers); re-enable is pending. */
	MODEM_BACKEND_UART_ASYNC_STATE_RECOVERY_BIT,
};
63 
modem_backend_uart_async_hwfc_rx_enable(struct modem_backend_uart * backend)64 static int modem_backend_uart_async_hwfc_rx_enable(struct modem_backend_uart *backend)
65 {
66 	int ret;
67 	struct rx_buf_t *buf = rx_buf_alloc(&backend->async);
68 
69 	if (!buf) {
70 		return -ENOMEM;
71 	}
72 
73 	ret = uart_rx_enable(backend->uart, buf->buf,
74 			     backend->async.rx_buf_size - sizeof(struct rx_buf_t),
75 			     CONFIG_MODEM_BACKEND_UART_ASYNC_RECEIVE_IDLE_TIMEOUT_MS * 1000);
76 	if (ret) {
77 		rx_buf_unref(&backend->async, buf->buf);
78 		return ret;
79 	}
80 
81 	return 0;
82 }
83 
/* Attempt to re-enable RX after the driver disabled it (typically because it
 * ran out of receive buffers). Called from the receive path once the reader
 * has consumed data and blocks may be free again. No-op unless recovery is
 * pending.
 */
static void modem_backend_uart_async_hwfc_rx_recovery(struct modem_backend_uart *backend)
{
	int err;

	if (!atomic_test_bit(&backend->async.common.state,
			     MODEM_BACKEND_UART_ASYNC_STATE_RECOVERY_BIT)) {
		return;
	}

	err = modem_backend_uart_async_hwfc_rx_enable(backend);
	if (err) {
		/* Likely still no free block; keep the recovery bit set so a
		 * later receive call retries.
		 */
		LOG_DBG("RX recovery failed: %d", err);
		return;
	}

	/* RX is running again. If the recovery bit has already been cleared,
	 * close() ran while we were re-enabling, so undo the enable.
	 */
	if (!atomic_test_and_clear_bit(&backend->async.common.state,
				       MODEM_BACKEND_UART_ASYNC_STATE_RECOVERY_BIT)) {
		/* Closed during recovery. */
		uart_rx_disable(backend->uart);
	} else {
		LOG_DBG("RX recovery success");
	}
}
107 
modem_backend_uart_async_hwfc_is_uart_stopped(const struct modem_backend_uart * backend)108 static bool modem_backend_uart_async_hwfc_is_uart_stopped(const struct modem_backend_uart *backend)
109 {
110 	if (!atomic_test_bit(&backend->async.common.state,
111 			     MODEM_BACKEND_UART_ASYNC_STATE_OPEN_BIT) &&
112 	    !atomic_test_bit(&backend->async.common.state,
113 			     MODEM_BACKEND_UART_ASYNC_STATE_RECOVERY_BIT) &&
114 	    !atomic_test_bit(&backend->async.common.state,
115 			     MODEM_BACKEND_UART_ASYNC_STATE_TRANSMIT_BIT)) {
116 		return true;
117 	}
118 
119 	return false;
120 }
121 
modem_backend_uart_async_hwfc_is_open(const struct modem_backend_uart * backend)122 static bool modem_backend_uart_async_hwfc_is_open(const struct modem_backend_uart *backend)
123 {
124 	return atomic_test_bit(&backend->async.common.state,
125 			       MODEM_BACKEND_UART_ASYNC_STATE_OPEN_BIT);
126 }
127 
/* UART async-API event handler. Runs in the driver's (typically ISR) context:
 * feeds received chunks into the RX queue, hands fresh slab blocks to the
 * driver, manages the TRANSMIT/RECOVERY state bits, and signals full stop
 * via rx_disabled_work once all activity has ceased.
 */
static void modem_backend_uart_async_hwfc_event_handler(const struct device *dev,
						   struct uart_event *evt, void *user_data)
{
	struct modem_backend_uart *backend = (struct modem_backend_uart *) user_data;
	struct rx_queue_event rx_event;
	int err;

	switch (evt->type) {
	case UART_TX_DONE:
		/* Transmission finished; allow the next transmit and wake waiters. */
		atomic_clear_bit(&backend->async.common.state,
				 MODEM_BACKEND_UART_ASYNC_STATE_TRANSMIT_BIT);
		k_work_submit(&backend->transmit_idle_work);
		break;

	case UART_TX_ABORTED:
		/* Only warn while open; aborts are expected during close(). */
		if (modem_backend_uart_async_hwfc_is_open(backend)) {
			LOG_WRN("Transmit aborted (%zu sent)", evt->data.tx.len);
		}
		atomic_clear_bit(&backend->async.common.state,
				 MODEM_BACKEND_UART_ASYNC_STATE_TRANSMIT_BIT);
		k_work_submit(&backend->transmit_idle_work);

		break;

	case UART_RX_BUF_REQUEST:
		/* Driver wants a next buffer; hand it a fresh slab block. */
		struct rx_buf_t *buf = rx_buf_alloc(&backend->async);

		if (!buf) {
			/* Out of blocks: the driver will disable RX, and
			 * UART_RX_DISABLED below starts recovery.
			 */
			LOG_DBG("No receive buffer, disabling RX");
			break;
		}
		err = uart_rx_buf_rsp(backend->uart, buf->buf,
				      backend->async.rx_buf_size - sizeof(struct rx_buf_t));
		if (err) {
			LOG_ERR("uart_rx_buf_rsp: %d", err);
			rx_buf_unref(&backend->async, buf->buf);
		}
		break;

	case UART_RX_BUF_RELEASED:
		/* Driver no longer uses the block; drop its allocation reference. */
		if (evt->data.rx_buf.buf) {
			rx_buf_unref(&backend->async, evt->data.rx_buf.buf);
		}
		break;

	case UART_RX_RDY:
		/* Data arrived: take an extra reference for the queued event so
		 * the block outlives the driver's release of it.
		 */
		if (evt->data.rx.buf) {
			rx_buf_ref(&backend->async, evt->data.rx.buf);
			rx_event.buf = &evt->data.rx.buf[evt->data.rx.offset];
			rx_event.len = evt->data.rx.len;
			err = k_msgq_put(&backend->async.rx_queue, &rx_event, K_NO_WAIT);
			if (err) {
				/* Queue full: drop the chunk and its reference. */
				LOG_WRN("RX queue overflow: %d (dropped %u)", err,
					evt->data.rx.len);
				rx_buf_unref(&backend->async, evt->data.rx.buf);
				break;
			}
			k_work_schedule(&backend->receive_ready_work, K_NO_WAIT);
		}
		break;

	case UART_RX_DISABLED:
		/* While open, schedule a recovery attempt; receive_ready_work
		 * ends up calling rx_recovery() from thread context.
		 */
		if (atomic_test_bit(&backend->async.common.state,
				    MODEM_BACKEND_UART_ASYNC_STATE_OPEN_BIT)) {
			if (!atomic_test_and_set_bit(&backend->async.common.state,
						     MODEM_BACKEND_UART_ASYNC_STATE_RECOVERY_BIT)) {
				k_work_schedule(&backend->receive_ready_work, K_NO_WAIT);
				LOG_DBG("RX recovery started");
			}
		}
		break;

	case UART_RX_STOPPED:
		LOG_WRN("Receive stopped for reasons: %u", (uint8_t)evt->data.rx_stop.reason);
		break;

	default:
		break;
	}

	/* When closed with no recovery or transmission pending, the backend is
	 * fully stopped; notify the pipe from the work queue.
	 */
	if (modem_backend_uart_async_hwfc_is_uart_stopped(backend)) {
		k_work_submit(&backend->async.common.rx_disabled_work);
	}
}
212 
modem_backend_uart_async_hwfc_open(void * data)213 static int modem_backend_uart_async_hwfc_open(void *data)
214 {
215 	struct modem_backend_uart *backend = (struct modem_backend_uart *)data;
216 	struct rx_buf_t *buf = rx_buf_alloc(&backend->async);
217 	int ret;
218 
219 	if (!buf) {
220 		return -ENOMEM;
221 	}
222 
223 	atomic_clear(&backend->async.common.state);
224 	atomic_set_bit(&backend->async.common.state, MODEM_BACKEND_UART_ASYNC_STATE_OPEN_BIT);
225 
226 	ret = uart_rx_enable(backend->uart, buf->buf,
227 			     backend->async.rx_buf_size - sizeof(struct rx_buf_t),
228 			     CONFIG_MODEM_BACKEND_UART_ASYNC_RECEIVE_IDLE_TIMEOUT_MS * 1000L);
229 	if (ret < 0) {
230 		rx_buf_unref(&backend->async, buf->buf);
231 		atomic_clear(&backend->async.common.state);
232 		return ret;
233 	}
234 
235 	modem_pipe_notify_opened(&backend->pipe);
236 	return 0;
237 }
238 
239 #if CONFIG_MODEM_STATS
/* Total usable RX capacity: per-block data size times the block count. */
static uint32_t get_receive_buf_size(struct modem_backend_uart *backend)
{
	return (backend->async.rx_buf_size - sizeof(struct rx_buf_t)) * backend->async.rx_buf_count;
}
244 
/* Report the number of bytes handed to uart_tx() to the TX buffer stats. */
static void advertise_transmit_buf_stats(struct modem_backend_uart *backend, uint32_t length)
{
	modem_stats_buffer_advertise_length(&backend->transmit_buf_stats, length);
}
249 
/* Report the number of RX bytes currently held (queued + partial) to the RX
 * buffer stats.
 */
static void advertise_receive_buf_stats(struct modem_backend_uart *backend, uint32_t reserved)
{
	modem_stats_buffer_advertise_length(&backend->receive_buf_stats, reserved);
}
254 #endif
255 
/* Size of the intermediate transmit buffer handed to the UART driver. */
static uint32_t get_transmit_buf_size(const struct modem_backend_uart *backend)
{
	return backend->async.common.transmit_buf_size;
}
260 
modem_backend_uart_async_hwfc_transmit(void * data,const uint8_t * buf,size_t size)261 static int modem_backend_uart_async_hwfc_transmit(void *data, const uint8_t *buf, size_t size)
262 {
263 	struct modem_backend_uart *backend = (struct modem_backend_uart *)data;
264 	bool transmitting;
265 	uint32_t bytes_to_transmit;
266 	int ret;
267 
268 	transmitting = atomic_test_and_set_bit(&backend->async.common.state,
269 					       MODEM_BACKEND_UART_ASYNC_STATE_TRANSMIT_BIT);
270 	if (transmitting) {
271 		return 0;
272 	}
273 
274 	/* Determine amount of bytes to transmit */
275 	bytes_to_transmit = MIN(size, get_transmit_buf_size(backend));
276 
277 	/* Copy buf to transmit buffer which is passed to UART */
278 	memcpy(backend->async.common.transmit_buf, buf, bytes_to_transmit);
279 
280 	ret = uart_tx(backend->uart, backend->async.common.transmit_buf, bytes_to_transmit,
281 		      CONFIG_MODEM_BACKEND_UART_ASYNC_TRANSMIT_TIMEOUT_MS * 1000L);
282 
283 #if CONFIG_MODEM_STATS
284 	advertise_transmit_buf_stats(backend, bytes_to_transmit);
285 #endif
286 
287 	if (ret != 0) {
288 		LOG_ERR("Failed to %s %u bytes. (%d)",
289 			"start async transmit for", bytes_to_transmit, ret);
290 		return ret;
291 	}
292 
293 	return (int)bytes_to_transmit;
294 }
295 
/* Pipe receive callback: copy up to `size` bytes out of the queued RX events
 * into `buf`. Returns the number of bytes copied. Re-schedules the
 * receive-ready notification while data remains, and kicks RX recovery in
 * case reception was disabled for lack of buffers.
 */
static int modem_backend_uart_async_hwfc_receive(void *data, uint8_t *buf, size_t size)
{
	struct modem_backend_uart *backend = (struct modem_backend_uart *)data;
	size_t received = 0;
	size_t copy_size = 0;

#if CONFIG_MODEM_STATS
	struct rx_queue_event rx_event;
	/* Bytes currently held: the partially consumed event plus everything
	 * still queued.
	 */
	size_t reserved = backend->async.rx_event.len;

	for (int i = 0; i < k_msgq_num_used_get(&backend->async.rx_queue); i++) {
		if (k_msgq_peek_at(&backend->async.rx_queue, &rx_event, i)) {
			break;
		}
		reserved += rx_event.len;
	}
	advertise_receive_buf_stats(backend, reserved);
#endif
	while (size > received) {
		/* Keeping track of the async.rx_event allows us to receive less than what the event
		 * indicates.
		 */
		if (backend->async.rx_event.len == 0) {
			if (k_msgq_get(&backend->async.rx_queue, &backend->async.rx_event,
				       K_NO_WAIT)) {
				break;
			}
		}
		copy_size = MIN(size - received, backend->async.rx_event.len);
		memcpy(buf, backend->async.rx_event.buf, copy_size);
		buf += copy_size;
		received += copy_size;
		/* Advance the cursor; rx_event.buf stays within the owning
		 * slab block, so block_start_get() still resolves it.
		 */
		backend->async.rx_event.buf += copy_size;
		backend->async.rx_event.len -= copy_size;

		/* Event fully consumed: drop the reference taken in UART_RX_RDY. */
		if (backend->async.rx_event.len	== 0) {
			rx_buf_unref(&backend->async, backend->async.rx_event.buf);
		}
	}

	/* More data pending: have the pipe notified again. */
	if (backend->async.rx_event.len != 0 ||
	    k_msgq_num_used_get(&backend->async.rx_queue) != 0) {
		k_work_schedule(&backend->receive_ready_work, K_NO_WAIT);
	}

	modem_backend_uart_async_hwfc_rx_recovery(backend);

	return (int)received;
}
345 
/* Pipe close callback: clear the OPEN bit and stop TX/RX. The resulting
 * UART events clear the remaining state bits, after which the event handler
 * submits rx_disabled_work to notify the pipe that the backend is closed.
 */
static int modem_backend_uart_async_hwfc_close(void *data)
{
	struct modem_backend_uart *backend = (struct modem_backend_uart *)data;

	atomic_clear_bit(&backend->async.common.state, MODEM_BACKEND_UART_ASYNC_STATE_OPEN_BIT);
	uart_tx_abort(backend->uart);

	/* If recovery was pending, RX is already disabled; clearing the bit
	 * here also tells an in-flight rx_recovery() to disable RX itself.
	 */
	if (!atomic_test_and_clear_bit(&backend->async.common.state,
				      MODEM_BACKEND_UART_ASYNC_STATE_RECOVERY_BIT)) {
		/* Disable the RX, if recovery is not ongoing. */
		uart_rx_disable(backend->uart);
	}

	return 0;
}
361 
/* modem_pipe operations implemented by this async HWFC UART backend. */
static const struct modem_pipe_api modem_backend_uart_async_api = {
	.open = modem_backend_uart_async_hwfc_open,
	.transmit = modem_backend_uart_async_hwfc_transmit,
	.receive = modem_backend_uart_async_hwfc_receive,
	.close = modem_backend_uart_async_hwfc_close,
};
368 
modem_backend_uart_async_is_supported(struct modem_backend_uart * backend)369 bool modem_backend_uart_async_is_supported(struct modem_backend_uart *backend)
370 {
371 	return uart_callback_set(backend->uart, modem_backend_uart_async_hwfc_event_handler,
372 				 backend) == 0;
373 }
374 
/* Work handler for rx_disabled_work: walk the CONTAINER_OF chain from the
 * embedded work item back to the owning backend and notify the pipe that it
 * is closed. Runs from the system work queue.
 */
static void modem_backend_uart_async_hwfc_notify_closed(struct k_work *item)
{
	struct modem_backend_uart_async_common *common =
		CONTAINER_OF(item, struct modem_backend_uart_async_common, rx_disabled_work);

	struct modem_backend_uart_async *async =
		CONTAINER_OF(common, struct modem_backend_uart_async, common);

	struct modem_backend_uart *backend =
		CONTAINER_OF(async, struct modem_backend_uart, async);

	modem_pipe_notify_closed(&backend->pipe);
}
388 
389 #if CONFIG_MODEM_STATS
init_stats(struct modem_backend_uart * backend)390 static void init_stats(struct modem_backend_uart *backend)
391 {
392 	char name[CONFIG_MODEM_STATS_BUFFER_NAME_SIZE];
393 	uint32_t receive_buf_size;
394 	uint32_t transmit_buf_size;
395 
396 	receive_buf_size = get_receive_buf_size(backend);
397 	transmit_buf_size = get_transmit_buf_size(backend);
398 
399 	snprintk(name, sizeof(name), "%s_%s", backend->uart->name, "rx");
400 	modem_stats_buffer_init(&backend->receive_buf_stats, name, receive_buf_size);
401 	snprintk(name, sizeof(name), "%s_%s", backend->uart->name, "tx");
402 	modem_stats_buffer_init(&backend->transmit_buf_stats, name, transmit_buf_size);
403 }
404 #endif
405 
/* Initialize the async HWFC UART backend: carve the configured receive buffer
 * into rx_buf_count equally sized, word-aligned slab blocks, set up the RX
 * event queue and transmit buffer, and register the pipe API.
 *
 * Returns 0 on success or the negative error from k_mem_slab_init().
 */
int modem_backend_uart_async_init(struct modem_backend_uart *backend,
				   const struct modem_backend_uart_config *config)
{
	int32_t buf_size = (int32_t)config->receive_buf_size;
	int err;

	backend->async.rx_buf_count = CONFIG_MODEM_BACKEND_UART_ASYNC_HWFC_BUFFER_COUNT;

	/* k_mem_slab_init requires a word-aligned buffer. */
	__ASSERT((uintptr_t)config->receive_buf % sizeof(void *) == 0,
		 "Receive buffer is not word-aligned");

	/* Make sure all the buffers will be aligned. */
	/* Trim the total size so it divides evenly into rx_buf_count blocks
	 * whose size is a multiple of sizeof(uint32_t).
	 */
	buf_size -= (config->receive_buf_size % (sizeof(uint32_t) * backend->async.rx_buf_count));
	backend->async.rx_buf_size = buf_size / backend->async.rx_buf_count;
	/* Each block must fit the rx_buf_t header plus at least one data byte. */
	__ASSERT_NO_MSG(backend->async.rx_buf_size > sizeof(struct rx_buf_t));

	/* Initialize the RX buffers and event queue. */
	err = k_mem_slab_init(&backend->async.rx_slab, config->receive_buf,
			      backend->async.rx_buf_size, backend->async.rx_buf_count);
	if (err) {
		return err;
	}
	k_msgq_init(&backend->async.rx_queue, (char *)&backend->async.rx_queue_buf,
		sizeof(struct rx_queue_event), CONFIG_MODEM_BACKEND_UART_ASYNC_HWFC_BUFFER_COUNT);

	backend->async.common.transmit_buf = config->transmit_buf;
	backend->async.common.transmit_buf_size = config->transmit_buf_size;
	k_work_init(&backend->async.common.rx_disabled_work,
		    modem_backend_uart_async_hwfc_notify_closed);

	modem_pipe_init(&backend->pipe, backend, &modem_backend_uart_async_api);

#if CONFIG_MODEM_STATS
	init_stats(backend);
#endif
	return 0;
}
444