1 /*
2 * Copyright 2025 Google LLC
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/device.h>
8 #include <zephyr/drivers/uart.h>
9 #include <zephyr/kernel.h>
10 #include <zephyr/logging/log.h>
11 #include <zephyr/sys/ring_buffer.h>
12 #include <zephyr/drivers/uart/uart_bridge.h>
13 #include <zephyr/pm/device.h>
14
#define DT_DRV_COMPAT zephyr_uart_bridge

LOG_MODULE_REGISTER(uart_bridge, LOG_LEVEL_INF);

/* Size of each direction's staging ring buffer. */
#define RING_BUF_SIZE CONFIG_UART_BRIDGE_BUF_SIZE
/* Flow-control watermark: RX is paused when free space in the ring
 * buffer drops below one third of its size, and resumed once free
 * space rises back above it.
 */
#define RING_BUF_FULL_THRESHOLD (RING_BUF_SIZE / 3)
21
/* Static configuration: the two UART devices bridged to each other. */
struct uart_bridge_config {
	const struct device *peer_dev[2];
};

/* Per-direction state. Bytes read from one UART are staged in @rb
 * until the other UART's TX path drains them. @paused is true while RX
 * interrupts on the source device are disabled because @rb ran low on
 * free space (see RING_BUF_FULL_THRESHOLD).
 */
struct uart_bridge_peer_data {
	uint8_t buf[RING_BUF_SIZE];
	struct ring_buf rb;
	bool paused;
};

/* Runtime state for one bridge instance, one entry per direction. */
struct uart_bridge_data {
	struct uart_bridge_peer_data peer[2];
};
35
uart_bridge_get_peer(const struct device * dev,const struct device * bridge_dev)36 static const struct device *uart_bridge_get_peer(const struct device *dev,
37 const struct device *bridge_dev)
38 {
39 const struct uart_bridge_config *cfg = bridge_dev->config;
40
41 if (dev == cfg->peer_dev[0]) {
42 return cfg->peer_dev[1];
43 } else if (dev == cfg->peer_dev[1]) {
44 return cfg->peer_dev[0];
45 } else {
46 return NULL;
47 }
48 }
49
uart_bridge_settings_update(const struct device * dev,const struct device * bridge_dev)50 void uart_bridge_settings_update(const struct device *dev,
51 const struct device *bridge_dev)
52 {
53 struct uart_config cfg;
54 const struct device *peer_dev = uart_bridge_get_peer(dev, bridge_dev);
55 int ret;
56
57 if (peer_dev == NULL) {
58 LOG_DBG("%s: not a bridge dev", dev->name);
59 return;
60 }
61
62 LOG_DBG("update settings: dev=%s bridge=%s peer=%s", dev->name,
63 bridge_dev->name, peer_dev->name);
64
65 ret = uart_config_get(dev, &cfg);
66 if (ret) {
67 LOG_WRN("%s: failed to get the uart config: %d", dev->name,
68 ret);
69 return;
70 }
71
72 ret = uart_configure(peer_dev, &cfg);
73 if (ret) {
74 LOG_WRN("%s: failed to set the uart config: %d", peer_dev->name,
75 ret);
76 return;
77 }
78
79 LOG_INF("uart settings: baudrate=%d parity=%d dev=%s",
80 cfg.baudrate, cfg.parity, bridge_dev->name);
81 }
82
uart_bridge_get_idx(const struct device * dev,const struct device * bridge_dev,bool own)83 static uint8_t uart_bridge_get_idx(const struct device *dev,
84 const struct device *bridge_dev, bool own)
85 {
86 const struct uart_bridge_config *cfg = bridge_dev->config;
87
88 if (dev == cfg->peer_dev[0]) {
89 return own ? 0 : 1;
90 } else {
91 return own ? 1 : 0;
92 }
93 }
94
uart_bridge_handle_rx(const struct device * dev,const struct device * bridge_dev)95 static void uart_bridge_handle_rx(const struct device *dev,
96 const struct device *bridge_dev)
97 {
98 const struct uart_bridge_config *cfg = bridge_dev->config;
99 struct uart_bridge_data *data = bridge_dev->data;
100
101 const struct device *peer_dev =
102 cfg->peer_dev[uart_bridge_get_idx(dev, bridge_dev, false)];
103 struct uart_bridge_peer_data *own_data =
104 &data->peer[uart_bridge_get_idx(dev, bridge_dev, true)];
105
106 uint8_t *recv_buf;
107 int rb_len, recv_len;
108 int ret;
109
110 if (ring_buf_space_get(&own_data->rb) < RING_BUF_FULL_THRESHOLD) {
111 LOG_DBG("%s: buffer full: pause", dev->name);
112 uart_irq_rx_disable(dev);
113 own_data->paused = true;
114 return;
115 }
116
117 rb_len = ring_buf_put_claim(&own_data->rb, &recv_buf, RING_BUF_SIZE);
118 if (rb_len == 0) {
119 LOG_WRN("%s: ring_buf full", dev->name);
120 return;
121 }
122
123 recv_len = uart_fifo_read(dev, recv_buf, rb_len);
124 if (recv_len < 0) {
125 ring_buf_put_finish(&own_data->rb, 0);
126 LOG_ERR("%s: rx error: %d", dev->name, recv_len);
127 return;
128 }
129
130 ret = ring_buf_put_finish(&own_data->rb, recv_len);
131 if (ret < 0) {
132 LOG_ERR("%s: ring_buf_put_finish error: %d", dev->name, rb_len);
133 return;
134 }
135
136 uart_irq_tx_enable(peer_dev);
137 }
138
uart_bridge_handle_tx(const struct device * dev,const struct device * bridge_dev)139 static void uart_bridge_handle_tx(const struct device *dev,
140 const struct device *bridge_dev)
141 {
142 const struct uart_bridge_config *cfg = bridge_dev->config;
143 struct uart_bridge_data *data = bridge_dev->data;
144
145 const struct device *peer_dev =
146 cfg->peer_dev[uart_bridge_get_idx(dev, bridge_dev, false)];
147 struct uart_bridge_peer_data *peer_data =
148 &data->peer[uart_bridge_get_idx(dev, bridge_dev, false)];
149
150 uint8_t *send_buf;
151 int rb_len, sent_len;
152 int ret;
153
154 rb_len = ring_buf_get_claim(&peer_data->rb, &send_buf, RING_BUF_SIZE);
155 if (rb_len == 0) {
156 LOG_DBG("%s: buffer empty, disable tx irq", dev->name);
157 uart_irq_tx_disable(dev);
158 return;
159 }
160
161 sent_len = uart_fifo_fill(dev, send_buf, rb_len);
162 if (sent_len < 0) {
163 ring_buf_get_finish(&peer_data->rb, 0);
164 LOG_ERR("%s: tx error: %d", dev->name, sent_len);
165 return;
166 }
167
168 ret = ring_buf_get_finish(&peer_data->rb, sent_len);
169 if (ret < 0) {
170 LOG_ERR("ring_buf_get_finish error: %d", ret);
171 return;
172 }
173
174 if (peer_data->paused &&
175 ring_buf_space_get(&peer_data->rb) > RING_BUF_FULL_THRESHOLD) {
176 LOG_DBG("%s: buffer free: resume", dev->name);
177 uart_irq_rx_enable(peer_dev);
178 peer_data->paused = false;
179 return;
180 }
181 }
182
/* Shared UART ISR callback for both peer devices. @user_data is the
 * bridge device (set in uart_bridge_pm_action on resume); it is used
 * to look up the direction state and the opposite peer.
 */
static void interrupt_handler(const struct device *dev, void *user_data)
{
	const struct device *bridge_dev = user_data;

	/* Service RX and TX until no interrupt is pending on @dev. */
	while (uart_irq_update(dev) && uart_irq_is_pending(dev)) {
		if (uart_irq_rx_ready(dev)) {
			uart_bridge_handle_rx(dev, bridge_dev);
		}
		if (uart_irq_tx_ready(dev)) {
			uart_bridge_handle_tx(dev, bridge_dev);
		}
	}
}
196
/* Power-management hook for the bridge device.
 *
 * SUSPEND: stop RX interrupts on both peers and detach the shared
 * callback so no forwarding happens while suspended.
 * RESUME: attach the shared interrupt handler (with the bridge device
 * as user data) to both peers, then enable their RX interrupts.
 *
 * Returns 0 on success, -ENOTSUP for unhandled actions.
 */
static int uart_bridge_pm_action(const struct device *dev,
				 enum pm_device_action action)
{
	const struct uart_bridge_config *cfg = dev->config;

	switch (action) {
	case PM_DEVICE_ACTION_SUSPEND:
		uart_irq_rx_disable(cfg->peer_dev[0]);
		uart_irq_rx_disable(cfg->peer_dev[1]);
		uart_irq_callback_user_data_set(cfg->peer_dev[0], NULL, NULL);
		uart_irq_callback_user_data_set(cfg->peer_dev[1], NULL, NULL);
		break;
	case PM_DEVICE_ACTION_RESUME:
		uart_irq_callback_user_data_set(cfg->peer_dev[0], interrupt_handler,
						(void *)dev);
		uart_irq_callback_user_data_set(cfg->peer_dev[1], interrupt_handler,
						(void *)dev);
		uart_irq_rx_enable(cfg->peer_dev[0]);
		uart_irq_rx_enable(cfg->peer_dev[1]);
		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}
223
uart_bridge_init(const struct device * dev)224 static int uart_bridge_init(const struct device *dev)
225 {
226 struct uart_bridge_data *data = dev->data;
227
228 ring_buf_init(&data->peer[0].rb, RING_BUF_SIZE, data->peer[0].buf);
229 ring_buf_init(&data->peer[1].rb, RING_BUF_SIZE, data->peer[1].buf);
230
231 return pm_device_driver_init(dev, uart_bridge_pm_action);
232 }
233
/*
 * Instantiate one bridge per enabled devicetree node. The `peers`
 * property must name exactly two UART devices (enforced at build
 * time); they are stored in the instance config, and the PM action
 * attaches/detaches the shared interrupt handler on resume/suspend.
 */
#define UART_BRIDGE_INIT(n)                                                    \
	BUILD_ASSERT(DT_INST_PROP_LEN(n, peers) == 2,                          \
		"uart-bridge peers property must have exactly 2 members");     \
									       \
	static const struct uart_bridge_config uart_bridge_cfg_##n = {         \
		.peer_dev = {DT_INST_FOREACH_PROP_ELEM_SEP(                    \
			n, peers, DEVICE_DT_GET_BY_IDX, (,))},                 \
	};                                                                     \
									       \
	static struct uart_bridge_data uart_bridge_data_##n;                   \
									       \
	PM_DEVICE_DT_INST_DEFINE(n, uart_bridge_pm_action);                    \
									       \
	DEVICE_DT_INST_DEFINE(n, uart_bridge_init, PM_DEVICE_DT_INST_GET(n),   \
		&uart_bridge_data_##n, &uart_bridge_cfg_##n,                   \
		POST_KERNEL, CONFIG_SERIAL_INIT_PRIORITY, NULL);

DT_INST_FOREACH_STATUS_OKAY(UART_BRIDGE_INIT)
252