1 /*
2  * Copyright (c) 2024 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <errno.h>
8 #include <string.h>
9 #include <zephyr/kernel.h>
10 #include <zephyr/drivers/spi.h>
11 #include <zephyr/linker/devicetree_regions.h>
12 #include <zephyr/ztest.h>
13 
14 #if CONFIG_TESTED_SPI_MODE == 0
15 #define SPI_MODE (SPI_WORD_SET(8) | SPI_LINES_SINGLE | SPI_TRANSFER_LSB)
16 #elif CONFIG_TESTED_SPI_MODE == 1
17 #define SPI_MODE (SPI_WORD_SET(8) | SPI_LINES_SINGLE | SPI_TRANSFER_MSB | SPI_MODE_CPHA)
18 #elif CONFIG_TESTED_SPI_MODE == 2
19 #define SPI_MODE (SPI_WORD_SET(8) | SPI_LINES_SINGLE | SPI_TRANSFER_LSB | SPI_MODE_CPOL)
20 #elif CONFIG_TESTED_SPI_MODE == 3
21 #define SPI_MODE (SPI_WORD_SET(8) | SPI_LINES_SINGLE | SPI_TRANSFER_MSB | SPI_MODE_CPHA \
22 				| SPI_MODE_CPOL)
23 #endif
24 
25 #define SPIM_OP	 (SPI_OP_MODE_MASTER | SPI_MODE)
26 #define SPIS_OP	 (SPI_OP_MODE_SLAVE | SPI_MODE)
27 
/* SPI controller (SPIM) side of the loopback, from the dut_spi_dt devicetree node. */
static struct spi_dt_spec spim = SPI_DT_SPEC_GET(DT_NODELABEL(dut_spi_dt), SPIM_OP, 0);
/* SPI peripheral (SPIS) side of the loopback, from the dut_spis devicetree node. */
static const struct device *spis_dev = DEVICE_DT_GET(DT_NODELABEL(dut_spis));
static const struct spi_config spis_config = {
	.operation = SPIS_OP,
	/* Taken from the zephyr,user peripheral-cs property; defaults to 0 when absent. */
	.slave = DT_PROP_OR(DT_PATH(zephyr_user), peripheral_cs, 0),
};
34 
/* Signal/event pair used to wait for completion of the asynchronous SPIS transfer. */
static struct k_poll_signal async_sig = K_POLL_SIGNAL_INITIALIZER(async_sig);
static struct k_poll_event async_evt =
	K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY, &async_sig);

/* Signal/event pair used to wait for completion of the asynchronous SPIM transfer. */
static struct k_poll_signal async_sig_spim = K_POLL_SIGNAL_INITIALIZER(async_sig_spim);
static struct k_poll_event async_evt_spim =
	K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY, &async_sig_spim);
42 
/* When CONFIG_PREALLOC_BUFFERS is set and the node has a memory-regions property,
 * expand to a section attribute placing the buffer in that linker region (e.g. a
 * DMA-capable RAM area). Otherwise expand to nothing.
 */
#define MEMORY_SECTION(node)                                                                       \
	COND_CODE_1(IS_ENABLED(CONFIG_PREALLOC_BUFFERS),                                           \
		    (COND_CODE_1(DT_NODE_HAS_PROP(node, memory_regions),                           \
				 (__attribute__((__section__(                                      \
				     LINKER_DT_NODE_REGION_NAME(DT_PHANDLE(node,                   \
									   memory_regions)))))),   \
				 ())),                                                             \
		    ())

/* Static pools carved up by buf_alloc() for the controller and peripheral sides. */
static uint8_t spim_buffer[32] MEMORY_SECTION(DT_BUS(DT_NODELABEL(dut_spi_dt)));
static uint8_t spis_buffer[32] MEMORY_SECTION(DT_NODELABEL(dut_spis));
54 
/* Per-test state shared between the test thread (SPIS side) and the work item
 * that drives the SPIM side.
 */
struct test_data {
	/* Work item running the SPIM transfer concurrently with the SPIS call. */
	struct k_work_delayable test_work;
	/* Given by the work handler when the SPIM transfer completed successfully. */
	struct k_sem sem;
	/* Next free offset in spim_buffer. */
	int spim_alloc_idx;
	/* Next free offset in spis_buffer. */
	int spis_alloc_idx;
	struct spi_buf_set sets[4];
	/* Controller TX/RX and peripheral TX/RX sets; NULL when a direction is unused. */
	struct spi_buf_set *mtx_set;
	struct spi_buf_set *mrx_set;
	struct spi_buf_set *stx_set;
	struct spi_buf_set *srx_set;
	struct spi_buf bufs[8];
	/* True when the test uses the asynchronous (signal-based) SPI API. */
	bool async;
};

static struct test_data tdata;
70 
71 /* Allocate buffer from spim or spis space. */
buf_alloc(size_t len,bool spim)72 static uint8_t *buf_alloc(size_t len, bool spim)
73 {
74 	int *idx = spim ? &tdata.spim_alloc_idx : &tdata.spis_alloc_idx;
75 	uint8_t *buf = spim ? spim_buffer : spis_buffer;
76 	size_t total = spim ? sizeof(spim_buffer) : sizeof(spis_buffer);
77 	uint8_t *rv;
78 
79 	if (*idx + len > total) {
80 		zassert_false(true);
81 
82 		return NULL;
83 	}
84 
85 	rv = &buf[*idx];
86 	*idx += len;
87 
88 	return rv;
89 }
90 
/** Work item handler driving the SPI controller side of the transfer.
 *
 * Runs concurrently with the peripheral-side API call made from the test
 * thread. Gives td->sem once the controller transfer completed successfully,
 * so run_test() can detect completion.
 */
static void work_handler(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct test_data *td = CONTAINER_OF(dwork, struct test_data, test_work);
	int rv;

	if (!td->async) {
		/* Blocking controller transfer. */
		rv = spi_transceive_dt(&spim, td->mtx_set, td->mrx_set);
		if (rv == 0) {
			k_sem_give(&td->sem);
		}
	} else {
		/* Start the controller transfer and wait on its completion signal. */
		rv = spi_transceive_signal(spim.bus, &spim.config, td->mtx_set, td->mrx_set,
				&async_sig_spim);
		zassert_equal(rv, 0);

		rv = k_poll(&async_evt_spim, 1, K_MSEC(200));
		zassert_false(rv, "one or more events are not ready");

		/* Signal result carries the transfer's return value. */
		rv = async_evt_spim.signal->result;
		zassert_equal(rv, 0);

		/* Reinitializing for next call */
		async_evt_spim.signal->signaled = 0U;
		async_evt_spim.state = K_POLL_STATE_NOT_READY;

		k_sem_give(&td->sem);
	}
}
120 
121 /** Copies data from buffers in the set to a single buffer which makes it easier
122  * to compare transmitted and received data.
123  *
124  * @param buf Output buffer.
125  * @param len Buffer length.
126  * @param set Set of buffers.
127  *
128  * @return Number of bytes copied.
129  */
cpy_data(uint8_t * buf,size_t len,struct spi_buf_set * set)130 static int cpy_data(uint8_t *buf, size_t len, struct spi_buf_set *set)
131 {
132 	int idx = 0;
133 
134 	for (size_t i = 0; i < set->count; i++) {
135 		size_t l = set->buffers[i].len;
136 
137 		if (len - idx >= l) {
138 			memcpy(&buf[idx], set->buffers[i].buf, l);
139 			idx += l;
140 		} else {
141 			return -1;
142 		}
143 	}
144 
145 	return idx;
146 }
147 
/** Compare the data content of two buffer sets.
 *
 * @param tx_set TX set.
 * @param rx_set RX set.
 * @param same_size True if both sets are expected to hold the same amount of data.
 *
 * @return 0 if data is the same and other value indicate that check failed.
 */
static int check_buffers(struct spi_buf_set *tx_set, struct spi_buf_set *rx_set, bool same_size)
{
	static uint8_t tx_data[256];
	static uint8_t rx_data[256];
	int rx_len;
	int tx_len;

	if (!tx_set || !rx_set) {
		return 0;
	}

	rx_len = cpy_data(rx_data, sizeof(rx_data), rx_set);
	tx_len = cpy_data(tx_data, sizeof(tx_data), tx_set);

	/* cpy_data() returns -1 when a set does not fit into the local buffer.
	 * Previously a negative rx_len reached memcmp(), where its conversion
	 * to size_t produced a huge count and an out-of-bounds read.
	 */
	if ((rx_len < 0) || (tx_len < 0)) {
		return -1;
	}

	if (same_size && (rx_len != tx_len)) {
		return -1;
	}

	return memcmp(tx_data, rx_data, rx_len);
}
175 
176 /** Calculate expected number of received bytes by the SPI peripheral.
177  *
178  * It is used to check if SPI API call for peripheral SPI device returns correct value.
179  * @param tx_set TX set.
180  * @param rx_set RX set.
181  *
182  * @return Expected amount of received bytes.
183  */
peripheral_rx_len(struct spi_buf_set * tx_set,struct spi_buf_set * rx_set)184 static int peripheral_rx_len(struct spi_buf_set *tx_set, struct spi_buf_set *rx_set)
185 {
186 	size_t tx_len = 0;
187 	size_t rx_len = 0;
188 
189 	if (!tx_set || !rx_set) {
190 		return 0;
191 	}
192 
193 	for (size_t i = 0; i < tx_set->count; i++) {
194 		tx_len += tx_set->buffers[i].len;
195 	}
196 
197 	for (size_t i = 0; i < rx_set->count; i++) {
198 		rx_len += rx_set->buffers[i].len;
199 	}
200 
201 	return MIN(rx_len, tx_len);
202 }
203 
/** Generic function which runs the test with sets prepared in the test data structure.
 *
 * Schedules the SPIM transfer on a work item (10 ms delay), then performs the
 * SPIS transfer from this thread, waits for both sides to finish, and compares
 * the transmitted and received data in both directions.
 *
 * @param m_same_size True when controller TX and peripheral RX carry the same amount of data.
 * @param s_same_size True when peripheral TX and controller RX carry the same amount of data.
 * @param async True to use the signal-based asynchronous SPI API.
 */
static void run_test(bool m_same_size, bool s_same_size, bool async)
{
	int rv;
	int periph_rv;
	int srx_len;

	tdata.async = async;
	/* Delay gives the SPIS call below time to start before SPIM clocks data. */
	rv = k_work_schedule(&tdata.test_work, K_MSEC(10));
	zassert_equal(rv, 1);

	if (!async) {
		periph_rv = spi_transceive(spis_dev, &spis_config, tdata.stx_set, tdata.srx_set);
		if (periph_rv == -ENOTSUP) {
			ztest_test_skip();
		}
	} else {
		rv = spi_transceive_signal(spis_dev, &spis_config, tdata.stx_set, tdata.srx_set,
					   &async_sig);
		if (rv == -ENOTSUP) {
			ztest_test_skip();
		}
		zassert_equal(rv, 0);

		/* Transfer not finished yet */
		rv = k_sem_take(&tdata.sem, K_NO_WAIT);
		zassert_equal(rv, -EBUSY);

		rv = k_poll(&async_evt, 1, K_MSEC(200));
		zassert_false(rv, "one or more events are not ready");

		periph_rv = async_evt.signal->result;

		/* Reinitializing for next call */
		async_evt.signal->signaled = 0U;
		async_evt.state = K_POLL_STATE_NOT_READY;
	}

	/* Wait for the SPIM work handler to report completion. */
	rv = k_sem_take(&tdata.sem, K_MSEC(100));
	zassert_equal(rv, 0);

	srx_len = peripheral_rx_len(tdata.mtx_set, tdata.srx_set);

	zassert_equal(periph_rv, srx_len, "Got: %d but expected:%d", periph_rv, srx_len);

	/* Controller TX must match peripheral RX, and vice versa. */
	rv = check_buffers(tdata.mtx_set, tdata.srx_set, m_same_size);
	zassert_equal(rv, 0);

	rv = check_buffers(tdata.stx_set, tdata.mrx_set, s_same_size);
	zassert_equal(rv, 0);
}
255 
256 /** Basic test where SPI controller and SPI peripheral have RX and TX sets which contains only one
257  *  same size buffer.
258  */
test_basic(bool async)259 static void test_basic(bool async)
260 {
261 	size_t len = 16;
262 
263 	for (int i = 0; i < 4; i++) {
264 		tdata.bufs[i].buf = buf_alloc(len, i < 2);
265 		tdata.bufs[i].len = len;
266 		tdata.sets[i].buffers = &tdata.bufs[i];
267 		tdata.sets[i].count = 1;
268 	}
269 
270 	tdata.mtx_set = &tdata.sets[0];
271 	tdata.mrx_set = &tdata.sets[1];
272 	tdata.stx_set = &tdata.sets[2];
273 	tdata.srx_set = &tdata.sets[3];
274 
275 	run_test(true, true, async);
276 }
277 
/* Synchronous variant of the basic single-buffer test. */
ZTEST(spi_controller_peripheral, test_basic)
{
	test_basic(false);
}

/* Asynchronous variant of the basic single-buffer test. */
ZTEST(spi_controller_peripheral, test_basic_async)
{
	test_basic(true);
}
287 
288 /** Basic test with zero length buffers.
289  */
test_basic_zero_len(bool async)290 void test_basic_zero_len(bool async)
291 {
292 	size_t len = 8;
293 
294 	/* SPIM */
295 	tdata.bufs[0].buf = buf_alloc(len, true);
296 	tdata.bufs[0].len = len;
297 	tdata.bufs[1].buf = buf_alloc(len, true);
298 	/* Intentionally len was set to 0 - second buffer "is empty". */
299 	tdata.bufs[1].len = 0;
300 	tdata.sets[0].buffers = &tdata.bufs[0];
301 	tdata.sets[0].count = 2;
302 	tdata.mtx_set = &tdata.sets[0];
303 
304 	tdata.bufs[2].buf = buf_alloc(len, true);
305 	tdata.bufs[2].len = len;
306 	tdata.bufs[3].buf = buf_alloc(len, true);
307 	/* Intentionally len was set to 0 - second buffer "is empty". */
308 	tdata.bufs[3].len = 0;
309 	tdata.sets[1].buffers = &tdata.bufs[2];
310 	tdata.sets[1].count = 2;
311 	tdata.mrx_set = &tdata.sets[1];
312 
313 	/* SPIS */
314 	tdata.bufs[4].buf = buf_alloc(len, false);
315 	tdata.bufs[4].len = len;
316 	tdata.sets[2].buffers = &tdata.bufs[4];
317 	tdata.sets[2].count = 1;
318 	tdata.stx_set = &tdata.sets[2];
319 
320 	tdata.bufs[6].buf = buf_alloc(len, false);
321 	tdata.bufs[6].len = len;
322 	tdata.sets[3].buffers = &tdata.bufs[6];
323 	tdata.sets[3].count = 1;
324 	tdata.srx_set = &tdata.sets[3];
325 
326 	run_test(true, true, async);
327 }
328 
/* Synchronous variant of the zero-length-buffer test. */
ZTEST(spi_controller_peripheral, test_basic_zero_len)
{
	test_basic_zero_len(false);
}

/* Asynchronous variant of the zero-length-buffer test. */
ZTEST(spi_controller_peripheral, test_basic_zero_len_async)
{
	test_basic_zero_len(true);
}
338 
339 /** Setup a transfer where RX buffer on SPI controller and SPI peripheral are
340  *  shorter than TX buffers. RX buffers shall contain beginning of TX data
341  *  and last TX bytes that did not fit in the RX buffers shall be lost.
342  */
test_short_rx(bool async)343 static void test_short_rx(bool async)
344 {
345 	size_t len = 16;
346 
347 	tdata.bufs[0].buf = buf_alloc(len, true);
348 	tdata.bufs[0].len = len;
349 	tdata.bufs[1].buf = buf_alloc(len, true);
350 	tdata.bufs[1].len = len - 3; /* RX buffer */
351 	tdata.bufs[2].buf = buf_alloc(len, false);
352 	tdata.bufs[2].len = len;
353 	tdata.bufs[3].buf = buf_alloc(len, false);
354 	tdata.bufs[3].len = len - 4; /* RX buffer */
355 
356 	for (int i = 0; i < 4; i++) {
357 		tdata.sets[i].buffers = &tdata.bufs[i];
358 		tdata.sets[i].count = 1;
359 	}
360 
361 	tdata.mtx_set = &tdata.sets[0];
362 	tdata.mrx_set = &tdata.sets[1];
363 	tdata.stx_set = &tdata.sets[2];
364 	tdata.srx_set = &tdata.sets[3];
365 
366 	run_test(false, false, async);
367 }
368 
/* Synchronous variant of the short-RX-buffer test. */
ZTEST(spi_controller_peripheral, test_short_rx)
{
	test_short_rx(false);
}

/* Asynchronous variant of the short-RX-buffer test. */
ZTEST(spi_controller_peripheral, test_short_rx_async)
{
	test_short_rx(true);
}
378 
379 /** Test where only master transmits. */
test_only_tx(bool async)380 static void test_only_tx(bool async)
381 {
382 	size_t len = 16;
383 
384 	/* MTX buffer */
385 	tdata.bufs[0].buf = buf_alloc(len, true);
386 	tdata.bufs[0].len = len;
387 	tdata.sets[0].buffers = &tdata.bufs[0];
388 	tdata.sets[0].count = 1;
389 	tdata.mtx_set = &tdata.sets[0];
390 	tdata.mrx_set = NULL;
391 
392 	/* STX buffer */
393 	tdata.bufs[1].buf = buf_alloc(len, false);
394 	tdata.bufs[1].len = len;
395 	tdata.sets[1].buffers = &tdata.bufs[1];
396 	tdata.sets[1].count = 1;
397 	tdata.srx_set = &tdata.sets[1];
398 	tdata.stx_set = NULL;
399 
400 	run_test(true, true, async);
401 }
402 
/* Synchronous variant of the controller-TX-only test. */
ZTEST(spi_controller_peripheral, test_only_tx)
{
	test_only_tx(false);
}

/* Asynchronous variant of the controller-TX-only test. */
ZTEST(spi_controller_peripheral, test_only_tx_async)
{
	test_only_tx(true);
}
412 
413 /** Test where only SPI controller transmits and SPI peripheral receives in chunks. */
test_only_tx_in_chunks(bool async)414 static void test_only_tx_in_chunks(bool async)
415 {
416 	size_t len1 = 7;
417 	size_t len2 = 8;
418 
419 	/* MTX buffer */
420 	tdata.bufs[0].buf = buf_alloc(len1 + len2, true);
421 	tdata.bufs[0].len = len1 + len2;
422 	tdata.sets[0].buffers = &tdata.bufs[0];
423 	tdata.sets[0].count = 1;
424 	tdata.mtx_set = &tdata.sets[0];
425 	tdata.mrx_set = NULL;
426 
427 	/* STX buffer */
428 	tdata.bufs[1].buf = buf_alloc(len1, false);
429 	tdata.bufs[1].len = len1;
430 	tdata.bufs[2].buf = buf_alloc(len2, false);
431 	tdata.bufs[2].len = len2;
432 	tdata.sets[1].buffers = &tdata.bufs[1];
433 	tdata.sets[1].count = 2;
434 	tdata.srx_set = &tdata.sets[1];
435 	tdata.stx_set = NULL;
436 
437 	run_test(true, true, async);
438 }
439 
/* Synchronous variant of the chunked peripheral-RX test. */
ZTEST(spi_controller_peripheral, test_only_tx_in_chunks)
{
	test_only_tx_in_chunks(false);
}

/* Asynchronous variant of the chunked peripheral-RX test. */
ZTEST(spi_controller_peripheral, test_only_tx_in_chunks_async)
{
	test_only_tx_in_chunks(true);
}
449 
450 /** Test where only SPI peripheral transmits. */
test_only_rx(bool async)451 static void test_only_rx(bool async)
452 {
453 	size_t len = 16;
454 
455 	/* MTX buffer */
456 	tdata.bufs[0].buf = buf_alloc(len, true);
457 	tdata.bufs[0].len = len;
458 	tdata.sets[0].buffers = &tdata.bufs[0];
459 	tdata.sets[0].count = 1;
460 	tdata.mrx_set = &tdata.sets[0];
461 	tdata.mtx_set = NULL;
462 
463 	/* STX buffer */
464 	tdata.bufs[1].buf = buf_alloc(len, false);
465 	tdata.bufs[1].len = len;
466 	tdata.sets[1].buffers = &tdata.bufs[1];
467 	tdata.sets[1].count = 1;
468 	tdata.stx_set = &tdata.sets[1];
469 	tdata.srx_set = NULL;
470 
471 	run_test(true, true, async);
472 }
473 
/* Synchronous variant of the peripheral-TX-only test. */
ZTEST(spi_controller_peripheral, test_only_rx)
{
	test_only_rx(false);
}

/* Asynchronous variant of the peripheral-TX-only test. */
ZTEST(spi_controller_peripheral, test_only_rx_async)
{
	test_only_rx(true);
}
483 
484 /** Test where only SPI peripheral transmits in chunks. */
test_only_rx_in_chunks(bool async)485 static void test_only_rx_in_chunks(bool async)
486 {
487 	size_t len1 = 7;
488 	size_t len2 = 9;
489 
490 	/* MTX buffer */
491 	tdata.bufs[0].buf = buf_alloc(len1 + len2, true);
492 	tdata.bufs[0].len = len1 + len2;
493 	tdata.sets[0].buffers = &tdata.bufs[0];
494 	tdata.sets[0].count = 1;
495 	tdata.mrx_set = &tdata.sets[0];
496 	tdata.mtx_set = NULL;
497 
498 	/* STX buffer */
499 	tdata.bufs[1].buf = buf_alloc(len1, false);
500 	tdata.bufs[1].len = len1;
501 	tdata.bufs[2].buf = buf_alloc(len2, false);
502 	tdata.bufs[2].len = len2;
503 	tdata.sets[1].buffers = &tdata.bufs[1];
504 	tdata.sets[1].count = 2;
505 	tdata.stx_set = &tdata.sets[1];
506 	tdata.srx_set = NULL;
507 
508 	run_test(true, true, async);
509 }
510 
/* Synchronous variant of the chunked peripheral-TX test. */
ZTEST(spi_controller_peripheral, test_only_rx_in_chunks)
{
	test_only_rx_in_chunks(false);
}

/* Asynchronous variant of the chunked peripheral-TX test. */
ZTEST(spi_controller_peripheral, test_only_rx_in_chunks_async)
{
	test_only_rx_in_chunks(true);
}
520 
before(void * not_used)521 static void before(void *not_used)
522 {
523 	ARG_UNUSED(not_used);
524 
525 	memset(&tdata, 0, sizeof(tdata));
526 	for (size_t i = 0; i < sizeof(spim_buffer); i++) {
527 		spim_buffer[i] = (uint8_t)i;
528 	}
529 	for (size_t i = 0; i < sizeof(spis_buffer); i++) {
530 		spis_buffer[i] = (uint8_t)(i + 0x80);
531 	}
532 
533 	k_work_init_delayable(&tdata.test_work, work_handler);
534 	k_sem_init(&tdata.sem, 0, 1);
535 }
536 
after(void * not_used)537 static void after(void *not_used)
538 {
539 	ARG_UNUSED(not_used);
540 
541 	k_work_cancel_delayable(&tdata.test_work);
542 }
543 
/* Suite-level setup; no shared fixture is needed. */
static void *suite_setup(void)
{
	return NULL;
}

ZTEST_SUITE(spi_controller_peripheral, NULL, suite_setup, before, after, NULL);
550