/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Private API for SPI drivers
 */

#ifndef ZEPHYR_DRIVERS_SPI_SPI_CONTEXT_H_
#define ZEPHYR_DRIVERS_SPI_SPI_CONTEXT_H_

#include <zephyr/drivers/gpio.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/kernel.h>
#include <zephyr/pm/device_runtime.h>

#ifdef __cplusplus
extern "C" {
#endif

#if defined(DT_DRV_COMPAT) && !DT_ANY_INST_HAS_PROP_STATUS_OKAY(cs_gpios)
#define DT_SPI_CTX_HAS_NO_CS_GPIOS 1
#endif

enum spi_ctx_runtime_op_mode {
	SPI_CTX_RUNTIME_OP_MODE_MASTER = BIT(0),
	SPI_CTX_RUNTIME_OP_MODE_SLAVE  = BIT(1),
};

struct spi_context {
	const struct spi_config *config;
#ifdef CONFIG_MULTITHREADING
	const struct spi_config *owner;
#endif
#ifndef DT_SPI_CTX_HAS_NO_CS_GPIOS
	const struct gpio_dt_spec *cs_gpios;
	size_t num_cs_gpios;
#endif /* !DT_SPI_CTX_HAS_NO_CS_GPIOS */

#ifdef CONFIG_MULTITHREADING
	struct k_sem lock;
	struct k_sem sync;
#else
	/* An atomic flag that signals a completed transfer
	 * when multithreading is disabled.
	 */
	atomic_t ready;
#endif /* CONFIG_MULTITHREADING */
	int sync_status;

#ifdef CONFIG_SPI_ASYNC
	spi_callback_t callback;
	void *callback_data;
	bool asynchronous;
#endif /* CONFIG_SPI_ASYNC */
	const struct spi_buf *current_tx;
	size_t tx_count;
	const struct spi_buf *current_rx;
	size_t rx_count;

	const uint8_t *tx_buf;
	size_t tx_len;
	uint8_t *rx_buf;
	size_t rx_len;

#ifdef CONFIG_SPI_SLAVE
	int recv_frames;
#endif /* CONFIG_SPI_SLAVE */
};

#define SPI_CONTEXT_INIT_LOCK(_data, _ctx_name)				\
	._ctx_name.lock = Z_SEM_INITIALIZER(_data._ctx_name.lock, 0, 1)

#define SPI_CONTEXT_INIT_SYNC(_data, _ctx_name)				\
	._ctx_name.sync = Z_SEM_INITIALIZER(_data._ctx_name.sync, 0, 1)

#ifndef DT_SPI_CTX_HAS_NO_CS_GPIOS
#define SPI_CONTEXT_CS_GPIO_SPEC_ELEM(_node_id, _prop, _idx)		\
	GPIO_DT_SPEC_GET_BY_IDX(_node_id, _prop, _idx),

#define SPI_CONTEXT_CS_GPIOS_FOREACH_ELEM(_node_id)				\
	DT_FOREACH_PROP_ELEM(_node_id, cs_gpios,				\
				SPI_CONTEXT_CS_GPIO_SPEC_ELEM)

#define SPI_CONTEXT_CS_GPIOS_INITIALIZE(_node_id, _ctx_name)				\
	._ctx_name.cs_gpios = (const struct gpio_dt_spec []) {				\
		COND_CODE_1(DT_SPI_HAS_CS_GPIOS(_node_id),				\
			    (SPI_CONTEXT_CS_GPIOS_FOREACH_ELEM(_node_id)), ({0}))	\
	},										\
	._ctx_name.num_cs_gpios = DT_PROP_LEN_OR(_node_id, cs_gpios, 0),
#else /* DT_SPI_CTX_HAS_NO_CS_GPIOS */
#define SPI_CONTEXT_CS_GPIOS_INITIALIZE(...)
#endif /* DT_SPI_CTX_HAS_NO_CS_GPIOS */
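
/*
 * Example driver data definition using the initializers above (an
 * illustrative sketch; "my_spi_data" and instance 0 are assumptions):
 *
 *	struct my_spi_data {
 *		struct spi_context ctx;
 *	};
 *
 *	static struct my_spi_data my_spi_data_0 = {
 *		SPI_CONTEXT_INIT_LOCK(my_spi_data_0, ctx),
 *		SPI_CONTEXT_INIT_SYNC(my_spi_data_0, ctx),
 *		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(0), ctx)
 *	};
 */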

/*
 * Checks whether an SPI config is the same as the one stored in the spi_context.
 * The intent is for a driver to use this to check whether it can skip
 * some reconfiguration for a transfer in a fast code path.
 */
static inline bool spi_context_configured(struct spi_context *ctx,
					  const struct spi_config *config)
{
	return !!(ctx->config == config);
}
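
/*
 * Example (sketch): fast path in a driver's configure helper, skipping
 * reconfiguration when the hardware is already set up for this config:
 *
 *	if (spi_context_configured(&data->ctx, spi_cfg)) {
 *		return 0;
 *	}
 */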

/* Returns true if the spi configuration stored for this context
 * specifies a slave mode configuration, false otherwise.
 */
static inline bool spi_context_is_slave(struct spi_context *ctx)
{
	return (ctx->config->operation & SPI_OP_MODE_SLAVE);
}

/*
 * The purpose of the context lock is to synchronize the usage of the driver/hardware.
 * The driver should call this function to claim or wait for ownership of the spi resource.
 * Usually the appropriate time to call this is at the start of the transceive API implementation.
 */
static inline void spi_context_lock(struct spi_context *ctx,
				    bool asynchronous,
				    spi_callback_t callback,
				    void *callback_data,
				    const struct spi_config *spi_cfg)
{
#ifdef CONFIG_MULTITHREADING
	bool already_locked = (spi_cfg->operation & SPI_LOCK_ON) &&
			      (k_sem_count_get(&ctx->lock) == 0) &&
			      (ctx->owner == spi_cfg);

	if (!already_locked) {
		k_sem_take(&ctx->lock, K_FOREVER);
		ctx->owner = spi_cfg;
	}
#endif /* CONFIG_MULTITHREADING */

#ifdef CONFIG_SPI_ASYNC
	ctx->asynchronous = asynchronous;
	ctx->callback = callback;
	ctx->callback_data = callback_data;
#endif /* CONFIG_SPI_ASYNC */
}

/*
 * This function must be called by a driver which has called spi_context_lock in order
 * to release ownership of the spi resource.
 * Usually the appropriate time to call this would be at the end of a transfer that was
 * initiated by a transceive API call, except in the case that the SPI_LOCK_ON bit was set
 * in the configuration.
 */
static inline void spi_context_release(struct spi_context *ctx, int status)
{
#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_SPI_SLAVE
	if (status >= 0 && ((ctx->config == NULL) || (ctx->config->operation & SPI_LOCK_ON))) {
		return;
	}
#endif /* CONFIG_SPI_SLAVE */

#ifdef CONFIG_SPI_ASYNC
	if (!ctx->asynchronous || (status < 0)) {
		ctx->owner = NULL;
		k_sem_give(&ctx->lock);
	}
#else
	if ((ctx->config == NULL) || !(ctx->config->operation & SPI_LOCK_ON)) {
		ctx->owner = NULL;
		k_sem_give(&ctx->lock);
	}
#endif /* CONFIG_SPI_ASYNC */
#endif /* CONFIG_MULTITHREADING */
}
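
/*
 * Typical synchronous transceive skeleton using the lock/release pair
 * (a sketch; my_spi_do_transfer and the surrounding names are hypothetical):
 *
 *	spi_context_lock(&data->ctx, false, NULL, NULL, spi_cfg);
 *	ret = my_spi_do_transfer(dev, spi_cfg, tx_bufs, rx_bufs);
 *	spi_context_release(&data->ctx, ret);
 *	return ret;
 */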

static inline size_t spi_context_total_tx_len(struct spi_context *ctx);
static inline size_t spi_context_total_rx_len(struct spi_context *ctx);

/* This function is essentially a way for a driver to implement both the
 * synchronous transceive API and the asynchronous transceive_async API
 * with the same code path.
 *
 * The exact way this function is used may depend on the driver implementation, but
 * essentially it blocks waiting for a signal from spi_context_complete,
 * unless the transfer is asynchronous, in which case it does nothing in master mode.
 */
static inline int spi_context_wait_for_completion(struct spi_context *ctx)
{
	int status = 0;
	bool wait;

#ifdef CONFIG_SPI_ASYNC
	wait = !ctx->asynchronous;
#else
	wait = true;
#endif

	if (wait) {
		k_timeout_t timeout;
		uint32_t timeout_ms;

		/* Do not use any timeout in slave mode, as in this case
		 * it is not known when the transfer will actually start and
		 * what the frequency will be.
		 */
		if (IS_ENABLED(CONFIG_SPI_SLAVE) && spi_context_is_slave(ctx)) {
			timeout = K_FOREVER;
			timeout_ms = UINT32_MAX;
		} else {
			uint32_t tx_len = spi_context_total_tx_len(ctx);
			uint32_t rx_len = spi_context_total_rx_len(ctx);

			timeout_ms = MAX(tx_len, rx_len) * 8 * 1000 /
				     ctx->config->frequency;
			timeout_ms += CONFIG_SPI_COMPLETION_TIMEOUT_TOLERANCE;

			timeout = K_MSEC(timeout_ms);
		}
#ifdef CONFIG_MULTITHREADING
		if (k_sem_take(&ctx->sync, timeout)) {
			LOG_ERR("Timeout waiting for transfer complete");
			return -ETIMEDOUT;
		}
#else
		if (timeout_ms == UINT32_MAX) {
			/* In slave mode, we wait indefinitely, so we can go idle. */
			unsigned int key = irq_lock();

			while (!atomic_get(&ctx->ready)) {
				k_cpu_atomic_idle(key);
				key = irq_lock();
			}

			ctx->ready = 0;
			irq_unlock(key);
		} else {
			const uint32_t tms = k_uptime_get_32();

			while (!atomic_get(&ctx->ready) && (k_uptime_get_32() - tms < timeout_ms)) {
				k_busy_wait(1);
			}

			if (!ctx->ready) {
				LOG_ERR("Timeout waiting for transfer complete");
				return -ETIMEDOUT;
			}

			ctx->ready = 0;
		}
#endif /* CONFIG_MULTITHREADING */
		status = ctx->sync_status;
	}

#ifdef CONFIG_SPI_SLAVE
	if (spi_context_is_slave(ctx) && !status) {
		return ctx->recv_frames;
	}
#endif /* CONFIG_SPI_SLAVE */

	return status;
}
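
/*
 * Example (sketch): a driver typically starts the transfer in hardware and
 * then waits on this call in its transceive path (my_spi_start_transfer is
 * hypothetical):
 *
 *	my_spi_start_transfer(dev);
 *	ret = spi_context_wait_for_completion(&data->ctx);
 */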

/* For synchronous transfers, this will signal a thread waiting
 * in spi_context_wait_for_completion.
 *
 * For asynchronous transfers, this will call the async callback function
 * with the user data.
 */
static inline void spi_context_complete(struct spi_context *ctx,
					const struct device *dev,
					int status)
{
#ifdef CONFIG_SPI_ASYNC
	if (!ctx->asynchronous) {
		ctx->sync_status = status;
		k_sem_give(&ctx->sync);
	} else {
		if (ctx->callback) {
#ifdef CONFIG_SPI_SLAVE
			if (spi_context_is_slave(ctx) && !status) {
				/* Update the status so it reports the
				 * number of received frames.
				 */
				status = ctx->recv_frames;
			}
#endif /* CONFIG_SPI_SLAVE */
			ctx->callback(dev, status, ctx->callback_data);
		}

		if (!(ctx->config->operation & SPI_LOCK_ON)) {
			ctx->owner = NULL;
			k_sem_give(&ctx->lock);
		}
	}
#else
	ctx->sync_status = status;
#ifdef CONFIG_MULTITHREADING
	k_sem_give(&ctx->sync);
#else
	atomic_set(&ctx->ready, 1);
#endif /* CONFIG_MULTITHREADING */
#endif /* CONFIG_SPI_ASYNC */
}
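
/*
 * Example (sketch): completion signaled from a driver's interrupt handler
 * (the my_spi_* names are hypothetical):
 *
 *	static void my_spi_isr(const struct device *dev)
 *	{
 *		struct my_spi_data *data = dev->data;
 *
 *		if (my_spi_transfer_done(dev)) {
 *			spi_context_cs_control(&data->ctx, false);
 *			spi_context_complete(&data->ctx, dev, 0);
 *		}
 *	}
 */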

#ifdef DT_SPI_CTX_HAS_NO_CS_GPIOS
#define spi_context_cs_configure_all(...) 0
#define spi_context_cs_get_all(...) 0
#define spi_context_cs_put_all(...) 0
#define _spi_context_cs_control(...) (void) 0
#define spi_context_cs_control(...) (void) 0
#else /* DT_SPI_CTX_HAS_NO_CS_GPIOS */
/*
 * This function initializes all the chip select GPIOs associated with a spi controller.
 * The context first must be initialized using the SPI_CONTEXT_CS_GPIOS_INITIALIZE macro.
 * This function should be called during the device init sequence so that
 * all the CS lines are configured properly before the first transfer begins.
 * Note: If a controller has native CS control in SPI hardware, that should also be
 * initialized during device init by the driver with hardware-specific code.
 */
static inline int spi_context_cs_configure_all(struct spi_context *ctx)
{
	int ret;
	const struct gpio_dt_spec *cs_gpio;

	for (cs_gpio = ctx->cs_gpios; cs_gpio < &ctx->cs_gpios[ctx->num_cs_gpios]; cs_gpio++) {
		if (!device_is_ready(cs_gpio->port)) {
			LOG_ERR("CS GPIO port %s pin %d is not ready",
				cs_gpio->port->name, cs_gpio->pin);
			return -ENODEV;
		}

		ret = gpio_pin_configure_dt(cs_gpio, GPIO_OUTPUT_INACTIVE);
		if (ret < 0) {
			return ret;
		}
	}

	return 0;
}
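
/*
 * Example (sketch, hypothetical my_spi_init device init function):
 *
 *	static int my_spi_init(const struct device *dev)
 *	{
 *		struct my_spi_data *data = dev->data;
 *		int err = spi_context_cs_configure_all(&data->ctx);
 *
 *		if (err < 0) {
 *			return err;
 *		}
 *
 *		return 0;
 *	}
 */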

/* Helper function to power manage the GPIO CS pins, not meant to be used directly by drivers */
static inline int _spi_context_cs_pm_all(struct spi_context *ctx, bool get)
{
	const struct gpio_dt_spec *cs_gpio;
	int ret;

	for (cs_gpio = ctx->cs_gpios; cs_gpio < &ctx->cs_gpios[ctx->num_cs_gpios]; cs_gpio++) {
		if (get) {
			ret = pm_device_runtime_get(cs_gpio->port);
		} else {
			ret = pm_device_runtime_put(cs_gpio->port);
		}

		if (ret < 0) {
			return ret;
		}
	}

	return 0;
}

/* This function should be called by drivers to PM-get all the chip select lines in
 * master mode when any CS is a GPIO. It should be called from the
 * driver's PM action hook on resume.
 */
static inline int spi_context_cs_get_all(struct spi_context *ctx)
{
	return _spi_context_cs_pm_all(ctx, true);
}

/* This function should be called by drivers to PM-put all the chip select lines in
 * master mode when any CS is a GPIO. It should be called from the
 * driver's PM action hook on suspend.
 */
static inline int spi_context_cs_put_all(struct spi_context *ctx)
{
	return _spi_context_cs_pm_all(ctx, false);
}
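
/*
 * Example PM action hook (sketch; assumes CONFIG_PM_DEVICE and a
 * hypothetical my_spi driver):
 *
 *	static int my_spi_pm_action(const struct device *dev,
 *				    enum pm_device_action action)
 *	{
 *		struct my_spi_data *data = dev->data;
 *
 *		switch (action) {
 *		case PM_DEVICE_ACTION_RESUME:
 *			return spi_context_cs_get_all(&data->ctx);
 *		case PM_DEVICE_ACTION_SUSPEND:
 *			return spi_context_cs_put_all(&data->ctx);
 *		default:
 *			return -ENOTSUP;
 *		}
 *	}
 */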

/* Helper function to control the GPIO CS, not meant to be used directly by drivers */
static inline void _spi_context_cs_control(struct spi_context *ctx,
					   bool on, bool force_off)
{
	if (ctx->config && spi_cs_is_gpio(ctx->config)) {
		if (on) {
			gpio_pin_set_dt(&ctx->config->cs.gpio, 1);
			k_busy_wait(ctx->config->cs.delay);
		} else {
			if (!force_off &&
			    ctx->config->operation & SPI_HOLD_ON_CS) {
				return;
			}

			k_busy_wait(ctx->config->cs.delay);
			gpio_pin_set_dt(&ctx->config->cs.gpio, 0);
		}
	}
}

/* This function should be called by drivers to control the chip select line in master mode
 * when the CS is a GPIO. The de facto usage of the Zephyr SPI API expects the
 * chip select to be asserted throughout the entire transfer specified by a transceive call,
 * i.e. all buffers in a spi_buf_set should be finished before CS is deasserted. Usually
 * the deassertion happens at the end of the transfer, except when the
 * SPI_HOLD_ON_CS bit was set in the configuration.
 */
static inline void spi_context_cs_control(struct spi_context *ctx, bool on)
{
	_spi_context_cs_control(ctx, on, false);
}
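
/*
 * Example (sketch): bracketing a transfer in a driver's transceive path,
 * with my_spi_start_transfer as a hypothetical stand-in for starting the
 * hardware:
 *
 *	spi_context_cs_control(&data->ctx, true);
 *	my_spi_start_transfer(dev);
 *	ret = spi_context_wait_for_completion(&data->ctx);
 *	spi_context_cs_control(&data->ctx, false);
 */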
#endif /* DT_SPI_CTX_HAS_NO_CS_GPIOS */

/* Forcefully releases the spi context and removes the owner, allowing the lock to be
 * taken with spi_context_lock even though the previous owner never released it.
 * This is usually used to aid in the implementation of the spi_release driver API.
 */
static inline void spi_context_unlock_unconditionally(struct spi_context *ctx __maybe_unused)
{
	/* Force CS to the inactive state */
	_spi_context_cs_control(ctx, false, true);

#ifdef CONFIG_MULTITHREADING
	if (!k_sem_count_get(&ctx->lock)) {
		ctx->owner = NULL;
		k_sem_give(&ctx->lock);
	}
#endif /* CONFIG_MULTITHREADING */
}

/*
 * Helper function for advancing to the next buffer in a buf set.
 * Drivers should generally not use this directly;
 * use spi_context_update_(tx/rx) instead.
 */
static inline void *spi_context_get_next_buf(const struct spi_buf **current,
					     size_t *count,
					     size_t *buf_len,
					     uint8_t dfs)
{
	/* This loop skips zero-length buffers in the set, if any. */
	while (*count) {
		if (((*current)->len / dfs) != 0) {
			*buf_len = (*current)->len / dfs;
			return (*current)->buf;
		}
		++(*current);
		--(*count);
	}

	*buf_len = 0;
	return NULL;
}

/*
 * The spi context private API helps the driver keep track of how much of
 * the transfer has been completed. The driver calls the functions below to
 * report when some TX or RX has finished, and the context then tracks how
 * much is left to do.
 */

/*
 * This function must be called at the start of a transfer by the driver
 * to initialize the spi context fields for tracking progress.
 */
static inline
void spi_context_buffers_setup(struct spi_context *ctx,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs,
			       uint8_t dfs)
{
	LOG_DBG("tx_bufs %p - rx_bufs %p - %u", tx_bufs, rx_bufs, dfs);

	ctx->current_tx = tx_bufs ? tx_bufs->buffers : NULL;
	ctx->tx_count = ctx->current_tx ? tx_bufs->count : 0;
	ctx->tx_buf = (const uint8_t *)
		spi_context_get_next_buf(&ctx->current_tx, &ctx->tx_count,
					 &ctx->tx_len, dfs);

	ctx->current_rx = rx_bufs ? rx_bufs->buffers : NULL;
	ctx->rx_count = ctx->current_rx ? rx_bufs->count : 0;
	ctx->rx_buf = (uint8_t *)
		spi_context_get_next_buf(&ctx->current_rx, &ctx->rx_count,
					 &ctx->rx_len, dfs);

	ctx->sync_status = 0;

#ifdef CONFIG_SPI_SLAVE
	ctx->recv_frames = 0;
#endif /* CONFIG_SPI_SLAVE */

	LOG_DBG("current_tx %p (%zu), current_rx %p (%zu),"
		" tx buf/len %p/%zu, rx buf/len %p/%zu",
		ctx->current_tx, ctx->tx_count,
		ctx->current_rx, ctx->rx_count,
		(void *)ctx->tx_buf, ctx->tx_len,
		(void *)ctx->rx_buf, ctx->rx_len);
}
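
/*
 * Example (sketch): "dfs" is the number of bytes per data frame, so a
 * driver configured for 8-bit words would call:
 *
 *	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
 *
 * and one configured for 16-bit words would pass 2 instead.
 */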

/*
 * Should be called to update the tracking of TX being completed.
 *
 * Parameter "dfs" is the number of bytes needed to store a data frame.
 * Parameter "len" is the number of data frames of TX that were sent.
 */
static ALWAYS_INLINE
void spi_context_update_tx(struct spi_context *ctx, uint8_t dfs, uint32_t len)
{
	if (!ctx->tx_len) {
		return;
	}

	if (len > ctx->tx_len) {
		LOG_ERR("Update exceeds current buffer");
		return;
	}

	ctx->tx_len -= len;
	if (!ctx->tx_len) {
		/* Current buffer is done. Get the next one to be processed. */
		++ctx->current_tx;
		--ctx->tx_count;
		ctx->tx_buf = (const uint8_t *)
			spi_context_get_next_buf(&ctx->current_tx,
						 &ctx->tx_count,
						 &ctx->tx_len, dfs);
	} else if (ctx->tx_buf) {
		ctx->tx_buf += dfs * len;
	}

	LOG_DBG("tx buf/len %p/%zu", (void *)ctx->tx_buf, ctx->tx_len);
}

/* Returns true if there are still TX buffers left in the spi_buf_set,
 * even if they are "null" (NOP) buffers.
 */
static ALWAYS_INLINE
bool spi_context_tx_on(struct spi_context *ctx)
{
	return !!(ctx->tx_len);
}

/* Similar to spi_context_tx_on, but only returns true if the current buffer is
 * not a null/NOP placeholder.
 */
static ALWAYS_INLINE
bool spi_context_tx_buf_on(struct spi_context *ctx)
{
	return !!(ctx->tx_buf && ctx->tx_len);
}
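
/*
 * Example (sketch): the TX half of a PIO driver, assuming 8-bit frames
 * (dfs == 1) and hypothetical my_spi_* FIFO helpers:
 *
 *	while (spi_context_tx_buf_on(&data->ctx) && !my_spi_fifo_full(dev)) {
 *		my_spi_write_frame(dev, *data->ctx.tx_buf);
 *		spi_context_update_tx(&data->ctx, 1, 1);
 *	}
 */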

/*
 * Should be called to update the tracking of RX being completed.
 *
 * @param dfs is the number of bytes needed to store a data frame.
 * @param len is the number of data frames of RX that were received.
 */
static ALWAYS_INLINE
void spi_context_update_rx(struct spi_context *ctx, uint8_t dfs, uint32_t len)
{
#ifdef CONFIG_SPI_SLAVE
	if (spi_context_is_slave(ctx)) {
		ctx->recv_frames += len;
	}
#endif /* CONFIG_SPI_SLAVE */

	if (!ctx->rx_len) {
		return;
	}

	if (len > ctx->rx_len) {
		LOG_ERR("Update exceeds current buffer");
		return;
	}

	ctx->rx_len -= len;
	if (!ctx->rx_len) {
		/* Current buffer is done. Get the next one to be processed. */
		++ctx->current_rx;
		--ctx->rx_count;
		ctx->rx_buf = (uint8_t *)
			spi_context_get_next_buf(&ctx->current_rx,
						 &ctx->rx_count,
						 &ctx->rx_len, dfs);
	} else if (ctx->rx_buf) {
		ctx->rx_buf += dfs * len;
	}

	LOG_DBG("rx buf/len %p/%zu", (void *)ctx->rx_buf, ctx->rx_len);
}

/* Returns true if there are still RX buffers left in the spi_buf_set,
 * even if they are "null" (NOP) buffers.
 */
static ALWAYS_INLINE
bool spi_context_rx_on(struct spi_context *ctx)
{
	return !!(ctx->rx_len);
}

/* Similar to spi_context_rx_on, but only returns true if the current buffer is
 * not a null/NOP placeholder.
 */
static ALWAYS_INLINE
bool spi_context_rx_buf_on(struct spi_context *ctx)
{
	return !!(ctx->rx_buf && ctx->rx_len);
}
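
/*
 * Example (sketch): the matching RX half of a PIO driver, again assuming
 * 8-bit frames and hypothetical my_spi_* FIFO helpers:
 *
 *	while (spi_context_rx_buf_on(&data->ctx) && !my_spi_fifo_empty(dev)) {
 *		*data->ctx.rx_buf = my_spi_read_frame(dev);
 *		spi_context_update_rx(&data->ctx, 1, 1);
 *	}
 */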

/*
 * Returns the maximum length of a transfer for which all currently active
 * directions have a continuous buffer, i.e. the maximum SPI transfer that
 * can be done with DMA that handles only non-scattered buffers.
 *
 * In other words, it returns the length of the smaller of the current RX and
 * current TX buffers, except that if either length is 0 it returns the other.
 * If both are 0, it returns 0, which should indicate transfer completion.
 */
static inline size_t spi_context_max_continuous_chunk(struct spi_context *ctx)
{
	if (!ctx->tx_len) {
		return ctx->rx_len;
	} else if (!ctx->rx_len) {
		return ctx->tx_len;
	}

	return MIN(ctx->tx_len, ctx->rx_len);
}
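
/*
 * Example (sketch): a DMA-based driver might size each block transfer as
 * below (my_spi_dma_start is hypothetical; tx_buf/rx_buf may be NULL for
 * NOP buffers, which the driver must handle):
 *
 *	size_t chunk = spi_context_max_continuous_chunk(&data->ctx);
 *
 *	my_spi_dma_start(dev, data->ctx.tx_buf, data->ctx.rx_buf, chunk);
 */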

/* Returns the length of the longer of the current RX or current TX buffer. */
static inline size_t spi_context_longest_current_buf(struct spi_context *ctx)
{
	return ctx->tx_len > ctx->rx_len ? ctx->tx_len : ctx->rx_len;
}

/* Helper function, not intended to be used by drivers directly */
static size_t spi_context_count_tx_buf_lens(struct spi_context *ctx, size_t start_index)
{
	size_t n;
	size_t total_len = 0;

	for (n = start_index; n < ctx->tx_count; ++n) {
		total_len += ctx->current_tx[n].len;
	}

	return total_len;
}

/* Helper function, not intended to be used by drivers directly */
static size_t spi_context_count_rx_buf_lens(struct spi_context *ctx, size_t start_index)
{
	size_t n;
	size_t total_len = 0;

	for (n = start_index; n < ctx->rx_count; ++n) {
		total_len += ctx->current_rx[n].len;
	}

	return total_len;
}

/* Returns the sum of the lengths of the remaining TX buffers in the buf set, including
 * the current buffer in the total.
 */
static inline size_t spi_context_total_tx_len(struct spi_context *ctx)
{
	return spi_context_count_tx_buf_lens(ctx, 0);
}

/* Returns the sum of the lengths of the remaining RX buffers in the buf set, including
 * the current buffer in the total.
 */
static inline size_t spi_context_total_rx_len(struct spi_context *ctx)
{
	return spi_context_count_rx_buf_lens(ctx, 0);
}

/* Similar to spi_context_total_tx_len, except that frames already finished in the
 * current buffer are not counted, i.e. only what remains of the current buffer
 * is included in the sum.
 */
static inline size_t spi_context_tx_len_left(struct spi_context *ctx, uint8_t dfs)
{
	return (ctx->tx_len * dfs) + spi_context_count_tx_buf_lens(ctx, 1);
}

/* Similar to spi_context_total_rx_len, except that frames already finished in the
 * current buffer are not counted, i.e. only what remains of the current buffer
 * is included in the sum.
 */
static inline size_t spi_context_rx_len_left(struct spi_context *ctx, uint8_t dfs)
{
	return (ctx->rx_len * dfs) + spi_context_count_rx_buf_lens(ctx, 1);
}

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_DRIVERS_SPI_SPI_CONTEXT_H_ */