// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */

#ifndef __UBOOT__
#include <log.h>
#include <dm/devres.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include "internals.h"
#else
#include <common.h>
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <spi.h>
#include <spi-mem.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/bug.h>
#endif

#ifndef __UBOOT__
/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
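
#if 0
/*
 * Illustrative sketch (not built): how a Linux SPI controller driver's
 * ->exec_op() hook might use the two helpers above. foo_kick_dma() and the
 * surrounding "foo" driver are hypothetical placeholders.
 */
static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct sg_table sgt;
	int ret;

	/* Map op->data.buf.{in,out} so the controller can DMA to/from it. */
	ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
	if (ret)
		return ret;

	ret = foo_kick_dma(ctlr, op, &sgt);	/* hypothetical DMA transfer */

	/* Always unmap, whether or not the transfer succeeded. */
	spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);

	return ret;
}
#endif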
#endif /* __UBOOT__ */

static int spi_check_buswidth_req(struct spi_slave *slave, u8 buswidth, bool tx)
{
	u32 mode = slave->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & SPI_TX_QUAD)) ||
		    (!tx && (mode & SPI_RX_QUAD)))
			return 0;

		break;

	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}

static bool spi_mem_check_buswidth(struct spi_slave *slave,
				   const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(slave, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(slave, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(slave, op->dummy.buswidth, true))
		return false;

	if (op->data.dir != SPI_MEM_NO_DATA &&
	    spi_check_buswidth_req(slave, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}

bool spi_mem_dtr_supports_op(struct spi_slave *slave,
			     const struct spi_mem_op *op)
{
	if (op->cmd.buswidth == 8 && op->cmd.nbytes % 2)
		return false;

	if (op->addr.nbytes && op->addr.buswidth == 8 && op->addr.nbytes % 2)
		return false;

	if (op->dummy.nbytes && op->dummy.buswidth == 8 && op->dummy.nbytes % 2)
		return false;

	/*
	 * Transactions of odd length do not make sense for 8D-8D-8D mode
	 * because a byte is transferred in just half a cycle.
	 */
	if (op->data.dir != SPI_MEM_NO_DATA && op->data.dir != SPI_MEM_DATA_IN &&
	    op->data.buswidth == 8 && op->data.nbytes % 2)
		return false;

	return spi_mem_check_buswidth(slave, op);
}
EXPORT_SYMBOL_GPL(spi_mem_dtr_supports_op);

bool spi_mem_default_supports_op(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
		return false;

	if (op->cmd.nbytes != 1)
		return false;

	return spi_mem_check_buswidth(slave, op);
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
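
#if 0
/*
 * Illustrative sketch (not built): a controller driver's ->supports_op()
 * hook typically layers its own hardware restrictions on top of the default
 * check. The four-byte address limit below is a made-up example.
 */
static bool foo_supports_op(struct spi_slave *slave,
			    const struct spi_mem_op *op)
{
	/* Hypothetical limit: the controller can only emit 4 address bytes */
	if (op->addr.nbytes > 4)
		return false;

	return spi_mem_default_supports_op(slave, op);
}
#endif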

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @slave: the SPI device
 * @op: the memory operation to check
 *
 * Some controllers only support single or dual I/O; others might only support
 * specific opcodes. It can even be that the controller and the device both
 * support quad I/O, but the board design prevents using it because only two
 * I/O lines are wired up.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_slave *slave,
			 const struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);

	if (ops->mem_ops && ops->mem_ops->supports_op)
		return ops->mem_ops->supports_op(slave, op);

	return spi_mem_default_supports_op(slave, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
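
#if 0
/*
 * Illustrative sketch (not built): probing whether a 1-1-4 fast-read
 * operation (opcode 0x6b, QUAD OUTPUT FAST READ on common flashes) would be
 * accepted before committing to it. The check performs no transfer, so a
 * NULL data buffer is fine here.
 */
static bool foo_can_quad_read(struct spi_slave *slave)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
			   SPI_MEM_OP_ADDR(3, 0, 1),
			   SPI_MEM_OP_DUMMY(1, 1),
			   SPI_MEM_OP_DATA_IN(1, NULL, 4));

	return spi_mem_supports_op(slave, &op);
}
#endif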

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @slave: the SPI device
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);
	unsigned int pos = 0;
	const u8 *tx_buf = NULL;
	u8 *rx_buf = NULL;
	int op_len;
	u32 flag;
	int ret;
	int i;

	if (!spi_mem_supports_op(slave, op))
		return -ENOTSUPP;

	ret = spi_claim_bus(slave);
	if (ret < 0)
		return ret;

	if (ops->mem_ops && ops->mem_ops->exec_op) {
#ifndef __UBOOT__
		/*
		 * Flush the message queue before executing our SPI memory
		 * operation to prevent preemption of regular SPI transfers.
		 */
		spi_flush_queue(ctlr);

		if (ctlr->auto_runtime_pm) {
			ret = pm_runtime_get_sync(ctlr->dev.parent);
			if (ret < 0) {
				dev_err(&ctlr->dev,
					"Failed to power device: %d\n",
					ret);
				return ret;
			}
		}

		mutex_lock(&ctlr->bus_lock_mutex);
		mutex_lock(&ctlr->io_mutex);
#endif
		ret = ops->mem_ops->exec_op(slave, op);

#ifndef __UBOOT__
		mutex_unlock(&ctlr->io_mutex);
		mutex_unlock(&ctlr->bus_lock_mutex);

		if (ctlr->auto_runtime_pm)
			pm_runtime_put(ctlr->dev.parent);
#endif

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP) {
			spi_release_bus(slave);
			return ret;
		}
	}

#ifndef __UBOOT__
	tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
	 * we're guaranteed that this buffer is DMA-able, as required by the
	 * SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = op->cmd.nbytes;
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(slave, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;
#else

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN)
			rx_buf = op->data.buf.in;
		else
			tx_buf = op->data.buf.out;
	}

	op_len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

	/*
	 * Avoid using malloc() here so that we can use this code in SPL where
	 * simple malloc may be used. That implementation does not allow free()
	 * so repeated calls to this code can exhaust the space.
	 *
	 * The value of op_len is small, since it does not include the actual
	 * data being sent, only the op-code and address. In fact, it should be
	 * possible to just use a small fixed value here instead of op_len.
	 */
	u8 op_buf[op_len];

	op_buf[pos++] = op->cmd.opcode;

	if (op->addr.nbytes) {
		for (i = 0; i < op->addr.nbytes; i++)
			op_buf[pos + i] = op->addr.val >>
				(8 * (op->addr.nbytes - i - 1));

		pos += op->addr.nbytes;
	}

	if (op->dummy.nbytes)
		memset(op_buf + pos, 0xff, op->dummy.nbytes);

	/* 1st transfer: opcode + address + dummy cycles */
	flag = SPI_XFER_BEGIN;
	/* Make sure to set END bit if no tx or rx data messages follow */
	if (!tx_buf && !rx_buf)
		flag |= SPI_XFER_END;

	ret = spi_xfer(slave, op_len * 8, op_buf, NULL, flag);
	if (ret)
		return ret;

	/* 2nd transfer: rx or tx data path */
	if (tx_buf || rx_buf) {
		ret = spi_xfer(slave, op->data.nbytes * 8, tx_buf,
			       rx_buf, SPI_XFER_END);
		if (ret)
			return ret;
	}

	spi_release_bus(slave);

	for (i = 0; i < pos; i++)
		debug("%02x ", op_buf[i]);
	debug("| [%dB %s] ",
	      tx_buf || rx_buf ? op->data.nbytes : 0,
	      tx_buf || rx_buf ? (tx_buf ? "out" : "in") : "-");
	for (i = 0; i < op->data.nbytes; i++)
		debug("%02x ", tx_buf ? tx_buf[i] : rx_buf[i]);
	debug("[ret %d]\n", ret);

	if (ret < 0)
		return ret;
#endif /* __UBOOT__ */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
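
#if 0
/*
 * Illustrative sketch (not built): reading the three JEDEC ID bytes
 * (opcode 0x9f) with spi_mem_exec_op(). @id must be DMA-able on controllers
 * that DMA the data phase.
 */
static int foo_read_jedec_id(struct spi_slave *slave, u8 id[3])
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
			   SPI_MEM_OP_NO_ADDR,
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(3, id, 1));

	return spi_mem_exec_op(slave, &op);
}
#endif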

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @slave: the SPI device
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_slave *slave, struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);

	if (ops->mem_ops && ops->mem_ops->adjust_op_size)
		return ops->mem_ops->adjust_op_size(slave, op);

	if (!ops->mem_ops || !ops->mem_ops->exec_op) {
		unsigned int len;

		len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
		if (slave->max_write_size && len > slave->max_write_size)
			return -EINVAL;

		if (op->data.dir == SPI_MEM_DATA_IN) {
			if (slave->max_read_size)
				op->data.nbytes = min(op->data.nbytes,
						      slave->max_read_size);
		} else if (slave->max_write_size) {
			op->data.nbytes = min(op->data.nbytes,
					      slave->max_write_size - len);
		}

		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
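
#if 0
/*
 * Illustrative sketch (not built): the usual caller pattern. Each loop
 * iteration lets the controller shrink op.data.nbytes to what it can handle
 * in one go, executes that chunk and advances. Opcode 0x03 (plain read) and
 * the 3-byte address are example choices.
 */
static int foo_read(struct spi_slave *slave, u32 addr, void *buf, size_t len)
{
	size_t done = 0;
	int ret;

	while (done < len) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
				   SPI_MEM_OP_ADDR(3, addr + done, 1),
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(len - done,
						      (u8 *)buf + done, 1));

		ret = spi_mem_adjust_op_size(slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(slave, &op);
		if (ret)
			return ret;

		done += op.data.nbytes;
	}

	return 0;
}
#endif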

static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->slave, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->slave, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
				       u64 offs, size_t len, const void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.out = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->slave, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->slave, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @slave: the SPI device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function creates a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on its own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_slave *slave,
		      const struct spi_mem_dirmap_info *info)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);
	struct spi_mem_dirmap_desc *desc;
	int ret = -EOPNOTSUPP;

	/* Make sure the number of address bytes is between 1 and 8. */
	if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
		return ERR_PTR(-EINVAL);

	/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
	if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
		return ERR_PTR(-EINVAL);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->slave = slave;
	desc->info = *info;
	if (ops->mem_ops && ops->mem_ops->dirmap_create)
		ret = ops->mem_ops->dirmap_create(desc);

	if (ret) {
		desc->nodirmap = true;
		if (!spi_mem_supports_op(desc->slave, &desc->info.op_tmpl))
			ret = -EOPNOTSUPP;
		else
			ret = 0;
	}

	if (ret) {
		kfree(desc);
		return ERR_PTR(ret);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
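
#if 0
/*
 * Illustrative sketch (not built): creating a read direct mapping covering
 * the first 16 MiB of a flash. Opcode 0x03 and the 1-1-1 geometry are
 * example choices; data.nbytes in the template is left at 0 because the
 * actual length comes from each spi_mem_dirmap_read() call.
 */
static struct spi_mem_dirmap_desc *foo_create_read_dirmap(struct spi_slave *slave)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
				      SPI_MEM_OP_ADDR(3, 0, 1),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
		.offset = 0,
		.length = 16 << 20,	/* 16 MiB */
	};

	return spi_mem_dirmap_create(slave, &info);
}
#endif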

/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
	struct udevice *bus = desc->slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);

	if (!desc->nodirmap && ops->mem_ops && ops->mem_ops->dirmap_destroy)
		ops->mem_ops->dirmap_destroy(desc);

	kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);

#ifndef __UBOOT__
static void devm_spi_mem_dirmap_release(struct udevice *dev, void *res)
{
	struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;

	spi_mem_dirmap_destroy(desc);
}

/**
 * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
 *				  it to a device
 * @dev: device the dirmap desc will be attached to
 * @slave: the SPI device this direct mapping should be created for
 * @info: direct mapping information
 *
 * devm_ variant of the spi_mem_dirmap_create() function. See
 * spi_mem_dirmap_create() for more details.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct udevice *dev, struct spi_slave *slave,
			   const struct spi_mem_dirmap_info *info)
{
	struct spi_mem_dirmap_desc **ptr, *desc;

	ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	desc = spi_mem_dirmap_create(slave, info);
	if (IS_ERR(desc)) {
		devres_free(ptr);
	} else {
		*ptr = desc;
		devres_add(dev, ptr);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);

static int devm_spi_mem_dirmap_match(struct udevice *dev, void *res, void *data)
{
	struct spi_mem_dirmap_desc **ptr = res;

	if (WARN_ON(!ptr || !*ptr))
		return 0;

	return *ptr == data;
}

/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *				   to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See
 * spi_mem_dirmap_destroy() for more details.
 */
void devm_spi_mem_dirmap_destroy(struct udevice *dev,
				 struct spi_mem_dirmap_desc *desc)
{
	devres_release(dev, devm_spi_mem_dirmap_release,
		       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);
#endif /* __UBOOT__ */

/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
			    u64 offs, size_t len, void *buf)
{
	struct udevice *bus = desc->slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap)
		ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
	else if (ops->mem_ops && ops->mem_ops->dirmap_read)
		ret = ops->mem_ops->dirmap_read(desc, offs, len, buf);
	else
		ret = -EOPNOTSUPP;

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
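
#if 0
/*
 * Illustrative sketch (not built): since spi_mem_dirmap_read() may return a
 * short count, callers loop until all @len bytes have arrived. The same
 * pattern applies to spi_mem_dirmap_write().
 */
static int foo_dirmap_read_all(struct spi_mem_dirmap_desc *desc, u64 offs,
			       void *buf, size_t len)
{
	size_t done = 0;

	while (done < len) {
		ssize_t ret = spi_mem_dirmap_read(desc, offs + done,
						  len - done, (u8 *)buf + done);

		if (ret < 0)
			return ret;
		if (!ret)
			return -EIO;	/* no progress, avoid looping forever */

		done += ret;
	}

	return 0;
}
#endif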

/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
			     u64 offs, size_t len, const void *buf)
{
	struct udevice *bus = desc->slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap)
		ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
	else if (ops->mem_ops && ops->mem_ops->dirmap_write)
		ret = ops->mem_ops->dirmap_write(desc, offs, len, buf);
	else
		ret = -EOPNOTSUPP;

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);

#ifndef __UBOOT__
static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;
	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static int spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		return memdrv->remove(mem);

	return 0;
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}
806 
807 /**
808  * spi_mem_driver_register_with_owner() - Register a SPI memory driver
809  * @memdrv: the SPI memory driver to register
810  * @owner: the owner of this driver
811  *
812  * Registers a SPI memory driver.
813  *
814  * Return: 0 in case of success, a negative error core otherwise.
815  */
816 
spi_mem_driver_register_with_owner(struct spi_mem_driver * memdrv,struct module * owner)817 int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
818 				       struct module *owner)
819 {
820 	memdrv->spidrv.probe = spi_mem_probe;
821 	memdrv->spidrv.remove = spi_mem_remove;
822 	memdrv->spidrv.shutdown = spi_mem_shutdown;
823 
824 	return __spi_register_driver(owner, &memdrv->spidrv);
825 }
826 EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
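
#if 0
/*
 * Illustrative sketch (not built): a minimal SPI memory driver. The "foo"
 * names are placeholders; real drivers usually rely on the
 * module_spi_mem_driver() helper macro rather than calling the register
 * function directly.
 */
static int foo_probe(struct spi_mem *mem)
{
	dev_info(&mem->spi->dev, "foo memory device attached\n");

	return 0;
}

static struct spi_mem_driver foo_mem_driver = {
	.spidrv = {
		.driver = {
			.name = "foo-mem",
		},
	},
	.probe = foo_probe,
};

module_spi_mem_driver(foo_mem_driver);
#endif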

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
#endif /* __UBOOT__ */