// SPDX-License-Identifier: GPL-2.0+
/*
 * Direct Memory Access U-Class driver
 *
 * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com>
 * Copyright (C) 2015 - 2018 Texas Instruments Incorporated <www.ti.com>
 * Written by Mugunthan V N <mugunthanvnm@ti.com>
 *
 * Author: Mugunthan V N <mugunthanvnm@ti.com>
 */

#define LOG_CATEGORY UCLASS_DMA

#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <malloc.h>
#include <asm/cache.h>
#include <dm/read.h>
#include <dma-uclass.h>
#include <linux/dma-mapping.h>
#include <dt-structs.h>
#include <errno.h>
#include <linux/printk.h>

#ifdef CONFIG_DMA_CHANNELS
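/* Fetch the struct dma_ops of a DMA provider device */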
static inline struct dma_ops *dma_dev_ops(struct udevice *dev)
{
	return (struct dma_ops *)dev->driver->ops;
}

# if CONFIG_IS_ENABLED(OF_CONTROL)
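/**
 * dma_of_xlate_default() - default translation of a DT DMA specifier
 * @dma: DMA channel being looked up
 * @args: phandle arguments from the client's "dmas" property
 *
 * Used when the provider driver supplies no of_xlate op. Accepts at most
 * one argument cell, which becomes the channel ID (0 if no cell is given).
 *
 * Return: 0 on success, -EINVAL if more than one argument cell is present.
 */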
static int dma_of_xlate_default(struct dma *dma,
				struct ofnode_phandle_args *args)
{
	debug("%s(dma=%p)\n", __func__, dma);

	if (args->args_count > 1) {
		pr_err("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	if (args->args_count)
		dma->id = args->args[0];
	else
		dma->id = 0;

	return 0;
}

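/**
 * dma_get_by_index() - get a DMA channel by index from the "dmas" property
 * @dev: client device
 * @index: index of the entry in the client's "dmas" list
 * @dma: DMA channel descriptor to fill in
 *
 * Resolves the indexed phandle, finds the provider device in UCLASS_DMA,
 * translates the specifier (via the provider's of_xlate op, or the default
 * one above) and finally requests the channel.
 *
 * Return: 0 on success, or a negative error code on failure.
 */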
int dma_get_by_index(struct udevice *dev, int index, struct dma *dma)
{
	int ret;
	struct ofnode_phandle_args args;
	struct udevice *dev_dma;
	const struct dma_ops *ops;

	debug("%s(dev=%p, index=%d, dma=%p)\n", __func__, dev, index, dma);

	assert(dma);
	dma->dev = NULL;

	ret = dev_read_phandle_with_args(dev, "dmas", "#dma-cells", 0, index,
					 &args);
	if (ret) {
		pr_err("%s: dev_read_phandle_with_args failed: err=%d\n",
		       __func__, ret);
		return ret;
	}

	ret = uclass_get_device_by_ofnode(UCLASS_DMA, args.node, &dev_dma);
	if (ret) {
		pr_err("%s: uclass_get_device_by_ofnode failed: err=%d\n",
		       __func__, ret);
		return ret;
	}

	dma->dev = dev_dma;

	ops = dma_dev_ops(dev_dma);

	if (ops->of_xlate)
		ret = ops->of_xlate(dma, &args);
	else
		ret = dma_of_xlate_default(dma, &args);
	if (ret) {
		pr_err("of_xlate() failed: %d\n", ret);
		return ret;
	}

	return dma_request(dev_dma, dma);
}

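/**
 * dma_get_by_name() - get a DMA channel by name from "dma-names"
 * @dev: client device
 * @name: name of the entry in the client's "dma-names" list
 * @dma: DMA channel descriptor to fill in
 *
 * Translates @name into an index via "dma-names" and defers to
 * dma_get_by_index(). For example, a hypothetical client node with
 * dmas = <&dmac 0>, dma-names = "rx" would obtain its channel with
 * dma_get_by_name(dev, "rx", &dma).
 *
 * Return: 0 on success, or a negative error code on failure.
 */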
int dma_get_by_name(struct udevice *dev, const char *name, struct dma *dma)
{
	int index;

	debug("%s(dev=%p, name=%s, dma=%p)\n", __func__, dev, name, dma);
	dma->dev = NULL;

	index = dev_read_stringlist_search(dev, "dma-names", name);
	if (index < 0) {
		pr_err("dev_read_stringlist_search() failed: %d\n", index);
		return index;
	}

	return dma_get_by_index(dev, index, dma);
}
# endif /* OF_CONTROL */

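/**
 * dma_request() - request/claim a DMA channel from its provider
 * @dev: DMA provider device
 * @dma: DMA channel descriptor, with dma->id already set
 *
 * Return: 0 on success (or if the provider has no request op), else a
 * negative error code from the provider.
 */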
int dma_request(struct udevice *dev, struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dev);

	debug("%s(dev=%p, dma=%p)\n", __func__, dev, dma);

	dma->dev = dev;

	if (!ops->request)
		return 0;

	return ops->request(dma);
}

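/**
 * dma_free() - release a previously requested DMA channel
 * @dma: DMA channel to free
 *
 * Return: 0 on success (or if the provider has no rfree op), else a
 * negative error code from the provider.
 */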
int dma_free(struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->rfree)
		return 0;

	return ops->rfree(dma);
}

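/**
 * dma_enable() - enable (start) a DMA channel
 * @dma: DMA channel to enable
 *
 * Return: 0 on success, -ENOSYS if the provider has no enable op, else a
 * negative error code from the provider.
 */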
int dma_enable(struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->enable)
		return -ENOSYS;

	return ops->enable(dma);
}

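/**
 * dma_disable() - disable (stop) a DMA channel
 * @dma: DMA channel to disable
 *
 * Return: 0 on success, -ENOSYS if the provider has no disable op, else a
 * negative error code from the provider.
 */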
int dma_disable(struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->disable)
		return -ENOSYS;

	return ops->disable(dma);
}

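/**
 * dma_prepare_rcv_buf() - queue a buffer for receiving data
 * @dma: DMA channel to queue the buffer on
 * @dst: destination buffer
 * @size: size of the buffer in bytes
 *
 * Return: 0 on success, -ENOSYS if the provider has no prepare_rcv_buf op,
 * else a negative error code from the provider.
 */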
int dma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	/* Return -ENOSYS like the other ops when not implemented */
	if (!ops->prepare_rcv_buf)
		return -ENOSYS;

	return ops->prepare_rcv_buf(dma, dst, size);
}

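/**
 * dma_receive() - receive a transfer on a DMA channel
 * @dma: DMA channel to receive on
 * @dst: updated to point at the received data
 * @metadata: driver-specific per-transfer metadata
 *
 * Return: the provider-defined result (typically the length of the received
 * data) on success, -ENOSYS if the provider has no receive op, else a
 * negative error code from the provider.
 */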
int dma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->receive)
		return -ENOSYS;

	return ops->receive(dma, dst, metadata);
}

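/**
 * dma_send() - send data through a DMA channel
 * @dma: DMA channel to send on
 * @src: source buffer
 * @len: number of bytes to send
 * @metadata: driver-specific per-transfer metadata
 *
 * Return: 0 on success, -ENOSYS if the provider has no send op, else a
 * negative error code from the provider.
 */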
int dma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->send)
		return -ENOSYS;

	return ops->send(dma, src, len, metadata);
}

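/**
 * dma_get_cfg() - get DMA channel configuration data
 * @dma: DMA channel to query
 * @cfg_id: provider-specific ID of the configuration item
 * @cfg_data: updated to point at the configuration data
 *
 * Return: 0 on success, -ENOSYS if the provider has no get_cfg op, else a
 * negative error code from the provider.
 */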
int dma_get_cfg(struct dma *dma, u32 cfg_id, void **cfg_data)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->get_cfg)
		return -ENOSYS;

	return ops->get_cfg(dma, cfg_id, cfg_data);
}
#endif /* CONFIG_DMA_CHANNELS */

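/**
 * dma_get_device() - find a DMA device supporting a given transfer type
 * @transfer_type: bitmask of the DMA_SUPPORTS_* capabilities required
 * @devp: updated with the first matching device
 *
 * Iterates over all UCLASS_DMA devices and returns the first one whose
 * uclass-private "supported" mask covers @transfer_type.
 *
 * Return: 0 on success, -EPROTONOSUPPORT if no matching device exists.
 */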
int dma_get_device(u32 transfer_type, struct udevice **devp)
{
	struct udevice *dev;

	for (uclass_first_device(UCLASS_DMA, &dev); dev;
	     uclass_next_device(&dev)) {
		struct dma_dev_priv *uc_priv;

		uc_priv = dev_get_uclass_priv(dev);
		if (uc_priv->supported & transfer_type)
			break;
	}

	if (!dev) {
		pr_debug("No DMA device found that supports transfer type %x\n",
			 transfer_type);
		return -EPROTONOSUPPORT;
	}

	*devp = dev;

	return 0;
}

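/**
 * dma_memcpy() - copy memory using a MEM_TO_MEM capable DMA device
 * @dst: destination buffer
 * @src: source buffer
 * @len: number of bytes to copy
 *
 * Picks the first DMA device advertising DMA_SUPPORTS_MEM_TO_MEM and uses
 * its transfer op. Both buffers are mapped around the transfer so that the
 * CPU caches stay coherent with what the DMA engine reads and writes.
 *
 * Return: 0 on success, or a negative error code on failure.
 */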
int dma_memcpy(void *dst, void *src, size_t len)
{
	struct udevice *dev;
	const struct dma_ops *ops;
	dma_addr_t destination;
	dma_addr_t source;
	int ret;

	ret = dma_get_device(DMA_SUPPORTS_MEM_TO_MEM, &dev);
	if (ret < 0)
		return ret;

	ops = device_get_ops(dev);
	if (!ops->transfer)
		return -ENOSYS;

	/* Clean the areas, so no writeback into RAM races with the DMA */
	destination = dma_map_single(dst, len, DMA_FROM_DEVICE);
	source = dma_map_single(src, len, DMA_TO_DEVICE);

	ret = ops->transfer(dev, DMA_MEM_TO_MEM, destination, source, len);

	/* Clean+Invalidate the areas afterwards, so the CPU sees the DMA'd data */
	dma_unmap_single(destination, len, DMA_FROM_DEVICE);
	dma_unmap_single(source, len, DMA_TO_DEVICE);

	return ret;
}

UCLASS_DRIVER(dma) = {
	.id		= UCLASS_DMA,
	.name		= "dma",
	.flags		= DM_UC_FLAG_SEQ_ALIAS,
	.per_device_auto	= sizeof(struct dma_dev_priv),
};