/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-resv.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

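/*
 * struct vb2_vmalloc_buf - per-buffer bookkeeping for this allocator
 * @vaddr:      kernel virtual address of the buffer (a vmalloc area,
 *              vm_map_ram() mapping, ioremap() mapping or dma-buf vmap,
 *              depending on how the buffer was obtained)
 * @vec:        pinned user pages for USERPTR buffers, NULL otherwise
 * @dma_dir:    DMA transfer direction copied from the owning queue
 * @size:       size of the buffer in bytes
 * @refcount:   users of this buffer; userspace mmap()s and exported
 *              dma-bufs each hold a reference
 * @handler:    common vm_area handler used to drop @refcount on munmap()
 * @dbuf:       attached dma-buf for DMABUF buffers, NULL otherwise
 */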
struct vb2_vmalloc_buf {
        void                            *vaddr;
        struct frame_vector             *vec;
        enum dma_data_direction         dma_dir;
        unsigned long                   size;
        refcount_t                      refcount;
        struct vb2_vmarea_handler       handler;
        struct dma_buf                  *dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

static void *vb2_vmalloc_alloc(struct vb2_buffer *vb, struct device *dev,
                               unsigned long size)
{
        struct vb2_vmalloc_buf *buf;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL | vb->vb2_queue->gfp_flags);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->size = size;
        buf->vaddr = vmalloc_user(buf->size);
        if (!buf->vaddr) {
                pr_debug("vmalloc of size %lu failed\n", buf->size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        buf->dma_dir = vb->vb2_queue->dma_dir;
        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_vmalloc_put;
        buf->handler.arg = buf;

        refcount_set(&buf->refcount, 1);
        return buf;
}

static void vb2_vmalloc_put(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (refcount_dec_and_test(&buf->refcount)) {
                vfree(buf->vaddr);
                kfree(buf);
        }
}

static void *vb2_vmalloc_get_userptr(struct vb2_buffer *vb, struct device *dev,
                                     unsigned long vaddr, unsigned long size)
{
        struct vb2_vmalloc_buf *buf;
        struct frame_vector *vec;
        int n_pages, offset, i;
        int ret = -ENOMEM;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dma_dir = vb->vb2_queue->dma_dir;
        offset = vaddr & ~PAGE_MASK;
        buf->size = size;
        vec = vb2_create_framevec(vaddr, size,
                                  buf->dma_dir == DMA_FROM_DEVICE ||
                                  buf->dma_dir == DMA_BIDIRECTIONAL);
        if (IS_ERR(vec)) {
                ret = PTR_ERR(vec);
                goto fail_pfnvec_create;
        }
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        if (frame_vector_to_pages(vec) < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * We cannot get page pointers for these pfns. Check that the
                 * memory is physically contiguous and use a direct mapping
                 * instead.
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i - 1] + 1 != nums[i])
                                goto fail_map;
                buf->vaddr = (__force void *)
                        ioremap(__pfn_to_phys(nums[0]), size + offset);
        } else {
                buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
        }

        if (!buf->vaddr)
                goto fail_map;
        buf->vaddr += offset;
        return buf;

fail_map:
        vb2_destroy_framevec(vec);
fail_pfnvec_create:
        kfree(buf);

        return ERR_PTR(ret);
}
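
/*
 * Illustrative sketch (not part of this allocator): the path above is
 * reached when userspace queues a V4L2_MEMORY_USERPTR buffer, roughly as
 * follows. Error handling is omitted; fd is assumed to be an open video
 * device and image_size to match the negotiated format:
 *
 *      struct v4l2_requestbuffers req = {
 *              .count = 4,
 *              .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *              .memory = V4L2_MEMORY_USERPTR,
 *      };
 *      struct v4l2_buffer buf = {
 *              .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *              .memory = V4L2_MEMORY_USERPTR,
 *              .index = 0,
 *      };
 *      void *mem = malloc(image_size);
 *
 *      ioctl(fd, VIDIOC_REQBUFS, &req);
 *      buf.m.userptr = (unsigned long)mem;
 *      buf.length = image_size;
 *      ioctl(fd, VIDIOC_QBUF, &buf);
 *
 * vb2 then calls vb2_vmalloc_get_userptr() to pin those pages and map
 * them into the kernel.
 */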

static void vb2_vmalloc_put_userptr(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
        unsigned int i;
        struct page **pages;
        unsigned int n_pages;

        if (!buf->vec->is_pfns) {
                n_pages = frame_vector_count(buf->vec);
                pages = frame_vector_pages(buf->vec);
                if (vaddr)
                        vm_unmap_ram((void *)vaddr, n_pages);
                if (buf->dma_dir == DMA_FROM_DEVICE ||
                    buf->dma_dir == DMA_BIDIRECTIONAL)
                        for (i = 0; i < n_pages; i++)
                                set_page_dirty_lock(pages[i]);
        } else {
                iounmap((__force void __iomem *)buf->vaddr);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

static void *vb2_vmalloc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (!buf->vaddr) {
                pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
                return NULL;
        }

        return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        return refcount_read(&buf->refcount);
}

static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                pr_err("No memory to map\n");
                return -EINVAL;
        }

        ret = remap_vmalloc_range(vma, buf->vaddr, 0);
        if (ret) {
                pr_err("Remapping vmalloc memory, error: %d\n", ret);
                return ret;
        }

        /*
         * Make sure that vm_areas for two buffers won't be merged together.
         */
        vm_flags_set(vma, VM_DONTEXPAND);

        /*
         * Use common vm_area operations to track buffer refcount.
         */
        vma->vm_private_data = &buf->handler;
        vma->vm_ops = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        return 0;
}
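
/*
 * Illustrative sketch (not part of this allocator): for MMAP buffers,
 * userspace typically queries the per-buffer offset and then maps it,
 * which ends up in vb2_vmalloc_mmap() above:
 *
 *      struct v4l2_buffer buf = {
 *              .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *              .memory = V4L2_MEMORY_MMAP,
 *              .index = 0,
 *      };
 *
 *      ioctl(fd, VIDIOC_QUERYBUF, &buf);
 *      void *mem = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, buf.m.offset);
 *
 * Unmapping with munmap() later drops the reference taken through
 * vma->vm_ops->open() and may free the buffer via vb2_vmalloc_put().
 */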

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
                                         struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_vmalloc_attachment *attach;
        struct vb2_vmalloc_buf *buf = dbuf->priv;
        int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
        struct sg_table *sgt;
        struct scatterlist *sg;
        void *vaddr = buf->vaddr;
        int ret;
        int i;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return ret;
        }
        for_each_sgtable_sg(sgt, sg, i) {
                struct page *page = vmalloc_to_page(vaddr);

                if (!page) {
                        sg_free_table(sgt);
                        kfree(attach);
                        return -ENOMEM;
                }
                sg_set_page(sg, page, PAGE_SIZE, 0);
                vaddr += PAGE_SIZE;
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;
        return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
                                          struct dma_buf_attachment *db_attach)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        sgt = &attach->sgt;
        /* return the previously mapped sg table */
        if (attach->dma_dir == dma_dir)
                return sgt;

        /* release any previous mapping */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
                attach->dma_dir = DMA_NONE;
        }

        /* map the scatterlist for the client with the new direction */
        if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
                pr_err("failed to map scatterlist\n");
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
                                         struct sg_table *sgt,
                                         enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_vmalloc_get_dmabuf */
        vb2_vmalloc_put(dbuf->priv);
}

static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf,
                                       struct iosys_map *map)
{
        struct vb2_vmalloc_buf *buf = dbuf->priv;

        iosys_map_set_vaddr(map, buf->vaddr);

        return 0;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
                                       struct vm_area_struct *vma)
{
        dma_resv_assert_held(dbuf->resv);

        return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
        .attach = vb2_vmalloc_dmabuf_ops_attach,
        .detach = vb2_vmalloc_dmabuf_ops_detach,
        .map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
        .vmap = vb2_vmalloc_dmabuf_ops_vmap,
        .mmap = vb2_vmalloc_dmabuf_ops_mmap,
        .release = vb2_vmalloc_dmabuf_ops_release,
};

static struct dma_buf *vb2_vmalloc_get_dmabuf(struct vb2_buffer *vb,
                                              void *buf_priv,
                                              unsigned long flags)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_vmalloc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (WARN_ON(!buf->vaddr))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        refcount_inc(&buf->refcount);

        return dbuf;
}
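
/*
 * Illustrative sketch (not part of this allocator): userspace obtains a
 * dma-buf file descriptor for an MMAP buffer via VIDIOC_EXPBUF, which
 * ends up in vb2_vmalloc_get_dmabuf() above:
 *
 *      struct v4l2_exportbuffer expbuf = {
 *              .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *              .index = 0,
 *              .plane = 0,
 *              .flags = O_CLOEXEC | O_RDWR,
 *      };
 *
 *      ioctl(fd, VIDIOC_EXPBUF, &expbuf);
 *
 * On success, expbuf.fd holds a dma-buf fd that can be handed to another
 * device or subsystem; closing it releases the reference taken with
 * refcount_inc() above.
 */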
#endif /* CONFIG_HAS_DMA */


/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;
        struct iosys_map map;
        int ret;

        ret = dma_buf_vmap_unlocked(buf->dbuf, &map);
        if (ret)
                return -EFAULT;
        buf->vaddr = map.vaddr;

        return 0;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;
        struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

        dma_buf_vunmap_unlocked(buf->dbuf, &map);
        buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;
        struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

        if (buf->vaddr)
                dma_buf_vunmap_unlocked(buf->dbuf, &map);

        kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(struct vb2_buffer *vb,
                                       struct device *dev,
                                       struct dma_buf *dbuf,
                                       unsigned long size)
{
        struct vb2_vmalloc_buf *buf;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dbuf = dbuf;
        buf->dma_dir = vb->vb2_queue->dma_dir;
        buf->size = size;

        return buf;
}
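
/*
 * Illustrative sketch (not part of this allocator): the importer
 * callbacks above run when userspace queues a buffer backed by a dma-buf
 * fd (after VIDIOC_REQBUFS with V4L2_MEMORY_DMABUF):
 *
 *      struct v4l2_buffer buf = {
 *              .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *              .memory = V4L2_MEMORY_DMABUF,
 *              .index = 0,
 *      };
 *
 *      buf.m.fd = dmabuf_fd;
 *      ioctl(fd, VIDIOC_QBUF, &buf);
 *
 * vb2 then uses vb2_vmalloc_attach_dmabuf() to wrap the dma-buf and
 * vb2_vmalloc_map_dmabuf() to obtain a kernel mapping before the driver
 * touches the buffer.
 */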

const struct vb2_mem_ops vb2_vmalloc_memops = {
        .alloc = vb2_vmalloc_alloc,
        .put = vb2_vmalloc_put,
        .get_userptr = vb2_vmalloc_get_userptr,
        .put_userptr = vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
        .get_dmabuf = vb2_vmalloc_get_dmabuf,
#endif
        .map_dmabuf = vb2_vmalloc_map_dmabuf,
        .unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
        .attach_dmabuf = vb2_vmalloc_attach_dmabuf,
        .detach_dmabuf = vb2_vmalloc_detach_dmabuf,
        .vaddr = vb2_vmalloc_vaddr,
        .mmap = vb2_vmalloc_mmap,
        .num_users = vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
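
/*
 * Illustrative sketch (not part of this file): a driver selects this
 * allocator by pointing its vb2_queue at vb2_vmalloc_memops during queue
 * setup, typically at probe time. Names such as my_dev, my_buffer and
 * my_queue_ops are hypothetical:
 *
 *      struct vb2_queue *q = &my_dev->queue;
 *
 *      q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *      q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
 *      q->drv_priv = my_dev;
 *      q->buf_struct_size = sizeof(struct my_buffer);
 *      q->ops = &my_queue_ops;
 *      q->mem_ops = &vb2_vmalloc_memops;
 *      q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *      ret = vb2_queue_init(q);
 *
 * The vmalloc allocator suits devices that do not need physically
 * contiguous or scatter-gather DMA memory and access buffers through the
 * CPU, e.g. USB drivers that copy data with memcpy().
 */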

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);