// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 */
/*
 * This file contains the entry functions for memory management of the ISP
 * driver.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/highmem.h>	/* for kmap_local_page() */
#include <linux/io.h>		/* for page_to_phys() */
#include <linux/sysfs.h>

#include "hmm/hmm.h"
#include "hmm/hmm_bo.h"

#include "atomisp_internal.h"
#include "asm/cacheflush.h"
#include "mmu/isp_mmu.h"
#include "mmu/sh_mmu_mrfld.h"
struct hmm_bo_device bo_device;
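
/*
 * dummy_ptr starts out as mmgr_EXCEPTION so that hmm_cleanup() can tell
 * whether the dummy allocation made in hmm_init() actually took place.
 */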
static ia_css_ptr dummy_ptr = mmgr_EXCEPTION;
static bool hmm_initialized;

int hmm_init(void)
{
	int ret;

	ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld,
				 ISP_VM_START, ISP_VM_SIZE);
	if (ret)
		dev_err(atomisp_dev, "hmm_bo_device_init failed.\n");

	hmm_initialized = true;

	/*
	 * hmm uses 0 (NULL) to mark an invalid ISP virtual address, but
	 * ISP_VM_START is defined as 0 as well.  Allocate one dummy buffer
	 * up front so that it occupies address 0 and no later hmm_alloc()
	 * call can ever return 0.
	 */
	dummy_ptr = hmm_alloc(1);

	return ret;
}

void hmm_cleanup(void)
{
	if (dummy_ptr == mmgr_EXCEPTION)
		return;

	/* free dummy memory first */
	hmm_free(dummy_ptr);
	dummy_ptr = 0;

	hmm_bo_device_exit(&bo_device);
	hmm_initialized = false;
}

static ia_css_ptr __hmm_alloc(size_t bytes, enum hmm_bo_type type,
			      void *vmalloc_addr)
{
	unsigned int pgnr;
	struct hmm_buffer_object *bo;
	int ret;

	/*
	 * Check that we are initialized. In the ideal world we wouldn't need
	 * this, but we can tackle it once the driver is a lot cleaner.
	 */
	if (!hmm_initialized)
		hmm_init();

	/* Get the number of pages from the size */
	pgnr = size_to_pgnr_ceil(bytes);

	/* Buffer object structure init */
	bo = hmm_bo_alloc(&bo_device, pgnr);
	if (!bo) {
		dev_err(atomisp_dev, "hmm_bo_alloc failed.\n");
		goto create_bo_err;
	}

	/* Allocate pages for memory */
	ret = hmm_bo_alloc_pages(bo, type, vmalloc_addr);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");
		goto alloc_page_err;
	}

	/* Combine the virtual address and pages together */
	ret = hmm_bo_bind(bo);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_bind failed.\n");
		goto bind_err;
	}

	return bo->start;

bind_err:
	hmm_bo_free_pages(bo);
alloc_page_err:
	hmm_bo_unref(bo);
create_bo_err:
	return 0;
}

ia_css_ptr hmm_alloc(size_t bytes)
{
	return __hmm_alloc(bytes, HMM_BO_PRIVATE, NULL);
}

ia_css_ptr hmm_create_from_vmalloc_buf(size_t bytes, void *vmalloc_addr)
{
	return __hmm_alloc(bytes, HMM_BO_VMALLOC, vmalloc_addr);
}
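
/*
 * Illustrative usage sketch (not part of the original driver code): a caller
 * allocates ISP virtual memory, copies a CPU buffer into it, reads it back
 * and releases it again.  "my_buf" and "MY_LEN" are hypothetical names used
 * only for this example; hmm_alloc() returns 0 on failure.
 *
 *	ia_css_ptr isp_addr = hmm_alloc(MY_LEN);
 *
 *	if (!isp_addr)
 *		return -ENOMEM;
 *	hmm_store(isp_addr, my_buf, MY_LEN);
 *	hmm_load(isp_addr, my_buf, MY_LEN);
 *	hmm_free(isp_addr);
 */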

void hmm_free(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	if (WARN_ON(virt == mmgr_EXCEPTION))
		return;

	bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object starting at address 0x%x\n",
			(unsigned int)virt);
		return;
	}

	hmm_bo_unbind(bo);
	hmm_bo_free_pages(bo);
	hmm_bo_unref(bo);
}

static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
{
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			ptr);
		return -EINVAL;
	}

	if (!hmm_bo_page_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no pages allocated.\n");
		return -EINVAL;
	}

	if (!hmm_bo_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no virtual address space allocated.\n");
		return -EINVAL;
	}

	return 0;
}

/* Read function in ISP memory management */
static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
				  unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

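	/*
	 * Walk the buffer page by page: map each page with kmap_local_page(),
	 * copy the chunk out (when a destination was given) and flush it from
	 * the CPU cache before unmapping.
	 */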
	des = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		src = (char *)kmap_local_page(bo->pages[idx]) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;	/* update virt for next loop */

		if (des) {
			memcpy(des, src, len);
			des += len;
		}

		clflush_cache_range(src, len);

		kunmap_local(src);
	}

	return 0;
}

/*
 * Read function in ISP memory management; a NULL "data" means only flush
 * the range from the CPU cache.
 */
static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *src = bo->vmap_addr;

		src += (virt - bo->start);
		if (data)
			memcpy(data, src, bytes);
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(src, bytes);
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (!vptr)
			return load_and_flush_by_kmap(virt, data, bytes);

		vptr = vptr + (virt - bo->start);

		if (data)
			memcpy(data, vptr, bytes);
		clflush_cache_range(vptr, bytes);
		hmm_bo_vunmap(bo);
	}

	return 0;
}

/* Read function in ISP memory management */
int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes)
{
	if (!virt) {
		dev_warn(atomisp_dev, "hmm_load: address is NULL\n");
		return -EINVAL;
	}
	if (!data) {
		dev_err(atomisp_dev, "hmm_load: data is a NULL argument\n");
		return -EINVAL;
	}
	return load_and_flush(virt, data, bytes);
}

/* Flush hmm data from the data cache */
int hmm_flush(ia_css_ptr virt, unsigned int bytes)
{
	return load_and_flush(virt, NULL, bytes);
}

/* Write function in ISP memory management */
int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	if (!virt) {
		dev_warn(atomisp_dev, "hmm_store: address is NULL\n");
		return -EINVAL;
	}
	if (!data) {
		dev_err(atomisp_dev, "hmm_store: data is a NULL argument\n");
		return -EINVAL;
	}

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *dst = bo->vmap_addr;

		dst += (virt - bo->start);
		memcpy(dst, data, bytes);
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (vptr) {
			vptr = vptr + (virt - bo->start);

			memcpy(vptr, data, bytes);
			clflush_cache_range(vptr, bytes);
			hmm_bo_vunmap(bo);
			return 0;
		}
	}

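	/*
	 * Copy the data into the buffer page by page through
	 * kmap_local_page(), flushing each chunk from the CPU cache as it is
	 * written.
	 */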
	src = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		des = (char *)kmap_local_page(bo->pages[idx]);
		if (!des) {
			dev_err(atomisp_dev,
				"kmap buffer object page failed: pg_idx = %d\n",
				idx);
			return -EINVAL;
		}

		des += offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;

		memcpy(des, src, len);

		src += len;

		clflush_cache_range(des, len);

		kunmap_local(des);
	}

	return 0;
}

/* memset function in ISP memory management */
int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *dst = bo->vmap_addr;

		dst += (virt - bo->start);
		memset(dst, c, bytes);

		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (vptr) {
			vptr = vptr + (virt - bo->start);
			memset(vptr, c, bytes);
			clflush_cache_range(vptr, bytes);
			hmm_bo_vunmap(bo);
			return 0;
		}
	}

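	/*
	 * Set the buffer page by page through kmap_local_page(), flushing
	 * each chunk from the CPU cache as it is written.
	 */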
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		des = (char *)kmap_local_page(bo->pages[idx]) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;

		memset(des, c, len);

		clflush_cache_range(des, len);

		kunmap_local(des);
	}

	return 0;
}

/* Virtual address to physical address conversion */
phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
{
	unsigned int idx, offset;
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			virt);
		return -1;
	}

	idx = (virt - bo->start) >> PAGE_SHIFT;
	offset = (virt - bo->start) - (idx << PAGE_SHIFT);

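	/*
	 * Worked example (illustrative, assuming 4 KiB pages): for
	 * virt - bo->start == 0x1234, idx is 1 and offset is 0x234, so the
	 * result is the physical address of the buffer's second page
	 * plus 0x234.
	 */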
	return page_to_phys(bo->pages[idx]) + offset;
}

int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_start(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object starting at address 0x%x\n",
			virt);
		return -EINVAL;
	}

	return hmm_bo_mmap(vma, bo);
}

/* Map an ISP virtual address into an IA (CPU) virtual address */
void *hmm_vmap(ia_css_ptr virt, bool cached)
{
	struct hmm_buffer_object *bo;
	void *ptr;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			virt);
		return NULL;
	}

	ptr = hmm_bo_vmap(bo, cached);
	if (ptr)
		return ptr + (virt - bo->start);

	return NULL;
}

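/*
 * Illustrative usage sketch (not part of the original driver code): map an
 * ISP buffer as cached memory, touch it from the CPU and flush the CPU cache
 * before handing it back to the ISP.  "isp_addr" is assumed to come from a
 * prior hmm_alloc() and "MY_LEN" is a hypothetical length.
 *
 *	void *va = hmm_vmap(isp_addr, true);
 *
 *	if (!va)
 *		return -EINVAL;
 *	memset(va, 0, MY_LEN);
 *	hmm_flush_vmap(isp_addr);
 *	hmm_vunmap(isp_addr);
 */
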
/* Flush the memory that was mapped as cached memory through hmm_vmap() */
void hmm_flush_vmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "cannot find buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_flush_vmap(bo);
}

void hmm_vunmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "cannot find buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_vunmap(bo);
}