// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 - 2025 Intel Corporation
 */

#include <asm/barrier.h>

#include <linux/align.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/iopoll.h>
#include <linux/iova.h>
#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "ipu7.h"
#include "ipu7-dma.h"
#include "ipu7-mmu.h"
#include "ipu7-platform-regs.h"

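/*
 * The IPU MMU uses a two-level page table with 4 KiB pages: the single L1
 * table and each L2 table hold 1024 32-bit entries, so one L1 entry covers
 * 4 MiB (22 address bits) and the full table spans a 32-bit I/O virtual
 * address space. Each PTE stores a page frame number, i.e. an address
 * shifted right by ISP_PADDR_SHIFT.
 */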
#define ISP_PAGE_SHIFT		12
#define ISP_PAGE_SIZE		BIT(ISP_PAGE_SHIFT)
#define ISP_PAGE_MASK		(~(ISP_PAGE_SIZE - 1U))

#define ISP_L1PT_SHIFT		22
#define ISP_L1PT_MASK		(~((1U << ISP_L1PT_SHIFT) - 1))

#define ISP_L2PT_SHIFT		12
#define ISP_L2PT_MASK		(~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK))))

#define ISP_L1PT_PTES		1024U
#define ISP_L2PT_PTES		1024U

#define ISP_PADDR_SHIFT		12

#define REG_L1_PHYS		0x0004	/* 27-bit pfn */
#define REG_INFO		0x0008

#define TBL_PHYS_ADDR(a)	((phys_addr_t)(a) << ISP_PADDR_SHIFT)

#define MMU_TLB_INVALIDATE_TIMEOUT	2000

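/* Debug helper: dump and clear the IRQ cause register of every MMU HW unit. */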
static __maybe_unused void mmu_irq_handler(struct ipu7_mmu *mmu)
{
	unsigned int i;
	u32 irq_cause;

	for (i = 0; i < mmu->nr_mmus; i++) {
		irq_cause = readl(mmu->mmu_hw[i].base + MMU_REG_IRQ_CAUSE);
		pr_info("mmu %s irq_cause = 0x%x\n", mmu->mmu_hw[i].name,
			irq_cause);
		writel(0x1ffff, mmu->mmu_hw[i].base + MMU_REG_IRQ_CLEAR);
	}
}

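/*
 * Invalidate the TLBs of all MMU HW units so that updated page table
 * entries take effect. Does nothing until the MMU HW has been initialized
 * (mmu->ready).
 */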
static void tlb_invalidate(struct ipu7_mmu *mmu)
{
	unsigned long flags;
	unsigned int i;
	int ret;
	u32 val;

	spin_lock_irqsave(&mmu->ready_lock, flags);
	if (!mmu->ready) {
		spin_unlock_irqrestore(&mmu->ready_lock, flags);
		return;
	}

	for (i = 0; i < mmu->nr_mmus; i++) {
		writel(0xffffffffU, mmu->mmu_hw[i].base +
		       MMU_REG_INVALIDATE_0);

		/* Need to check with HW whether to use l1streams or l2streams */
		if (mmu->mmu_hw[i].nr_l2streams > 32)
			writel(0xffffffffU, mmu->mmu_hw[i].base +
			       MMU_REG_INVALIDATE_1);

		/*
		 * The TLB invalidation is a "single cycle" (IOMMU clock
		 * cycles). When the actual MMIO write reaches the IPU TLB
		 * Invalidate register, wmb() will force the TLB invalidate
		 * out if the CPU attempts to update the IOMMU page table
		 * (or sooner).
		 */
		wmb();

		/* Wait for the invalidation to complete */
		ret = readl_poll_timeout_atomic(mmu->mmu_hw[i].base +
						MMU_REG_INVALIDATION_STATUS,
						val, !(val & 0x1U), 500,
						MMU_TLB_INVALIDATE_TIMEOUT);
		if (ret)
			dev_err(mmu->dev, "MMU[%u] TLB invalidate failed\n", i);
	}

	spin_unlock_irqrestore(&mmu->ready_lock, flags);
}

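/* DMA-map one page table page for device access; returns 0 on mapping error. */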
static dma_addr_t map_single(struct ipu7_mmu_info *mmu_info, void *ptr)
{
	dma_addr_t dma;

	dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(mmu_info->dev, dma))
		return 0;

	return dma;
}

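/*
 * Allocate and map the dummy data page that backs every unmapped L2 page
 * table entry.
 */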
static int get_dummy_page(struct ipu7_mmu_info *mmu_info)
{
	void *pt = (void *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	dma_addr_t dma;

	if (!pt)
		return -ENOMEM;

	dev_dbg(mmu_info->dev, "dummy_page: get_zeroed_page() == %p\n", pt);

	dma = map_single(mmu_info, pt);
	if (!dma) {
		dev_err(mmu_info->dev, "Failed to map dummy page\n");
		goto err_free_page;
	}

	mmu_info->dummy_page = pt;
	mmu_info->dummy_page_pteval = dma >> ISP_PAGE_SHIFT;

	return 0;

err_free_page:
	free_page((unsigned long)pt);
	return -ENOMEM;
}

static void free_dummy_page(struct ipu7_mmu_info *mmu_info)
{
	dma_unmap_single(mmu_info->dev,
			 TBL_PHYS_ADDR(mmu_info->dummy_page_pteval),
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)mmu_info->dummy_page);
}

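/*
 * Allocate and map the dummy L2 page table, with all of its entries
 * pointing at the dummy page; it backs every unused L1 entry.
 */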
static int alloc_dummy_l2_pt(struct ipu7_mmu_info *mmu_info)
{
	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	dma_addr_t dma;
	unsigned int i;

	if (!pt)
		return -ENOMEM;

	dev_dbg(mmu_info->dev, "dummy_l2: get_zeroed_page() = %p\n", pt);

	dma = map_single(mmu_info, pt);
	if (!dma) {
		dev_err(mmu_info->dev, "Failed to map l2pt page\n");
		goto err_free_page;
	}

	for (i = 0; i < ISP_L2PT_PTES; i++)
		pt[i] = mmu_info->dummy_page_pteval;

	mmu_info->dummy_l2_pt = pt;
	mmu_info->dummy_l2_pteval = dma >> ISP_PAGE_SHIFT;

	return 0;

err_free_page:
	free_page((unsigned long)pt);
	return -ENOMEM;
}

static void free_dummy_l2_pt(struct ipu7_mmu_info *mmu_info)
{
	dma_unmap_single(mmu_info->dev,
			 TBL_PHYS_ADDR(mmu_info->dummy_l2_pteval),
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)mmu_info->dummy_l2_pt);
}

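/*
 * Allocate the single L1 page table, point every entry at the dummy L2
 * table and DMA-map it so that its PFN can be programmed into the MMU HW.
 */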
static u32 *alloc_l1_pt(struct ipu7_mmu_info *mmu_info)
{
	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	dma_addr_t dma;
	unsigned int i;

	if (!pt)
		return NULL;

	dev_dbg(mmu_info->dev, "alloc_l1: get_zeroed_page() = %p\n", pt);

	for (i = 0; i < ISP_L1PT_PTES; i++)
		pt[i] = mmu_info->dummy_l2_pteval;

	dma = map_single(mmu_info, pt);
	if (!dma) {
		dev_err(mmu_info->dev, "Failed to map l1pt page\n");
		goto err_free_page;
	}

	mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT;
	dev_dbg(mmu_info->dev, "l1 pt %p mapped at %pad\n", pt, &dma);

	return pt;

err_free_page:
	free_page((unsigned long)pt);
	return NULL;
}

static u32 *alloc_l2_pt(struct ipu7_mmu_info *mmu_info)
{
	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	unsigned int i;

	if (!pt)
		return NULL;

	dev_dbg(mmu_info->dev, "alloc_l2: get_zeroed_page() = %p\n", pt);

	for (i = 0; i < ISP_L2PT_PTES; i++)
		pt[i] = mmu_info->dummy_page_pteval;

	return pt;
}

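/*
 * Reset the L2 entries covering [iova, iova + size) back to the dummy page
 * and flush the CPU cache lines that hold the modified PTEs.
 */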
static void l2_unmap(struct ipu7_mmu_info *mmu_info, unsigned long iova,
		     phys_addr_t dummy, size_t size)
{
	unsigned int l2_entries;
	unsigned int l2_idx;
	unsigned long flags;
	u32 l1_idx;
	u32 *l2_pt;

	spin_lock_irqsave(&mmu_info->lock, flags);
	for (l1_idx = iova >> ISP_L1PT_SHIFT;
	     size > 0U && l1_idx < ISP_L1PT_PTES; l1_idx++) {
		dev_dbg(mmu_info->dev,
			"unmapping l2 pgtable (l1 index %u (iova 0x%8.8lx))\n",
			l1_idx, iova);

		if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
			dev_err(mmu_info->dev,
				"unmap not mapped iova 0x%8.8lx l1 index %u\n",
				iova, l1_idx);
			continue;
		}
		l2_pt = mmu_info->l2_pts[l1_idx];

		l2_entries = 0;
		for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
		     size > 0U && l2_idx < ISP_L2PT_PTES; l2_idx++) {
			phys_addr_t pteval = TBL_PHYS_ADDR(l2_pt[l2_idx]);

			dev_dbg(mmu_info->dev,
				"unmap l2 index %u with pteval 0x%p\n",
				l2_idx, &pteval);
			l2_pt[l2_idx] = mmu_info->dummy_page_pteval;

			iova += ISP_PAGE_SIZE;
			size -= ISP_PAGE_SIZE;

			l2_entries++;
		}

		WARN_ON_ONCE(!l2_entries);
		clflush_cache_range(&l2_pt[l2_idx - l2_entries],
				    sizeof(l2_pt[0]) * l2_entries);
	}

	WARN_ON_ONCE(size);
	spin_unlock_irqrestore(&mmu_info->lock, flags);
}

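/*
 * Map a physically contiguous range at [iova, iova + size), allocating and
 * hooking up L2 page tables on demand. On failure the partially created
 * mapping is unwound via l2_unmap().
 */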
static int l2_map(struct ipu7_mmu_info *mmu_info, unsigned long iova,
		  phys_addr_t paddr, size_t size)
{
	struct device *dev = mmu_info->dev;
	unsigned int l2_entries;
	u32 *l2_pt, *l2_virt;
	unsigned int l2_idx;
	unsigned long flags;
	size_t mapped = 0;
	dma_addr_t dma;
	u32 l1_entry;
	u32 l1_idx;
	int err = 0;

	spin_lock_irqsave(&mmu_info->lock, flags);

	paddr = ALIGN(paddr, ISP_PAGE_SIZE);
	for (l1_idx = iova >> ISP_L1PT_SHIFT;
	     size && l1_idx < ISP_L1PT_PTES; l1_idx++) {
		dev_dbg(dev,
			"mapping l2 page table for l1 index %u (iova %8.8x)\n",
			l1_idx, (u32)iova);

		l1_entry = mmu_info->l1_pt[l1_idx];
		if (l1_entry == mmu_info->dummy_l2_pteval) {
			l2_virt = mmu_info->l2_pts[l1_idx];
			if (likely(!l2_virt)) {
				l2_virt = alloc_l2_pt(mmu_info);
				if (!l2_virt) {
					err = -ENOMEM;
					goto error;
				}
			}

			dma = map_single(mmu_info, l2_virt);
			if (!dma) {
				dev_err(dev, "Failed to map l2pt page\n");
				free_page((unsigned long)l2_virt);
				err = -EINVAL;
				goto error;
			}

			l1_entry = dma >> ISP_PADDR_SHIFT;

			dev_dbg(dev, "page for l1_idx %u %p allocated\n",
				l1_idx, l2_virt);
			mmu_info->l1_pt[l1_idx] = l1_entry;
			mmu_info->l2_pts[l1_idx] = l2_virt;

			clflush_cache_range(&mmu_info->l1_pt[l1_idx],
					    sizeof(mmu_info->l1_pt[l1_idx]));
		}

		l2_pt = mmu_info->l2_pts[l1_idx];
		l2_entries = 0;

		for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
		     size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
			l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT;

			dev_dbg(dev, "l2 index %u mapped as 0x%8.8x\n", l2_idx,
				l2_pt[l2_idx]);

			iova += ISP_PAGE_SIZE;
			paddr += ISP_PAGE_SIZE;
			mapped += ISP_PAGE_SIZE;
			size -= ISP_PAGE_SIZE;

			l2_entries++;
		}

		WARN_ON_ONCE(!l2_entries);
		clflush_cache_range(&l2_pt[l2_idx - l2_entries],
				    sizeof(l2_pt[0]) * l2_entries);
	}

	spin_unlock_irqrestore(&mmu_info->lock, flags);

	return 0;

error:
	spin_unlock_irqrestore(&mmu_info->lock, flags);
	/* unroll mapping in case something went wrong */
	if (mapped)
		l2_unmap(mmu_info, iova - mapped, paddr - mapped, mapped);

	return err;
}

static int __ipu7_mmu_map(struct ipu7_mmu_info *mmu_info, unsigned long iova,
			  phys_addr_t paddr, size_t size)
{
	u32 iova_start = round_down(iova, ISP_PAGE_SIZE);
	u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE);

	dev_dbg(mmu_info->dev,
		"mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr %pap\n",
		iova_start, iova_end, size, &paddr);

	return l2_map(mmu_info, iova_start, paddr, size);
}

static void __ipu7_mmu_unmap(struct ipu7_mmu_info *mmu_info,
			     unsigned long iova, size_t size)
{
	l2_unmap(mmu_info, iova, 0, size);
}

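/*
 * Reserve an IPU_MMUV2_TRASH_RANGE sized IOVA range and map all of it to
 * the single physical trash page, so that stray device accesses within the
 * range land harmlessly.
 */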
static int allocate_trash_buffer(struct ipu7_mmu *mmu)
{
	unsigned int n_pages = PFN_UP(IPU_MMUV2_TRASH_RANGE);
	unsigned long iova_addr;
	struct iova *iova;
	unsigned int i;
	dma_addr_t dma;
	int ret;

	/* Allocate 8MB in iova range */
	iova = alloc_iova(&mmu->dmap->iovad, n_pages,
			  PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
	if (!iova) {
		dev_err(mmu->dev, "cannot allocate iova range for trash\n");
		return -ENOMEM;
	}

	dma = dma_map_page(mmu->dmap->mmu_info->dev, mmu->trash_page, 0,
			   PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(mmu->dmap->mmu_info->dev, dma)) {
		dev_err(mmu->dmap->mmu_info->dev, "Failed to map trash page\n");
		ret = -ENOMEM;
		goto out_free_iova;
	}

	mmu->pci_trash_page = dma;

	/*
	 * Map the 8MB iova address range to the same physical trash page
	 * mmu->trash_page, which has already been allocated by the caller.
	 */
	iova_addr = iova->pfn_lo;
	for (i = 0; i < n_pages; i++) {
		ret = ipu7_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
				   mmu->pci_trash_page, PAGE_SIZE);
		if (ret) {
			dev_err(mmu->dev,
				"mapping trash buffer range failed\n");
			goto out_unmap;
		}

		iova_addr++;
	}

	mmu->iova_trash_page = PFN_PHYS(iova->pfn_lo);
	dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n",
		mmu->mmid, (unsigned int)mmu->iova_trash_page);
	return 0;

out_unmap:
	ipu7_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
		       PFN_PHYS(iova_size(iova)));
	dma_unmap_page(mmu->dmap->mmu_info->dev, mmu->pci_trash_page,
		       PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_iova:
	__free_iova(&mmu->dmap->iovad, iova);
	return ret;
}

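/*
 * Program the address translation HW of every MMU unit: page table base
 * address, user info bits, AXI refill ID, collapse enable bitmap, IRQ
 * mask/enable and the L1/L2 TLB stream block sizes.
 */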
static void __mmu_at_init(struct ipu7_mmu *mmu)
{
	struct ipu7_mmu_info *mmu_info;
	unsigned int i;

	mmu_info = mmu->dmap->mmu_info;
	for (i = 0; i < mmu->nr_mmus; i++) {
		struct ipu7_mmu_hw *mmu_hw = &mmu->mmu_hw[i];
		unsigned int j;

		/* Write page table address per MMU */
		writel((phys_addr_t)mmu_info->l1_pt_dma,
		       mmu_hw->base + MMU_REG_PAGE_TABLE_BASE_ADDR);
		dev_dbg(mmu->dev, "mmu %s base was set as %x\n", mmu_hw->name,
			readl(mmu_hw->base + MMU_REG_PAGE_TABLE_BASE_ADDR));

		/* Set info bits and axi_refill per MMU */
		writel(mmu_hw->info_bits,
		       mmu_hw->base + MMU_REG_USER_INFO_BITS);
		writel(mmu_hw->refill, mmu_hw->base + MMU_REG_AXI_REFILL_IF_ID);
		writel(mmu_hw->collapse_en_bitmap,
		       mmu_hw->base + MMU_REG_COLLAPSE_ENABLE_BITMAP);

		dev_dbg(mmu->dev, "mmu %s info_bits was set as %x\n",
			mmu_hw->name,
			readl(mmu_hw->base + MMU_REG_USER_INFO_BITS));

		if (mmu_hw->at_sp_arb_cfg)
			writel(mmu_hw->at_sp_arb_cfg,
			       mmu_hw->base + MMU_REG_AT_SP_ARB_CFG);

		/* default irq configuration */
		writel(0x3ff, mmu_hw->base + MMU_REG_IRQ_MASK);
		writel(0x3ff, mmu_hw->base + MMU_REG_IRQ_ENABLE);

		/* Configure MMU TLB stream configuration for L1/L2 */
		for (j = 0; j < mmu_hw->nr_l1streams; j++) {
			writel(mmu_hw->l1_block_sz[j], mmu_hw->base +
			       mmu_hw->l1_block + 4U * j);
		}

		for (j = 0; j < mmu_hw->nr_l2streams; j++) {
			writel(mmu_hw->l2_block_sz[j], mmu_hw->base +
			       mmu_hw->l2_block + 4U * j);
		}

		for (j = 0; j < mmu_hw->uao_p_num; j++) {
			if (!mmu_hw->uao_p2tlb[j])
				continue;
			writel(mmu_hw->uao_p2tlb[j], mmu_hw->uao_base + 4U * j);
		}
	}
}

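/* Program the ZLX units of every MMU: AXI pools, per-stream configuration and enable bits. */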
static void __mmu_zlx_init(struct ipu7_mmu *mmu)
{
	unsigned int i;

	dev_dbg(mmu->dev, "mmu zlx init\n");

	for (i = 0; i < mmu->nr_mmus; i++) {
		struct ipu7_mmu_hw *mmu_hw = &mmu->mmu_hw[i];
		unsigned int j;

		dev_dbg(mmu->dev, "mmu %s zlx init\n", mmu_hw->name);
		for (j = 0; j < IPU_ZLX_POOL_NUM; j++) {
			if (!mmu_hw->zlx_axi_pool[j])
				continue;
			writel(mmu_hw->zlx_axi_pool[j],
			       mmu_hw->zlx_base + ZLX_REG_AXI_POOL + j * 0x4U);
		}

		for (j = 0; j < mmu_hw->zlx_nr; j++) {
			if (!mmu_hw->zlx_conf[j])
				continue;

			writel(mmu_hw->zlx_conf[j],
			       mmu_hw->zlx_base + ZLX_REG_CONF + j * 0x8U);
		}

		for (j = 0; j < mmu_hw->zlx_nr; j++) {
			if (!mmu_hw->zlx_en[j])
				continue;

			writel(mmu_hw->zlx_en[j],
			       mmu_hw->zlx_base + ZLX_REG_EN + j * 0x8U);
		}
	}
}

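/*
 * Initialize the MMU and ZLX hardware and, on the first call, set up the
 * trash page and its IOVA mapping; finally mark the MMU as ready so that
 * TLB invalidations are issued to the HW.
 */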
int ipu7_mmu_hw_init(struct ipu7_mmu *mmu)
{
	unsigned long flags;

	dev_dbg(mmu->dev, "IPU mmu hardware init\n");

	/* Initialise each MMU and ZLX */
	__mmu_at_init(mmu);
	__mmu_zlx_init(mmu);

	if (!mmu->trash_page) {
		int ret;

		mmu->trash_page = alloc_page(GFP_KERNEL);
		if (!mmu->trash_page) {
			dev_err(mmu->dev, "insufficient memory for trash buffer\n");
			return -ENOMEM;
		}

		ret = allocate_trash_buffer(mmu);
		if (ret) {
			__free_page(mmu->trash_page);
			mmu->trash_page = NULL;
			dev_err(mmu->dev, "trash buffer allocation failed\n");
			return ret;
		}
	}

	spin_lock_irqsave(&mmu->ready_lock, flags);
	mmu->ready = true;
	spin_unlock_irqrestore(&mmu->ready_lock, flags);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(ipu7_mmu_hw_init, "INTEL_IPU7");

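/*
 * Allocate the software page table state for one MMU: aperture limits, the
 * dummy page, the dummy L2 table, the L2 pointer array and the L1 page
 * table.
 */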
static struct ipu7_mmu_info *ipu7_mmu_alloc(struct ipu7_device *isp)
{
	struct ipu7_mmu_info *mmu_info;
	int ret;

	mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL);
	if (!mmu_info)
		return NULL;

	if (isp->secure_mode) {
		mmu_info->aperture_start = IPU_FW_CODE_REGION_END;
		mmu_info->aperture_end =
			(dma_addr_t)DMA_BIT_MASK(IPU_MMU_ADDR_BITS);
	} else {
		mmu_info->aperture_start = IPU_FW_CODE_REGION_START;
		mmu_info->aperture_end =
			(dma_addr_t)DMA_BIT_MASK(IPU_MMU_ADDR_BITS_NON_SECURE);
	}

	mmu_info->pgsize_bitmap = SZ_4K;
	mmu_info->dev = &isp->pdev->dev;

	ret = get_dummy_page(mmu_info);
	if (ret)
		goto err_free_info;

	ret = alloc_dummy_l2_pt(mmu_info);
	if (ret)
		goto err_free_dummy_page;

	mmu_info->l2_pts = vzalloc(ISP_L2PT_PTES * sizeof(*mmu_info->l2_pts));
	if (!mmu_info->l2_pts)
		goto err_free_dummy_l2_pt;

	/*
	 * We always map the L1 page table (a single page, as are the
	 * L2 page tables).
	 */
	mmu_info->l1_pt = alloc_l1_pt(mmu_info);
	if (!mmu_info->l1_pt)
		goto err_free_l2_pts;

	spin_lock_init(&mmu_info->lock);

	dev_dbg(mmu_info->dev, "domain initialised\n");

	return mmu_info;

err_free_l2_pts:
	vfree(mmu_info->l2_pts);
err_free_dummy_l2_pt:
	free_dummy_l2_pt(mmu_info);
err_free_dummy_page:
	free_dummy_page(mmu_info);
err_free_info:
	kfree(mmu_info);

	return NULL;
}

void ipu7_mmu_hw_cleanup(struct ipu7_mmu *mmu)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->ready_lock, flags);
	mmu->ready = false;
	spin_unlock_irqrestore(&mmu->ready_lock, flags);
}
EXPORT_SYMBOL_NS_GPL(ipu7_mmu_hw_cleanup, "INTEL_IPU7");

static struct ipu7_dma_mapping *alloc_dma_mapping(struct ipu7_device *isp)
{
	struct ipu7_dma_mapping *dmap;
	unsigned long base_pfn;

	dmap = kzalloc(sizeof(*dmap), GFP_KERNEL);
	if (!dmap)
		return NULL;

	dmap->mmu_info = ipu7_mmu_alloc(isp);
	if (!dmap->mmu_info) {
		kfree(dmap);
		return NULL;
	}

	/* 0~64MB is forbidden for the uctile controller */
	base_pfn = max_t(unsigned long, 1,
			 PFN_DOWN(dmap->mmu_info->aperture_start));
	init_iova_domain(&dmap->iovad, SZ_4K, base_pfn);
	dmap->mmu_info->dmap = dmap;

	dev_dbg(&isp->pdev->dev, "alloc mapping\n");

	iova_cache_get();

	return dmap;
}

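/* Translate an IOVA to a physical address with a software walk of the page tables. */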
phys_addr_t ipu7_mmu_iova_to_phys(struct ipu7_mmu_info *mmu_info,
				  dma_addr_t iova)
{
	phys_addr_t phy_addr;
	unsigned long flags;
	u32 *l2_pt;

	spin_lock_irqsave(&mmu_info->lock, flags);
	l2_pt = mmu_info->l2_pts[iova >> ISP_L1PT_SHIFT];
	phy_addr = (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT];
	phy_addr <<= ISP_PAGE_SHIFT;
	spin_unlock_irqrestore(&mmu_info->lock, flags);

	return phy_addr;
}

void ipu7_mmu_unmap(struct ipu7_mmu_info *mmu_info, unsigned long iova,
		    size_t size)
{
	unsigned int min_pagesz;

	dev_dbg(mmu_info->dev, "unmapping iova 0x%lx size 0x%zx\n", iova, size);

	/* find out the minimum page size supported */
	min_pagesz = 1U << __ffs(mmu_info->pgsize_bitmap);

	/*
	 * The virtual address and the size of the mapping must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		dev_err(mmu_info->dev,
			"unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
			iova, size, min_pagesz);
		return;
	}

	__ipu7_mmu_unmap(mmu_info, iova, size);
}

int ipu7_mmu_map(struct ipu7_mmu_info *mmu_info, unsigned long iova,
		 phys_addr_t paddr, size_t size)
{
	unsigned int min_pagesz;

	if (mmu_info->pgsize_bitmap == 0UL)
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1U << __ffs(mmu_info->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		dev_err(mmu_info->dev,
			"unaligned: iova %lx pa %pa size %zx min_pagesz %x\n",
			iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	dev_dbg(mmu_info->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
		iova, &paddr, size);

	return __ipu7_mmu_map(mmu_info, iova, paddr, size);
}

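/*
 * Tear down the trash buffer mapping and free all L2 page tables, the
 * dummy page, the dummy L2 table and the L1 page table.
 */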
static void ipu7_mmu_destroy(struct ipu7_mmu *mmu)
{
	struct ipu7_dma_mapping *dmap = mmu->dmap;
	struct ipu7_mmu_info *mmu_info = dmap->mmu_info;
	struct iova *iova;
	u32 l1_idx;

	if (mmu->iova_trash_page) {
		iova = find_iova(&dmap->iovad, PHYS_PFN(mmu->iova_trash_page));
		if (iova) {
			/* unmap and free the trash buffer iova */
			ipu7_mmu_unmap(mmu_info, PFN_PHYS(iova->pfn_lo),
				       PFN_PHYS(iova_size(iova)));
			__free_iova(&dmap->iovad, iova);
		} else {
			dev_err(mmu->dev, "trash buffer iova not found.\n");
		}

		mmu->iova_trash_page = 0;
		dma_unmap_page(mmu_info->dev, mmu->pci_trash_page,
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
		mmu->pci_trash_page = 0;
		__free_page(mmu->trash_page);
	}

	for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
		if (mmu_info->l1_pt[l1_idx] != mmu_info->dummy_l2_pteval) {
			dma_unmap_single(mmu_info->dev,
					 TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]),
					 PAGE_SIZE, DMA_BIDIRECTIONAL);
			free_page((unsigned long)mmu_info->l2_pts[l1_idx]);
		}
	}

	vfree(mmu_info->l2_pts);
	free_dummy_page(mmu_info);
	dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->l1_pt_dma),
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)mmu_info->dummy_l2_pt);
	free_page((unsigned long)mmu_info->l1_pt);
	kfree(mmu_info);
}

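/*
 * Create an ipu7_mmu instance for one HW variant: copy the per-MMU HW
 * description, compute the MMIO base addresses and allocate the DMA
 * mapping (IOVA domain plus page tables).
 */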
struct ipu7_mmu *ipu7_mmu_init(struct device *dev,
			       void __iomem *base, int mmid,
			       const struct ipu7_hw_variants *hw)
{
	struct ipu7_device *isp = pci_get_drvdata(to_pci_dev(dev));
	struct ipu7_mmu_pdata *pdata;
	struct ipu7_mmu *mmu;
	unsigned int i;

	if (hw->nr_mmus > IPU_MMU_MAX_NUM)
		return ERR_PTR(-EINVAL);

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < hw->nr_mmus; i++) {
		struct ipu7_mmu_hw *pdata_mmu = &pdata->mmu_hw[i];
		const struct ipu7_mmu_hw *src_mmu = &hw->mmu_hw[i];

		if (src_mmu->nr_l1streams > IPU_MMU_MAX_TLB_L1_STREAMS ||
		    src_mmu->nr_l2streams > IPU_MMU_MAX_TLB_L2_STREAMS)
			return ERR_PTR(-EINVAL);

		*pdata_mmu = *src_mmu;
		pdata_mmu->base = base + src_mmu->offset;
		pdata_mmu->zlx_base = base + src_mmu->zlx_offset;
		pdata_mmu->uao_base = base + src_mmu->uao_offset;
	}

	mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->mmid = mmid;
	mmu->mmu_hw = pdata->mmu_hw;
	mmu->nr_mmus = hw->nr_mmus;
	mmu->tlb_invalidate = tlb_invalidate;
	mmu->ready = false;
	INIT_LIST_HEAD(&mmu->vma_list);
	spin_lock_init(&mmu->ready_lock);

	mmu->dmap = alloc_dma_mapping(isp);
	if (!mmu->dmap) {
		dev_err(dev, "can't alloc dma mapping\n");
		return ERR_PTR(-ENOMEM);
	}

	return mmu;
}

void ipu7_mmu_cleanup(struct ipu7_mmu *mmu)
{
	struct ipu7_dma_mapping *dmap = mmu->dmap;

	ipu7_mmu_destroy(mmu);
	mmu->dmap = NULL;
	iova_cache_put();
	put_iova_domain(&dmap->iovad);
	kfree(dmap);
}