/linux-6.3-rc2/drivers/staging/media/ipu3/

ipu3-mmu.c
     89  func(mmu);  in call_if_imgu_is_powered()
    217  if (!mmu)  in __imgu_mmu_map()
    346  if (!mmu)  in __imgu_mmu_unmap()
    432  mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);  in imgu_mmu_init()
    433  if (!mmu)  in imgu_mmu_init()
    467  mmu->l2pts = vzalloc(IPU3_PT_PTES * sizeof(*mmu->l2pts));  in imgu_mmu_init()
    468  if (!mmu->l2pts)  in imgu_mmu_init()
    472  mmu->l1pt = imgu_mmu_alloc_page_table(mmu->dummy_l2pt_pteval);  in imgu_mmu_init()
    473  if (!mmu->l1pt)  in imgu_mmu_init()
    493  kfree(mmu);  in imgu_mmu_init()
    [all …]

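The imgu_mmu_init() hits above trace a two-level page-table bring-up: allocate the context with kzalloc(), then a vzalloc()'d table of L2 page-table pointers, then the L1 table, unwinding on any failure. Below is a minimal plain-C sketch of that allocate-and-unwind shape; toy_mmu, toy_mmu_init() and PT_PTES are hypothetical stand-ins, not the ipu3 API.

    #include <stdlib.h>

    #define PT_PTES 1024                    /* placeholder table size */

    struct toy_mmu {
        unsigned int *l1pt;                 /* single top-level table */
        unsigned int **l2pts;               /* lazily filled second level */
    };

    static struct toy_mmu *toy_mmu_init(void)
    {
        struct toy_mmu *mmu = calloc(1, sizeof(*mmu));

        if (!mmu)
            return NULL;

        mmu->l2pts = calloc(PT_PTES, sizeof(*mmu->l2pts));
        if (!mmu->l2pts)
            goto free_mmu;

        mmu->l1pt = calloc(PT_PTES, sizeof(*mmu->l1pt));
        if (!mmu->l1pt)
            goto free_l2pts;

        return mmu;

    free_l2pts:
        free(mmu->l2pts);
    free_mmu:
        free(mmu);
        return NULL;
    }

The labels run in reverse allocation order, so each failure point frees exactly what is already live — the same shape the line-493 kfree(mmu) cleanup path follows.
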
/linux-6.3-rc2/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/

base.c
    258  mmu->type[mmu->type_nr].type = type | mmu->heap[heap].type;  in nvkm_mmu_type()
    259  mmu->type[mmu->type_nr].heap = heap;  in nvkm_mmu_type()
    260  mmu->type_nr++;  in nvkm_mmu_type()
    268  if (!WARN_ON(mmu->heap_nr == ARRAY_SIZE(mmu->heap))) {  in nvkm_mmu_heap()
    269  mmu->heap[mmu->heap_nr].type = type;  in nvkm_mmu_heap()
    270  mmu->heap[mmu->heap_nr].size = size;  in nvkm_mmu_heap()
    373  if (mmu->subdev.device->fb && mmu->subdev.device->fb->ram)  in nvkm_mmu_oneinit()
    393  mmu->func->init(mmu);  in nvkm_mmu_init()
    406  return mmu;  in nvkm_mmu_dtor()
    421  mmu->func = func;  in nvkm_mmu_ctor()
    [all …]

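nvkm_mmu_heap() appends to a fixed-size array only after WARN_ON(mmu->heap_nr == ARRAY_SIZE(mmu->heap)) proves there is room, so overflow fails loudly rather than scribbling past the end. A sketch of that bounded-registration idiom; heap_register() and the fprintf() stand-in for WARN_ON() are illustrative, not nouveau's code.

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct heap { int type; long size; };

    static struct heap heaps[4];
    static unsigned int heap_nr;

    /* Append one entry; refuse loudly once the fixed table is full. */
    static int heap_register(int type, long size)
    {
        if (heap_nr == ARRAY_SIZE(heaps)) {
            fprintf(stderr, "heap table full\n");
            return -1;
        }
        heaps[heap_nr].type = type;
        heaps[heap_nr].size = size;
        return heap_nr++;
    }

Returning the slot index lets callers refer back to the entry later, much as nvkm_mmu_type() records the owning heap index in each type entry.
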
Kbuild
      2  nvkm-y += nvkm/subdev/mmu/base.o
      3  nvkm-y += nvkm/subdev/mmu/nv04.o
      4  nvkm-y += nvkm/subdev/mmu/nv41.o
      5  nvkm-y += nvkm/subdev/mmu/nv44.o
      6  nvkm-y += nvkm/subdev/mmu/nv50.o
      7  nvkm-y += nvkm/subdev/mmu/g84.o
      8  nvkm-y += nvkm/subdev/mmu/mcp77.o
      9  nvkm-y += nvkm/subdev/mmu/gf100.o
     10  nvkm-y += nvkm/subdev/mmu/gk104.o
     11  nvkm-y += nvkm/subdev/mmu/gk20a.o
    [all …]

ummu.c
     35  struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;  in nvkm_ummu_sclass()  local
     59  struct nvkm_mmu *mmu = ummu->mmu;  in nvkm_ummu_heap()  local
     79  struct nvkm_mmu *mmu = ummu->mmu;  in nvkm_ummu_type()  local
     89  type = mmu->type[index].type;  in nvkm_ummu_type()
    108  struct nvkm_mmu *mmu = ummu->mmu;  in nvkm_ummu_kind()  local
    116  if (mmu->func->kind)  in nvkm_ummu_kind()
    117  kind = mmu->func->kind(mmu, &count, &kind_inv);  in nvkm_ummu_kind()
    159  struct nvkm_mmu *mmu = device->mmu;  in nvkm_ummu_new()  local
    164  if (mmu->func->kind)  in nvkm_ummu_new()
    165  mmu->func->kind(mmu, &kinds, &unused);  in nvkm_ummu_new()
    [all …]

umem.c
     72  struct nvkm_device *device = umem->mmu->subdev.device;  in nvkm_umem_unmap()
     90  struct nvkm_mmu *mmu = umem->mmu;  in nvkm_umem_map()  local
    109  int ret = mmu->func->mem.umap(mmu, umem->memory, argv, argc,  in nvkm_umem_map()
    145  struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;  in nvkm_umem_new()  local
    161  if (type >= mmu->type_nr)  in nvkm_umem_new()
    167  umem->mmu = mmu;  in nvkm_umem_new()
    168  umem->type = mmu->type[type].type;  in nvkm_umem_new()
    172  if (mmu->type[type].type & NVKM_MEM_MAPPABLE) {  in nvkm_umem_new()
    177  ret = nvkm_mem_new_type(mmu, type, page, size, argv, argc,  in nvkm_umem_new()

mem.c
     33  struct nvkm_mmu *mmu;  member
     88  dma_unmap_page(mem->mmu->subdev.device->dev,  in nvkm_mem_dtor()
    147  struct device *dev = mmu->subdev.device->dev;  in nvkm_mem_new_host()
    157  if ( (mmu->type[type].type & NVKM_MEM_COHERENT) &&  in nvkm_mem_new_host()
    158  !(mmu->type[type].type & NVKM_MEM_UNCACHED))  in nvkm_mem_new_host()
    169  mem->mmu = mmu;  in nvkm_mem_new_host()
    199  if (mmu->dma_bits > 32)  in nvkm_mem_new_host()
    209  mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev,  in nvkm_mem_new_host()
    230  if (mmu->type[type].type & NVKM_MEM_VRAM) {  in nvkm_mem_new_type()
    231  ret = mmu->func->mem.vram(mmu, type, page, size,  in nvkm_mem_new_type()
    [all …]

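nvkm_mem_new_host() picks cached versus uncached behaviour from the type flags (NVKM_MEM_COHERENT set, NVKM_MEM_UNCACHED clear), then maps each page with dma_map_page(); nvkm_mem_dtor() undoes it with dma_unmap_page(). A kernel-style sketch of such a per-page streaming-DMA loop with rollback on failure — toy_map_pages() is a hypothetical name, not the nouveau function.

    #include <linux/dma-mapping.h>

    /* Map n pages for streaming DMA, rolling back on the first failure. */
    static int toy_map_pages(struct device *dev, struct page **pages,
                             dma_addr_t *dma, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            dma[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
                                  DMA_BIDIRECTIONAL);
            if (dma_mapping_error(dev, dma[i])) {
                while (--i >= 0)
                    dma_unmap_page(dev, dma[i], PAGE_SIZE,
                                   DMA_BIDIRECTIONAL);
                return -ENOMEM;
            }
        }
        return 0;
    }
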
/linux-6.3-rc2/drivers/gpu/drm/nouveau/nvif/

mmu.c
     30  kfree(mmu->kind);  in nvif_mmu_dtor()
     31  kfree(mmu->type);  in nvif_mmu_dtor()
     32  kfree(mmu->heap);  in nvif_mmu_dtor()
     50  mmu->heap = NULL;  in nvif_mmu_ctor()
     51  mmu->type = NULL;  in nvif_mmu_ctor()
     52  mmu->kind = NULL;  in nvif_mmu_ctor()
     69  mmu->heap = kmalloc_array(mmu->heap_nr, sizeof(*mmu->heap),  in nvif_mmu_ctor()
     71  mmu->type = kmalloc_array(mmu->type_nr, sizeof(*mmu->type),  in nvif_mmu_ctor()
     73  if (ret = -ENOMEM, !mmu->heap || !mmu->type)  in nvif_mmu_ctor()
     76  mmu->kind = kmalloc_array(mmu->kind_nr, sizeof(*mmu->kind),  in nvif_mmu_ctor()
    [all …]

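Line 73's "if (ret = -ENOMEM, !mmu->heap || !mmu->type)" leans on the comma operator: it presets the error code and tests both allocations in a single statement. A plain-C rendering of the equivalent conventional form; alloc_pair() is a hypothetical illustration, not the nvif API.

    #include <errno.h>
    #include <stdlib.h>

    /* Allocate two parallel arrays; free both and fail if either is NULL. */
    static int alloc_pair(int **heap, int **type, int n)
    {
        *heap = malloc(n * sizeof(**heap));
        *type = malloc(n * sizeof(**type));

        if (!*heap || !*type) {         /* the test the comma form performs */
            free(*heap);
            free(*type);
            *heap = *type = NULL;
            return -ENOMEM;
        }
        return 0;
    }
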
mem.c
     28  nvif_mem_ctor_map(struct nvif_mmu *mmu, const char *name, u8 type, u64 size,  in nvif_mem_ctor_map()  argument
     31  int ret = nvif_mem_ctor(mmu, name, mmu->mem, NVIF_MEM_MAPPABLE | type,  in nvif_mem_ctor_map()
     48  nvif_mem_ctor_type(struct nvif_mmu *mmu, const char *name, s32 oclass,  in nvif_mem_ctor_type()  argument
     72  ret = nvif_object_ctor(&mmu->object, name ? name : "nvifMem", 0, oclass,  in nvif_mem_ctor_type()
     75  mem->type = mmu->type[type].type;  in nvif_mem_ctor_type()
     88  nvif_mem_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, u8 type,  in nvif_mem_ctor()  argument
     95  for (i = 0; ret && i < mmu->type_nr; i++) {  in nvif_mem_ctor()
     96  if ((mmu->type[i].type & type) == type) {  in nvif_mem_ctor()
     97  ret = nvif_mem_ctor_type(mmu, name, oclass, i, page,  in nvif_mem_ctor()

/linux-6.3-rc2/drivers/staging/media/atomisp/pci/mmu/

isp_mmu.c
     82  return mmu->driver->pte_to_phys(mmu, pte);  in isp_pte_to_pgaddr()
     88  unsigned int pte = mmu->driver->phys_to_pte(mmu, phys);  in isp_pgaddr_to_pte_valid()
    325  if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {  in mmu_map()
    340  mmu->l1_pte = isp_pgaddr_to_pte_valid(mmu, l1_pt);  in mmu_map()
    344  l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);  in mmu_map()
    455  if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {  in mmu_unmap()
    456  mmu_unmap_l1_pt_error(mmu, mmu->l1_pte);  in mmu_unmap()
    461  l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);  in mmu_unmap()
    545  if (!mmu)  in isp_mmu_exit()
    548  if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {  in isp_mmu_exit()
    [all …]

/linux-6.3-rc2/drivers/iommu/

ipmmu-vmsa.c
    152  return mmu->root == mmu;  in ipmmu_is_root()
    227  if (domain->mmu != domain->mmu->root)  in ipmmu_ctx_write_all()
    287  struct ipmmu_vmsa_device *mmu = domain->mmu;  in ipmmu_utlb_enable()  local
    332  ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);  in ipmmu_domain_allocate_context()
    483  struct ipmmu_vmsa_device *mmu = domain->mmu;  in ipmmu_domain_irq()  local
    596  if (!mmu) {  in ipmmu_attach_device()
    605  domain->mmu = mmu;  in ipmmu_attach_device()
    614  } else if (domain->mmu != mmu) {  in ipmmu_attach_device()
    804  if (!mmu)  in ipmmu_probe_device()
    975  if (!mmu) {  in ipmmu_probe()
    [all …]

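ipmmu_domain_allocate_context() claims a free hardware context by scanning an in-use bitmap with find_first_zero_bit(), bounded by mmu->num_ctx. A minimal sketch of that bitmap allocator, with open-coded bit operations standing in for the kernel's find_first_zero_bit()/set_bit() helpers; ctx_alloc() and ctx_free() are hypothetical names.

    #define NUM_CTX 8

    static unsigned long ctx_map;       /* bit i set => context i in use */

    /* Return a free context index, or -1 when all contexts are busy. */
    static int ctx_alloc(void)
    {
        int i;

        for (i = 0; i < NUM_CTX; i++) {
            if (!(ctx_map & (1UL << i))) {
                ctx_map |= 1UL << i;
                return i;
            }
        }
        return -1;
    }

    static void ctx_free(int i)
    {
        ctx_map &= ~(1UL << i);
    }

In the driver proper the scan runs under the device's lock, since probe, attach and IRQ paths can race for context slots.
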
/linux-6.3-rc2/drivers/staging/media/atomisp/include/mmu/

isp_mmu.h
    100  void (*tlb_flush_range)(struct isp_mmu *mmu,
    102  void (*tlb_flush_all)(struct isp_mmu *mmu);
    120  #define ISP_PTE_VALID_MASK(mmu) \  argument
    121  ((mmu)->driver->pte_valid_mask)
    123  #define ISP_PTE_VALID(mmu, pte) \  argument
    124  ((pte) & ISP_PTE_VALID_MASK(mmu))
    136  void isp_mmu_exit(struct isp_mmu *mmu);
    156  if (mmu->driver && mmu->driver->tlb_flush_all)  in isp_mmu_flush_tlb_all()
    157  mmu->driver->tlb_flush_all(mmu);  in isp_mmu_flush_tlb_all()
    165  if (mmu->driver && mmu->driver->tlb_flush_range)  in isp_mmu_flush_tlb_range()
    [all …]

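The inline flush helpers at lines 156-165 call into the backend only when the hook is actually wired up — if (mmu->driver && mmu->driver->tlb_flush_all) — so every TLB callback is optional. A sketch of that guarded optional-op dispatch; flush_ops, toy_dev and dev_flush_all() are hypothetical names.

    #include <stddef.h>

    struct flush_ops {
        void (*flush_all)(void *ctx);               /* optional hook */
        void (*flush_range)(void *ctx, unsigned long start,
                            unsigned long size);    /* optional hook */
    };

    struct toy_dev {
        const struct flush_ops *ops;
        void *ctx;
    };

    static void dev_flush_all(struct toy_dev *d)
    {
        /* Dispatch only when both the table and the hook exist. */
        if (d->ops && d->ops->flush_all)
            d->ops->flush_all(d->ctx);
    }
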
/linux-6.3-rc2/arch/x86/kernel/

paravirt.c
    313  .mmu.tlb_remove_table =
    345  .mmu.pmd_val = PTE_IDENT,
    346  .mmu.make_pmd = PTE_IDENT,
    348  .mmu.pud_val = PTE_IDENT,
    349  .mmu.make_pud = PTE_IDENT,
    354  .mmu.p4d_val = PTE_IDENT,
    355  .mmu.make_p4d = PTE_IDENT,
    360  .mmu.pte_val = PTE_IDENT,
    361  .mmu.pgd_val = PTE_IDENT,
    363  .mmu.make_pte = PTE_IDENT,
    [all …]

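In the native ops table nearly every page-table converter is PTE_IDENT — an identity transform — so on bare metal each paravirt hook hands its argument straight back, while a hypervisor guest can install real converters in the same slots. A simplified sketch of the idea; mmu_ops, pte_ident() and native_mmu_ops are illustrative, not the real pv_ops types.

    typedef unsigned long pteval_t;

    struct mmu_ops {
        pteval_t (*pte_val)(pteval_t pte);
        pteval_t (*make_pte)(pteval_t val);
    };

    /* PTE_IDENT analogue: the native case is a no-op conversion. */
    static pteval_t pte_ident(pteval_t v) { return v; }

    static const struct mmu_ops native_mmu_ops = {
        .pte_val  = pte_ident,
        .make_pte = pte_ident,      /* a hypervisor would swap these out */
    };
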
/linux-6.3-rc2/drivers/gpu/drm/panfrost/

panfrost_mmu.c
    164  as = mmu->as;  in panfrost_mmu_as_get()
    210  mmu->as = as;  in panfrost_mmu_as_get()
    240  mmu->as = -1;  in panfrost_mmu_reset()
    276  if (mmu->as < 0)  in panfrost_mmu_flush_range()
    565  kfree(mmu);  in panfrost_mmu_release_ctx()
    577  return mmu;  in panfrost_mmu_ctx_get()
    610  mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);  in panfrost_mmu_ctx_create()
    611  if (!mmu)  in panfrost_mmu_ctx_create()
    622  mmu->as = -1;  in panfrost_mmu_ctx_create()
    636  kfree(mmu);  in panfrost_mmu_ctx_create()
    [all …]

/linux-6.3-rc2/drivers/gpu/drm/msm/

msm_mmu.h
     13  void (*detach)(struct msm_mmu *mmu);
     14  int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
     16  int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
     17  void (*destroy)(struct msm_mmu *mmu);
     18  void (*resume_translation)(struct msm_mmu *mmu);
     38  mmu->dev = dev;  in msm_mmu_init()
     39  mmu->funcs = funcs;  in msm_mmu_init()
     40  mmu->type = type;  in msm_mmu_init()
     49  mmu->arg = arg;  in msm_mmu_set_fault_handler()
     50  mmu->handler = handler;  in msm_mmu_set_fault_handler()
    [all …]

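msm_mmu.h defines a small base class: a funcs vtable (detach/map/unmap/destroy/resume_translation) plus msm_mmu_init(), which stamps dev, funcs and type into the base object embedded in each backend — msm_iommu.c and msm_gpummu.c below both implement this vtable. A sketch of the embed-and-init pattern with hypothetical names (base_mmu, toy_backend):

    #include <stddef.h>
    #include <stdint.h>

    struct base_mmu;

    struct base_mmu_funcs {
        int  (*map)(struct base_mmu *mmu, uint64_t iova, size_t len);
        int  (*unmap)(struct base_mmu *mmu, uint64_t iova, size_t len);
        void (*destroy)(struct base_mmu *mmu);
    };

    struct base_mmu {
        const struct base_mmu_funcs *funcs;
        int type;
    };

    static void base_mmu_init(struct base_mmu *mmu,
                              const struct base_mmu_funcs *funcs, int type)
    {
        mmu->funcs = funcs;
        mmu->type = type;
    }

    /* A backend embeds the base and hands its own vtable to the helper. */
    struct toy_backend {
        struct base_mmu base;
        /* backend-private state ... */
    };

Callers hold only a struct base_mmu * and dispatch through funcs, which is how msm_gem_vma.c can drive either backend through aspace->mmu->funcs.
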
msm_iommu.c
    170  int msm_iommu_pagetable_params(struct msm_mmu *mmu,  in msm_iommu_pagetable_params()  argument
    175  if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)  in msm_iommu_pagetable_params()
    178  pagetable = to_pagetable(mmu);  in msm_iommu_pagetable_params()
    191  struct msm_iommu *iommu = to_msm_iommu(mmu);  in msm_iommu_get_geometry()
    327  static void msm_iommu_detach(struct msm_mmu *mmu)  in msm_iommu_detach()  argument
    329  struct msm_iommu *iommu = to_msm_iommu(mmu);  in msm_iommu_detach()
    331  iommu_detach_device(iommu->domain, mmu->dev);  in msm_iommu_detach()
    337  struct msm_iommu *iommu = to_msm_iommu(mmu);  in msm_iommu_map()
    352  struct msm_iommu *iommu = to_msm_iommu(mmu);  in msm_iommu_unmap()
    362  static void msm_iommu_destroy(struct msm_mmu *mmu)  in msm_iommu_destroy()  argument
    [all …]

msm_gem_vma.c
     19  if (aspace->mmu)  in msm_gem_address_space_destroy()
     20  aspace->mmu->funcs->destroy(aspace->mmu);  in msm_gem_address_space_destroy()
     71  if (aspace->mmu)  in msm_gem_purge_vma()
     72  aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);  in msm_gem_purge_vma()
    114  if (aspace && aspace->mmu)  in msm_gem_map_vma()
    115  ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,  in msm_gem_map_vma()
    170  msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,  in msm_gem_address_space_create()  argument
    175  if (IS_ERR(mmu))  in msm_gem_address_space_create()
    176  return ERR_CAST(mmu);  in msm_gem_address_space_create()
    184  aspace->mmu = mmu;  in msm_gem_address_space_create()

msm_gpummu.c
     24  static void msm_gpummu_detach(struct msm_mmu *mmu)  in msm_gpummu_detach()  argument
     28  static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,  in msm_gpummu_map()  argument
     31  struct msm_gpummu *gpummu = to_msm_gpummu(mmu);  in msm_gpummu_map()
     56  static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)  in msm_gpummu_unmap()  argument
     58  struct msm_gpummu *gpummu = to_msm_gpummu(mmu);  in msm_gpummu_unmap()
     71  static void msm_gpummu_resume_translation(struct msm_mmu *mmu)  in msm_gpummu_resume_translation()  argument
     75  static void msm_gpummu_destroy(struct msm_mmu *mmu)  in msm_gpummu_destroy()  argument
     77  struct msm_gpummu *gpummu = to_msm_gpummu(mmu);  in msm_gpummu_destroy()
     79  dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,  in msm_gpummu_destroy()
    114  void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,  in msm_gpummu_params()  argument
    [all …]

/linux-6.3-rc2/arch/arc/mm/

tlb.c
    134  struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;  in local_flush_tlb_all()  local
    137  int num_tlb = mmu->sets * mmu->ways;  in local_flush_tlb_all()
    565  struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;  in read_decode_mmu_bcr()  local
    589  mmu->ver = (tmp >> 24);  in read_decode_mmu_bcr()
    596  mmu->u_dtlb = mmu3->u_dtlb;  in read_decode_mmu_bcr()
    598  mmu->sasid = mmu3->sasid;  in read_decode_mmu_bcr()
    607  mmu->sasid = mmu4->sasid;  in read_decode_mmu_bcr()
    640  struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;  in arc_mmu_init()  local
    707  #define SET_WAY_TO_IDX(mmu, set, way) ((set) * mmu->ways + (way))  argument
    722  struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;  in do_tlb_overlap_fault()  local
    [all …]

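SET_WAY_TO_IDX() at line 707 flattens a (set, way) pair into a single TLB-entry index in row-major order, matching num_tlb = mmu->sets * mmu->ways at line 137. A worked example (the macro below takes the way count directly rather than the mmu pointer the source uses):

    #define SET_WAY_TO_IDX(ways, set, way) ((set) * (ways) + (way))
    /* With 4 ways: set 2, way 1 -> index 2 * 4 + 1 = 9;
     * indices 0 .. sets * ways - 1 cover every TLB entry exactly once. */
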
/linux-6.3-rc2/drivers/accel/ivpu/

ivpu_mmu.c
    269  struct ivpu_mmu_info *mmu = vdev->mmu;  in ivpu_mmu_cdtab_alloc()  local
    284  struct ivpu_mmu_info *mmu = vdev->mmu;  in ivpu_mmu_strtab_alloc()  local
    304  struct ivpu_mmu_info *mmu = vdev->mmu;  in ivpu_mmu_cmdq_alloc()  local
    323  struct ivpu_mmu_info *mmu = vdev->mmu;  in ivpu_mmu_evtq_alloc()  local
    474  struct ivpu_mmu_info *mmu = vdev->mmu;  in ivpu_mmu_reset()  local
    548  struct ivpu_mmu_info *mmu = vdev->mmu;  in ivpu_mmu_strtab_link_cd()  local
    589  struct ivpu_mmu_info *mmu = vdev->mmu;  in ivpu_mmu_invalidate_tlb()  local
    613  struct ivpu_mmu_info *mmu = vdev->mmu;  in ivpu_mmu_cd_add()  local
    706  struct ivpu_mmu_info *mmu = vdev->mmu;  in ivpu_mmu_init()  local
    743  struct ivpu_mmu_info *mmu = vdev->mmu;  in ivpu_mmu_enable()  local
    [all …]

/linux-6.3-rc2/arch/x86/include/asm/

paravirt.h
     71  PVOP_VCALL0(mmu.flush_tlb_user);  in __flush_tlb_local()
     76  PVOP_VCALL0(mmu.flush_tlb_kernel);  in __flush_tlb_global()
     97  PVOP_VCALL1(mmu.exit_mmap, mm);  in paravirt_arch_exit_mmap()
    151  PVOP_VCALL1(mmu.write_cr2, x);  in write_cr2()
    162  PVOP_ALT_VCALL1(mmu.write_cr3, x,  in write_cr3()
    365  PVOP_VCALL1(mmu.release_pte, pfn);  in paravirt_release_pte()
    375  PVOP_VCALL1(mmu.release_pmd, pfn);  in paravirt_release_pmd()
    384  PVOP_VCALL1(mmu.release_pud, pfn);  in paravirt_release_pud()
    568  PVOP_VCALL0(mmu.lazy_mode.enter);  in arch_enter_lazy_mmu_mode()
    573  PVOP_VCALL0(mmu.lazy_mode.leave);  in arch_leave_lazy_mmu_mode()
    [all …]

/linux-6.3-rc2/drivers/xen/

grant-dma-iommu.c
     36  struct grant_dma_iommu_device *mmu;  in grant_dma_iommu_probe()  local
     39  mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);  in grant_dma_iommu_probe()
     40  if (!mmu)  in grant_dma_iommu_probe()
     43  mmu->dev = &pdev->dev;  in grant_dma_iommu_probe()
     45  ret = iommu_device_register(&mmu->iommu, &grant_dma_iommu_ops, &pdev->dev);  in grant_dma_iommu_probe()
     49  platform_set_drvdata(pdev, mmu);  in grant_dma_iommu_probe()
     56  struct grant_dma_iommu_device *mmu = platform_get_drvdata(pdev);  in grant_dma_iommu_remove()  local
     59  iommu_device_unregister(&mmu->iommu);  in grant_dma_iommu_remove()

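The probe/remove pair above is the stock platform-driver shape: devm_kzalloc() the per-device structure, register it with the subsystem, stash it with platform_set_drvdata(), then unwind from drvdata in remove. A kernel-style sketch of that skeleton — the toy_iommu_* names are hypothetical, and the empty ops table stands in for the real grant_dma_iommu_ops.

    #include <linux/iommu.h>
    #include <linux/platform_device.h>

    struct toy_iommu_device {
        struct device *dev;
        struct iommu_device iommu;
    };

    static const struct iommu_ops toy_iommu_ops;    /* hooks elided */

    static int toy_iommu_probe(struct platform_device *pdev)
    {
        struct toy_iommu_device *mmu;
        int ret;

        mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
        if (!mmu)
            return -ENOMEM;

        mmu->dev = &pdev->dev;

        ret = iommu_device_register(&mmu->iommu, &toy_iommu_ops, &pdev->dev);
        if (ret)
            return ret;

        platform_set_drvdata(pdev, mmu);
        return 0;
    }

    static int toy_iommu_remove(struct platform_device *pdev)
    {
        struct toy_iommu_device *mmu = platform_get_drvdata(pdev);

        iommu_device_unregister(&mmu->iommu);
        return 0;
    }

Because the allocation is devm-managed, the remove path only needs to unregister; the memory itself is freed automatically when the device goes away.
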
/linux-6.3-rc2/arch/arm64/kvm/hyp/nvhe/

tlb.c
     17  static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,  in __tlb_switch_to_guest()  argument
     42  __load_stage2(mmu, kern_hyp_va(mmu->arch));  in __tlb_switch_to_guest()
     58  void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,  in __kvm_tlb_flush_vmid_ipa()  argument
     66  __tlb_switch_to_guest(mmu, &cxt);  in __kvm_tlb_flush_vmid_ipa()
    112  void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)  in __kvm_tlb_flush_vmid()  argument
    119  __tlb_switch_to_guest(mmu, &cxt);  in __kvm_tlb_flush_vmid()
    128  void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)  in __kvm_flush_cpu_context()  argument
    133  __tlb_switch_to_guest(mmu, &cxt);  in __kvm_flush_cpu_context()

/linux-6.3-rc2/arch/arm64/kvm/hyp/vhe/

tlb.c
     19  static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,  in __tlb_switch_to_guest()  argument
     56  __load_stage2(mmu, mmu->arch);  in __tlb_switch_to_guest()
     82  void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,  in __kvm_tlb_flush_vmid_ipa()  argument
     90  __tlb_switch_to_guest(mmu, &cxt);  in __kvm_tlb_flush_vmid_ipa()
    114  void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)  in __kvm_tlb_flush_vmid()  argument
    121  __tlb_switch_to_guest(mmu, &cxt);  in __kvm_tlb_flush_vmid()
    130  void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)  in __kvm_flush_cpu_context()  argument
    135  __tlb_switch_to_guest(mmu, &cxt);  in __kvm_flush_cpu_context()

/linux-6.3-rc2/drivers/gpu/drm/nouveau/include/nvif/

mmu.h
     39  nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)  in nvif_mmu_kind_valid()  argument
     42  if (kind >= mmu->kind_nr || mmu->kind[kind] == mmu->kind_inv)  in nvif_mmu_kind_valid()
     49  nvif_mmu_type(struct nvif_mmu *mmu, u8 mask)  in nvif_mmu_type()  argument
     52  for (i = 0; i < mmu->type_nr; i++) {  in nvif_mmu_type()
     53  if ((mmu->type[i].type & mask) == mask)  in nvif_mmu_type()

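nvif_mmu_type() is a linear scan for the first memory type whose flag set is a superset of the requested mask, and nvif_mmu_kind_valid() rejects kinds that are out of range or equal to the invalid marker. The key test, (flags & mask) == mask, is true exactly when every requested bit is present. A sketch of the scan; find_type() is a hypothetical name.

    /* Return the first index whose flags contain all bits in mask, else -1. */
    static int find_type(const unsigned char *type_flags, int nr,
                         unsigned char mask)
    {
        int i;

        for (i = 0; i < nr; i++) {
            if ((type_flags[i] & mask) == mask)
                return i;
        }
        return -1;
    }

nvif_mem_ctor() in nvif/mem.c above uses the same superset test to retry construction across every matching type until one succeeds.
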
/linux-6.3-rc2/arch/x86/kvm/mmu/

paging_tmpl.h
     31  #define PT_HAVE_ACCESSED_DIRTY(mmu) true  argument
     45  #define PT_HAVE_ACCESSED_DIRTY(mmu) true  argument
     58  #define PT_HAVE_ACCESSED_DIRTY(mmu) (!(mmu)->cpu_role.base.ad_disabled)  argument
    115  if (!PT_HAVE_ACCESSED_DIRTY(mmu))  in FNAME()
    159  if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&  in FNAME()
    198  struct kvm_mmu *mmu,  in FNAME()
    209  if (!PT_HAVE_ACCESSED_DIRTY(mmu))  in FNAME()
    326  walker->level = mmu->cpu_role.base.level;  in FNAME()
    327  pte = mmu->get_guest_pgd(vcpu);  in FNAME()
    466  if (fetch_fault && (is_efer_nx(mmu) || is_cr4_smep(mmu)))  in FNAME()
    [all …]

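paging_tmpl.h is a C template: it is included several times with PTTYPE selecting 32-bit, 64-bit or EPT paging, per-mode macros such as PT_HAVE_ACCESSED_DIRTY() expanding differently on each pass (a constant true for the classic modes at lines 31 and 45, an ad_disabled role check for EPT at line 58), and FNAME() mangling every function name per mode. A minimal sketch of the multiple-inclusion trick with hypothetical files:

    /* sum_tmpl.h -- the includer defines NAME and TYPE before each pass. */
    TYPE NAME(const TYPE *v, int n)
    {
        TYPE s = 0;
        int i;

        for (i = 0; i < n; i++)
            s += v[i];
        return s;
    }
    #undef NAME
    #undef TYPE

    /* user.c -- instantiate the template twice. */
    #include <stdint.h>

    #define TYPE int32_t
    #define NAME sum32
    #include "sum_tmpl.h"       /* emits sum32() */

    #define TYPE int64_t
    #define NAME sum64
    #include "sum_tmpl.h"       /* emits sum64() */

One source file thus yields one specialized walker per paging mode, with no function-pointer indirection on the hot path.
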