Lines matching refs:smmu_domain

227 static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)  in arm_smmu_tlb_sync_context()  argument
229 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_sync_context()
232 spin_lock_irqsave(&smmu_domain->cb_lock, flags); in arm_smmu_tlb_sync_context()
233 __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx), in arm_smmu_tlb_sync_context()
235 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); in arm_smmu_tlb_sync_context()
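
Taken together, the fragments at 227-235 are consistent with the following shape for the sync helper. The ARM_SMMU_CB_TLBSYNC and ARM_SMMU_CB_TLBSTATUS arguments to __arm_smmu_tlb_sync() are not in the matched lines and are assumed from the upstream driver.

static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long flags;

	/* cb_lock serialises TLB sync against ATS1 translation requests */
	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
			    ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}
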
240 struct arm_smmu_domain *smmu_domain = cookie; in arm_smmu_tlb_inv_context_s1() local
246 arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx, in arm_smmu_tlb_inv_context_s1()
247 ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid); in arm_smmu_tlb_inv_context_s1()
248 arm_smmu_tlb_sync_context(smmu_domain); in arm_smmu_tlb_inv_context_s1()
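
The S1 context-invalidation path (240-248) plausibly reads as below; only the TLBIASID write and the sync call are attested above, the wmb() barrier and its rationale are carried over from upstream as an assumption.

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	/* make PTE updates visible before the (possibly relaxed) TLBI write */
	wmb();
	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
	arm_smmu_tlb_sync_context(smmu_domain);
}
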
253 struct arm_smmu_domain *smmu_domain = cookie; in arm_smmu_tlb_inv_context_s2() local
254 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_context_s2()
258 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid); in arm_smmu_tlb_inv_context_s2()
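
Likewise for the S2 path (253-258); only the TLBIVMID write shows up in the matches, so the barrier and the trailing arm_smmu_tlb_sync_global() call are assumptions.

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	wmb();	/* same ordering rationale as the S1 path */
	/* invalidate every entry tagged with this domain's VMID */
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
	arm_smmu_tlb_sync_global(smmu);	/* assumed: global, not per-CB, sync */
}
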
265 struct arm_smmu_domain *smmu_domain = cookie; in arm_smmu_tlb_inv_range_s1() local
266 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_range_s1()
267 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_tlb_inv_range_s1()
293 struct arm_smmu_domain *smmu_domain = cookie; in arm_smmu_tlb_inv_range_s2() local
294 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_range_s2()
295 int idx = smmu_domain->cfg.cbndx; in arm_smmu_tlb_inv_range_s2()
302 if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64) in arm_smmu_tlb_inv_range_s2()
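
Lines 265-302 cover the by-VA invalidation helpers for both stages; a sketch of the S2 variant follows (the S1 variant additionally folds cfg->asid into the written value). The ARM_SMMU_FEAT_COHERENT_WALK test, the wmb(), and arm_smmu_cb_writeq() are assumed from upstream rather than attested here.

static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	/* the S2 TLBI registers take an IPA shifted right by 12 */
	iova >>= 12;
	do {
		if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
		else
			arm_smmu_cb_write(smmu, idx, reg, iova);
		iova += granule >> 12;
	} while (size -= granule);
}
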
313 struct arm_smmu_domain *smmu_domain = cookie; in arm_smmu_tlb_inv_walk_s1() local
314 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_tlb_inv_walk_s1()
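
The walk-invalidation wrapper at 313-314 likely dispatches as sketched below. The flush_walk_prefer_tlbiasid field and the ARM_SMMU_CB_S1_TLBIVA register index are assumptions from recent upstream revisions; only the smmu_domain/cfg locals are attested.

static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	if (cfg->flush_walk_prefer_tlbiasid) {
		/* some implementations do better with a full ASID flush */
		arm_smmu_tlb_inv_context_s1(cookie);
	} else {
		arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
					  ARM_SMMU_CB_S1_TLBIVA);
		arm_smmu_tlb_sync_context(cookie);
	}
}
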
365 struct arm_smmu_domain *smmu_domain = cookie; in arm_smmu_tlb_add_page_s2_v1() local
366 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_add_page_s2_v1()
371 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid); in arm_smmu_tlb_add_page_s2_v1()
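
For SMMUv1 stage 2 there is no by-VA invalidation, so the add-page hook (365-371) likely just invalidates the whole VMID; the feature test, barrier, and signature are assumed from upstream.

static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
					unsigned long iova, size_t granule,
					void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	/* SMMUv1 S2 has no by-VA op, so invalidate the whole VMID */
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}
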
397 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_context_fault() local
398 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_context_fault()
399 int idx = smmu_domain->cfg.cbndx; in arm_smmu_context_fault()
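
The context-fault handler (397-399 and onward) plausibly opens as below; the register names, the IRQ_NONE/IRQ_HANDLED flow, and the elided fault reporting are assumptions from the upstream driver, only the three locals are attested.

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;
	unsigned long iova;
	u32 fsr, fsynr;

	fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
	if (!(fsr & ARM_SMMU_FSR_FAULT))
		return IRQ_NONE;	/* not a fault on this context bank */

	fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
	iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);

	/* report_iommu_fault() and ratelimited logging elided */

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);	/* clear it */
	return IRQ_HANDLED;
}
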
455 static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, in arm_smmu_init_context_bank() argument
458 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_init_context_bank()
459 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx]; in arm_smmu_init_context_bank()
601 static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain, in arm_smmu_alloc_context_bank() argument
606 return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start); in arm_smmu_alloc_context_bank()
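
The allocator at 601-606 is a thin dispatch; only the impl hook call is attested, the bitmap fallback (__arm_smmu_alloc_bitmap(), smmu->context_map, smmu->num_context_banks) is assumed from upstream.

static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct arm_smmu_device *smmu,
				       struct device *dev, unsigned int start)
{
	/* an implementation can override context-bank selection entirely */
	if (smmu->impl && smmu->impl->alloc_context_bank)
		return smmu->impl->alloc_context_bank(smmu_domain, smmu,
						      dev, start);

	return __arm_smmu_alloc_bitmap(smmu->context_map, start,
				       smmu->num_context_banks);
}
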
620 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_init_domain_context() local
621 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_init_domain_context()
624 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_init_domain_context()
625 if (smmu_domain->smmu) in arm_smmu_init_domain_context()
629 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS; in arm_smmu_init_domain_context()
630 smmu_domain->smmu = smmu; in arm_smmu_init_domain_context()
653 smmu_domain->stage = ARM_SMMU_DOMAIN_S2; in arm_smmu_init_domain_context()
655 smmu_domain->stage = ARM_SMMU_DOMAIN_S1; in arm_smmu_init_domain_context()
670 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1)) in arm_smmu_init_domain_context()
683 switch (smmu_domain->stage) { in arm_smmu_init_domain_context()
700 smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops; in arm_smmu_init_domain_context()
720 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2; in arm_smmu_init_domain_context()
722 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1; in arm_smmu_init_domain_context()
729 ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start); in arm_smmu_init_domain_context()
734 smmu_domain->smmu = smmu; in arm_smmu_init_domain_context()
744 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2) in arm_smmu_init_domain_context()
754 .tlb = smmu_domain->flush_ops, in arm_smmu_init_domain_context()
759 ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev); in arm_smmu_init_domain_context()
764 if (smmu_domain->pgtbl_quirks) in arm_smmu_init_domain_context()
765 pgtbl_cfg.quirks |= smmu_domain->pgtbl_quirks; in arm_smmu_init_domain_context()
767 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); in arm_smmu_init_domain_context()
786 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg); in arm_smmu_init_domain_context()
808 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_init_domain_context()
811 smmu_domain->pgtbl_ops = pgtbl_ops; in arm_smmu_init_domain_context()
816 smmu_domain->smmu = NULL; in arm_smmu_init_domain_context()
818 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_init_domain_context()
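
arm_smmu_init_domain_context() spans roughly 620-818, so a condensed skeleton is more useful than the raw fragments. Stage forcing, context-format negotiation, ias/oas sizing, the context-bank register write, and the IRQ request are elided; ARM_64_LPAE_S1/S2, the -EINVAL/-ENOMEM returns, and the error-path bitmap free are assumptions from upstream.

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu,
					struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	unsigned int start;
	int ret = 0;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;	/* already initialised */

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/* force_stage overrides and feature fallbacks elided (lines 653-670) */

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		start = smmu->num_s2_context_banks;	/* S1 banks follow S2 */
		fmt = ARM_64_LPAE_S1;			/* format choice simplified */
		smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
	case ARM_SMMU_DOMAIN_S2:
		start = 0;
		fmt = ARM_64_LPAE_S2;
		smmu_domain->flush_ops = (smmu->version == ARM_SMMU_V2) ?
			&arm_smmu_s2_tlb_ops_v2 : &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start);
	if (ret < 0)
		goto out_unlock;
	cfg->cbndx = ret;
	smmu_domain->smmu = smmu;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.tlb		= smmu_domain->flush_ops,
		.iommu_dev	= smmu->dev,
		/* ias/oas and coherency flags elided */
	};

	if (smmu->impl && smmu->impl->init_context) {
		ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
		if (ret)
			goto out_clear_smmu;
	}

	if (smmu_domain->pgtbl_quirks)
		pgtbl_cfg.quirks |= smmu_domain->pgtbl_quirks;

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	/* context-bank register write and context IRQ request elided */

	mutex_unlock(&smmu_domain->init_mutex);
	smmu_domain->pgtbl_ops = pgtbl_ops;	/* publish only when fully set up */
	return 0;

out_clear_smmu:
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
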
824 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_destroy_domain_context() local
825 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_destroy_domain_context()
826 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_destroy_domain_context()
848 free_io_pgtable_ops(smmu_domain->pgtbl_ops); in arm_smmu_destroy_domain_context()
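
Teardown (824-848) likely mirrors init in reverse; the pgtbl_ops free at 848 is attested, while the runtime-PM bracketing, the IRQ release, arm_smmu_write_context_bank(), and __arm_smmu_free_bitmap() are assumptions.

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;	/* nothing was ever set up for this domain */

	/* runtime-PM get/put around the teardown elided */

	/* disable the context bank before freeing its page tables */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/* context IRQ release (devm_free_irq) elided */

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}
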
856 struct arm_smmu_domain *smmu_domain; in arm_smmu_domain_alloc() local
868 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); in arm_smmu_domain_alloc()
869 if (!smmu_domain) in arm_smmu_domain_alloc()
872 mutex_init(&smmu_domain->init_mutex); in arm_smmu_domain_alloc()
873 spin_lock_init(&smmu_domain->cb_lock); in arm_smmu_domain_alloc()
875 return &smmu_domain->domain; in arm_smmu_domain_alloc()
880 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_domain_free() local
887 kfree(smmu_domain); in arm_smmu_domain_free()
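
Allocation and free (856-887) are short enough to sketch whole; the domain-type filter in the allocator is an assumption from upstream, the rest follows the matched lines directly.

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;

	/* nothing hardware-related can happen until a master is attached */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/* assumes all devices have already been detached */
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}
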
1086 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, in arm_smmu_domain_add_master() argument
1090 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_add_master()
1092 u8 cbndx = smmu_domain->cfg.cbndx; in arm_smmu_domain_add_master()
1096 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) in arm_smmu_domain_add_master()
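
The add-master path (1086-1096) programs the stream-to-context mapping; everything around the attested BYPASS check (the s2cr array, the S2CR_TYPE_* values, for_each_cfg_sme(), arm_smmu_write_s2cr()) is assumed from upstream.

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	u8 cbndx = smmu_domain->cfg.cbndx;
	enum arm_smmu_s2cr_type type;
	int i, idx;

	/* bypass domains steer traffic around translation entirely */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
		type = S2CR_TYPE_BYPASS;
	else
		type = S2CR_TYPE_TRANS;

	for_each_cfg_sme(cfg, fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;	/* already pointing at this context */

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}
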
1115 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_attach_dev() local
1152 if (smmu_domain->smmu != smmu) { in arm_smmu_attach_dev()
1158 ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec); in arm_smmu_attach_dev()
1217 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_flush_iotlb_all() local
1218 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_flush_iotlb_all()
1220 if (smmu_domain->flush_ops) { in arm_smmu_flush_iotlb_all()
1222 smmu_domain->flush_ops->tlb_flush_all(smmu_domain); in arm_smmu_flush_iotlb_all()
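
A sketch of the full-flush hook (1217-1222); the flush_ops test and the tlb_flush_all() call are attested, the arm_smmu_rpm_get()/arm_smmu_rpm_put() bracketing is assumed.

static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* flush_ops is only set once a context bank has been initialised */
	if (smmu_domain->flush_ops) {
		arm_smmu_rpm_get(smmu);
		smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
		arm_smmu_rpm_put(smmu);
	}
}
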
1230 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_iotlb_sync() local
1231 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_iotlb_sync()
1238 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) in arm_smmu_iotlb_sync()
1239 arm_smmu_tlb_sync_context(smmu_domain); in arm_smmu_iotlb_sync()
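
The sync hook (1230-1239) likely chooses between per-context and global sync; the ARM_SMMU_V2 version test, the arm_smmu_tlb_sync_global() fallback, and the runtime-PM calls are assumptions.

static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
				struct iommu_iotlb_gather *gather)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (!smmu)
		return;	/* domain never attached to hardware */

	arm_smmu_rpm_get(smmu);
	if (smmu->version == ARM_SMMU_V2 ||
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		arm_smmu_tlb_sync_context(smmu_domain);
	else
		arm_smmu_tlb_sync_global(smmu);
	arm_smmu_rpm_put(smmu);
}
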
1248 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_iova_to_phys_hard() local
1249 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_iova_to_phys_hard()
1250 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_iova_to_phys_hard()
1251 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; in arm_smmu_iova_to_phys_hard()
1264 spin_lock_irqsave(&smmu_domain->cb_lock, flags); in arm_smmu_iova_to_phys_hard()
1274 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); in arm_smmu_iova_to_phys_hard()
1283 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); in arm_smmu_iova_to_phys_hard()
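
The hardware-assisted lookup (1248-1283) issues an ATS1PR translation under cb_lock, which is why the same lock guards TLB sync above. The register names (ARM_SMMU_CB_ATS1PR, ARM_SMMU_CB_ATSR, ARM_SMMU_CB_PAR), the poll parameters, and the 40-bit PAR mask are assumed; the locking and the software-walk fallback follow the matched lines.

static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	unsigned long va, flags;
	void __iomem *reg;
	int idx = cfg->cbndx;
	u32 tmp;
	u64 phys;

	/* runtime-PM get/put around the register access elided */

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	va = iova & ~0xfffUL;
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
	else
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);

	reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
	if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_ATSR_ACTIVE),
				      5, 50)) {
		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
		/* hardware translation timed out: software table walk instead */
		return ops->iova_to_phys(ops, iova);
	}

	phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);

	if (phys & ARM_SMMU_CB_PAR_F)
		return 0;	/* translation fault reported in PAR */

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}
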
1300 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_iova_to_phys() local
1301 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; in arm_smmu_iova_to_phys()
1306 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS && in arm_smmu_iova_to_phys()
1307 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) in arm_smmu_iova_to_phys()
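
The dispatcher (1300-1307) then picks between the two walks; only the feature/stage test is attested, the !ops guard and the final ops->iova_to_phys() call are assumed.

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	/* prefer the hardware ATS1 path where S1 translation ops exist */
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		return arm_smmu_iova_to_phys_hard(domain, iova);

	return ops->iova_to_phys(ops, iova);
}
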
1489 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_enable_nesting() local
1492 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_enable_nesting()
1493 if (smmu_domain->smmu) in arm_smmu_enable_nesting()
1496 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; in arm_smmu_enable_nesting()
1497 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_enable_nesting()
1505 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_set_pgtable_quirks() local
1508 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_set_pgtable_quirks()
1509 if (smmu_domain->smmu) in arm_smmu_set_pgtable_quirks()
1512 smmu_domain->pgtbl_quirks = quirks; in arm_smmu_set_pgtable_quirks()
1513 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_set_pgtable_quirks()
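
Both late-configuration hooks (1489-1513) share a lock-check-set pattern: once smmu_domain->smmu is set the domain is live and the request is refused. enable_nesting is sketched below with the -EPERM value assumed; arm_smmu_set_pgtable_quirks() is identical except it stores quirks in smmu_domain->pgtbl_quirks instead of changing the stage.

static int arm_smmu_enable_nesting(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int ret = 0;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		ret = -EPERM;	/* too late once attached to hardware */
	else
		smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
	mutex_unlock(&smmu_domain->init_mutex);

	return ret;
}
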