Lines Matching refs:iommu
114 struct iommu_device iommu; member
122 struct rk_iommu *iommu; member
345 static void rk_iommu_command(struct rk_iommu *iommu, u32 command) in rk_iommu_command() argument
349 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_command()
350 writel(command, iommu->bases[i] + RK_MMU_COMMAND); in rk_iommu_command()
357 static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start, in rk_iommu_zap_lines() argument
366 for (i = 0; i < iommu->num_mmu; i++) { in rk_iommu_zap_lines()
370 rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova); in rk_iommu_zap_lines()
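The two helpers above fan one operation out to every MMU instance behind the master (iommu->num_mmu can be greater than one). A minimal sketch of that per-instance loop, assuming rk_iommu_read()/rk_iommu_write() are the thin readl()/writel() wrappers used throughout this driver and that SPAGE_SIZE and the RK_MMU_* register offsets are the defines from the top of the file:

#include <linux/io.h>
#include <linux/types.h>

/* Abridged view of the state the listing references; the full struct
 * (clocks, irqs, group, domain, ...) lives near the top of the driver. */
struct rk_iommu {
        struct device *dev;
        void __iomem **bases;   /* one MMIO block per MMU instance */
        int num_mmu;
};

/* Thin MMIO accessors, as used throughout the driver. */
static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
        return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
        writel(value, base + offset);
}

/* Broadcast one command-register write to every MMU instance. */
static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
        int i;

        for (i = 0; i < iommu->num_mmu; i++)
                writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

/* Invalidate the TLB lines covering [iova_start, iova_start + size) on
 * every instance, one 4 KiB page at a time. */
static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
                               size_t size)
{
        dma_addr_t iova_end = iova_start + size;
        int i;

        for (i = 0; i < iommu->num_mmu; i++) {
                dma_addr_t iova;

                for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
                        rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE,
                                       iova);
        }
}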
374 static bool rk_iommu_is_stall_active(struct rk_iommu *iommu) in rk_iommu_is_stall_active() argument
379 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_is_stall_active()
380 active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & in rk_iommu_is_stall_active()
386 static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu) in rk_iommu_is_paging_enabled() argument
391 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_is_paging_enabled()
392 enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & in rk_iommu_is_paging_enabled()
398 static bool rk_iommu_is_reset_done(struct rk_iommu *iommu) in rk_iommu_is_reset_done() argument
403 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_is_reset_done()
404 done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0; in rk_iommu_is_reset_done()
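The three status predicates above all share one shape: read RK_MMU_STATUS (or RK_MMU_DTE_ADDR) on every instance and AND the results, so a condition only counts as reached once every MMU agrees. A sketch of one of them, assuming RK_MMU_STATUS_STALL_ACTIVE is the status bit mask defined near the top of the driver:

/* True only if *every* MMU instance reports the stall bit set. */
static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
        bool active = true;
        int i;

        for (i = 0; i < iommu->num_mmu; i++)
                active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
                             RK_MMU_STATUS_STALL_ACTIVE);

        return active;
}

rk_iommu_is_paging_enabled() tests the paging-enabled status bit the same way, and rk_iommu_is_reset_done() instead checks that RK_MMU_DTE_ADDR reads back as zero on every instance.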
409 static int rk_iommu_enable_stall(struct rk_iommu *iommu) in rk_iommu_enable_stall() argument
414 if (rk_iommu_is_stall_active(iommu)) in rk_iommu_enable_stall()
418 if (!rk_iommu_is_paging_enabled(iommu)) in rk_iommu_enable_stall()
421 rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL); in rk_iommu_enable_stall()
423 ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val, in rk_iommu_enable_stall()
427 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_enable_stall()
428 dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n", in rk_iommu_enable_stall()
429 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS)); in rk_iommu_enable_stall()
434 static int rk_iommu_disable_stall(struct rk_iommu *iommu) in rk_iommu_disable_stall() argument
439 if (!rk_iommu_is_stall_active(iommu)) in rk_iommu_disable_stall()
442 rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL); in rk_iommu_disable_stall()
444 ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val, in rk_iommu_disable_stall()
448 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_disable_stall()
449 dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n", in rk_iommu_disable_stall()
450 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS)); in rk_iommu_disable_stall()
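Enable/disable stall (and, below, enable/disable paging) follow one pattern: skip if already in the target state, broadcast the command, then poll the matching predicate with readx_poll_timeout() from <linux/iopoll.h>, dumping RK_MMU_STATUS of every instance on timeout. A sketch of the enable-stall path; RK_MMU_POLL_PERIOD_US and RK_MMU_POLL_TIMEOUT_US are assumed to be the driver's polling defines:

#include <linux/iopoll.h>

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
        int ret, i;
        bool val;

        /* Already stalled: nothing to do. */
        if (rk_iommu_is_stall_active(iommu))
                return 0;

        /* Stalling only makes sense while paging is enabled. */
        if (!rk_iommu_is_paging_enabled(iommu))
                return 0;

        rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

        /* readx_poll_timeout() re-evaluates rk_iommu_is_stall_active(iommu)
         * into 'val' each iteration until it is true or the timeout expires. */
        ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
                                 val, RK_MMU_POLL_PERIOD_US,
                                 RK_MMU_POLL_TIMEOUT_US);
        if (ret)
                for (i = 0; i < iommu->num_mmu; i++)
                        dev_err(iommu->dev,
                                "Enable stall request timed out, status: %#08x\n",
                                rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

        return ret;
}

rk_iommu_disable_stall() is the mirror image: it issues RK_MMU_CMD_DISABLE_STALL and polls until the predicate reads false (!val), and the enable/disable paging helpers below do the same with the paging command and predicate.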
455 static int rk_iommu_enable_paging(struct rk_iommu *iommu) in rk_iommu_enable_paging() argument
460 if (rk_iommu_is_paging_enabled(iommu)) in rk_iommu_enable_paging()
463 rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING); in rk_iommu_enable_paging()
465 ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val, in rk_iommu_enable_paging()
469 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_enable_paging()
470 dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n", in rk_iommu_enable_paging()
471 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS)); in rk_iommu_enable_paging()
476 static int rk_iommu_disable_paging(struct rk_iommu *iommu) in rk_iommu_disable_paging() argument
481 if (!rk_iommu_is_paging_enabled(iommu)) in rk_iommu_disable_paging()
484 rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING); in rk_iommu_disable_paging()
486 ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val, in rk_iommu_disable_paging()
490 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_disable_paging()
491 dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n", in rk_iommu_disable_paging()
492 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS)); in rk_iommu_disable_paging()
497 static int rk_iommu_force_reset(struct rk_iommu *iommu) in rk_iommu_force_reset() argument
503 if (iommu->reset_disabled) in rk_iommu_force_reset()
510 for (i = 0; i < iommu->num_mmu; i++) { in rk_iommu_force_reset()
512 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr); in rk_iommu_force_reset()
514 if (dte_addr != rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR)) { in rk_iommu_force_reset()
515 dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n"); in rk_iommu_force_reset()
520 rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET); in rk_iommu_force_reset()
522 ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val, in rk_iommu_force_reset()
526 dev_err(iommu->dev, "FORCE_RESET command timed out\n"); in rk_iommu_force_reset()
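rk_iommu_force_reset() can be opted out of per board (the rockchip,disable-mmu-reset property read in probe, listed further down). When it does run, it first proves that RK_MMU_DTE_ADDR is writable by writing a dummy pattern and reading it back, then broadcasts FORCE_RESET and polls until every instance reports a zeroed DTE address. A sketch under the assumption that DTE_ADDR_DUMMY and the RK_MMU_*_US constants are the driver's own defines; the exact poll period/timeout pairing here is illustrative:

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
        int ret, i;
        u32 dte_addr;
        bool val;

        /* Some SoCs have a broken FORCE_RESET; honour the DT opt-out. */
        if (iommu->reset_disabled)
                return 0;

        /*
         * Check that the register used for the reset-done test is actually
         * writable: write a dummy directory address and read it back before
         * trusting the reset sequence.
         */
        for (i = 0; i < iommu->num_mmu; i++) {
                dte_addr = DTE_ADDR_DUMMY;      /* test pattern from the driver */
                rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);

                if (dte_addr != rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR)) {
                        dev_err(iommu->dev,
                                "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
                        return -EFAULT;
                }
        }

        rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

        /* Reset is complete once every instance reads back DTE_ADDR == 0. */
        ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
                                 val, RK_MMU_POLL_PERIOD_US,
                                 RK_MMU_FORCE_RESET_TIMEOUT_US);
        if (ret)
                dev_err(iommu->dev, "FORCE_RESET command timed out\n");

        return ret;
}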
560 static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova) in log_iova() argument
562 void __iomem *base = iommu->bases[index]; in log_iova()
599 dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n", in log_iova()
601 …dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa … in log_iova()
609 struct rk_iommu *iommu = dev_id; in rk_iommu_irq() local
616 err = pm_runtime_get_if_in_use(iommu->dev); in rk_iommu_irq()
620 if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks))) in rk_iommu_irq()
623 for (i = 0; i < iommu->num_mmu; i++) { in rk_iommu_irq()
624 int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS); in rk_iommu_irq()
629 iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR); in rk_iommu_irq()
634 status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS); in rk_iommu_irq()
638 dev_err(iommu->dev, "Page fault at %pad of type %s\n", in rk_iommu_irq()
642 log_iova(iommu, i, iova); in rk_iommu_irq()
649 if (iommu->domain) in rk_iommu_irq()
650 report_iommu_fault(iommu->domain, iommu->dev, iova, in rk_iommu_irq()
653 dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n"); in rk_iommu_irq()
655 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); in rk_iommu_irq()
656 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE); in rk_iommu_irq()
660 dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova); in rk_iommu_irq()
663 dev_err(iommu->dev, "unexpected int_status: %#08x\n", in rk_iommu_irq()
666 rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status); in rk_iommu_irq()
669 clk_bulk_disable(iommu->num_clocks, iommu->clocks); in rk_iommu_irq()
672 pm_runtime_put(iommu->dev); in rk_iommu_irq()
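The interrupt handler only touches the hardware if the IOMMU is already runtime-active (pm_runtime_get_if_in_use()), then walks every instance: read RK_MMU_INT_STATUS, decode page faults versus bus errors, report the fault to the attached domain, zap the TLB, ack the fault and clear the interrupt. A condensed sketch of that loop; the RK_MMU_IRQ_* and RK_MMU_STATUS_PAGE_FAULT_IS_WRITE masks are assumed to be the bit definitions from the top of the driver:

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
        struct rk_iommu *iommu = dev_id;
        irqreturn_t ret = IRQ_NONE;
        int i, err;

        /* Only touch the hardware if it is already runtime-active. */
        err = pm_runtime_get_if_in_use(iommu->dev);
        if (!err || WARN_ON_ONCE(err < 0))
                return ret;

        if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
                goto out;

        for (i = 0; i < iommu->num_mmu; i++) {
                u32 int_status, status;
                dma_addr_t iova;
                int flags;

                int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
                if (int_status == 0)
                        continue;

                ret = IRQ_HANDLED;
                iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

                if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
                        status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
                        flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
                                IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

                        dev_err(iommu->dev, "Page fault at %pad of type %s\n",
                                &iova,
                                (flags == IOMMU_FAULT_WRITE) ? "write" : "read");

                        log_iova(iommu, i, iova);

                        /* Let the master's driver know; the fault is acked
                         * either way so the MMU can make forward progress. */
                        if (iommu->domain)
                                report_iommu_fault(iommu->domain, iommu->dev,
                                                   iova, flags);
                        else
                                dev_err(iommu->dev,
                                        "Page fault while iommu not attached to domain?\n");

                        rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
                        rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
                }

                if (int_status & RK_MMU_IRQ_BUS_ERROR)
                        dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

                if (int_status & ~RK_MMU_IRQ_MASK)
                        dev_err(iommu->dev, "unexpected int_status: %#08x\n",
                                int_status);

                rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
        }

        clk_bulk_disable(iommu->num_clocks, iommu->clocks);

out:
        pm_runtime_put(iommu->dev);
        return ret;
}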
713 struct rk_iommu *iommu; in rk_iommu_zap_iova() local
716 iommu = list_entry(pos, struct rk_iommu, node); in rk_iommu_zap_iova()
719 ret = pm_runtime_get_if_in_use(iommu->dev); in rk_iommu_zap_iova()
723 WARN_ON(clk_bulk_enable(iommu->num_clocks, in rk_iommu_zap_iova()
724 iommu->clocks)); in rk_iommu_zap_iova()
725 rk_iommu_zap_lines(iommu, iova, size); in rk_iommu_zap_iova()
726 clk_bulk_disable(iommu->num_clocks, iommu->clocks); in rk_iommu_zap_iova()
727 pm_runtime_put(iommu->dev); in rk_iommu_zap_iova()
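rk_iommu_zap_iova() is the domain-level TLB shootdown: it walks every IOMMU attached to the domain and zaps the range only on the ones that are currently powered, since a powered-off MMU is force-reset (and therefore starts with a clean TLB) when it is next enabled. A sketch assuming the attached IOMMUs hang off a spinlock-protected list in struct rk_iommu_domain (iommus / iommus_lock), as elsewhere in this driver:

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
                              dma_addr_t iova, size_t size)
{
        struct list_head *pos;
        unsigned long flags;

        spin_lock_irqsave(&rk_domain->iommus_lock, flags);
        list_for_each(pos, &rk_domain->iommus) {
                struct rk_iommu *iommu = list_entry(pos, struct rk_iommu, node);
                int ret;

                /* Only zap TLBs of IOMMUs that are powered on; powered-off
                 * ones will be reset when they are re-enabled anyway. */
                ret = pm_runtime_get_if_in_use(iommu->dev);
                if (WARN_ON_ONCE(ret < 0))
                        continue;
                if (ret) {
                        WARN_ON(clk_bulk_enable(iommu->num_clocks,
                                                iommu->clocks));
                        rk_iommu_zap_lines(iommu, iova, size);
                        clk_bulk_disable(iommu->num_clocks, iommu->clocks);
                        pm_runtime_put(iommu->dev);
                }
        }
        spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}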
928 return data ? data->iommu : NULL; in rk_iommu_from_dev()
932 static void rk_iommu_disable(struct rk_iommu *iommu) in rk_iommu_disable() argument
937 WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)); in rk_iommu_disable()
938 rk_iommu_enable_stall(iommu); in rk_iommu_disable()
939 rk_iommu_disable_paging(iommu); in rk_iommu_disable()
940 for (i = 0; i < iommu->num_mmu; i++) { in rk_iommu_disable()
941 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0); in rk_iommu_disable()
942 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0); in rk_iommu_disable()
944 rk_iommu_disable_stall(iommu); in rk_iommu_disable()
945 clk_bulk_disable(iommu->num_clocks, iommu->clocks); in rk_iommu_disable()
949 static int rk_iommu_enable(struct rk_iommu *iommu) in rk_iommu_enable() argument
951 struct iommu_domain *domain = iommu->domain; in rk_iommu_enable()
955 ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks); in rk_iommu_enable()
959 ret = rk_iommu_enable_stall(iommu); in rk_iommu_enable()
963 ret = rk_iommu_force_reset(iommu); in rk_iommu_enable()
967 for (i = 0; i < iommu->num_mmu; i++) { in rk_iommu_enable()
968 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, in rk_iommu_enable()
970 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); in rk_iommu_enable()
971 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); in rk_iommu_enable()
974 ret = rk_iommu_enable_paging(iommu); in rk_iommu_enable()
977 rk_iommu_disable_stall(iommu); in rk_iommu_enable()
979 clk_bulk_disable(iommu->num_clocks, iommu->clocks); in rk_iommu_enable()
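rk_iommu_enable()/rk_iommu_disable() bracket all reprogramming with a stall: enable turns on the clocks, stalls the MMU, force-resets it, points every instance at the domain's directory table, unmasks the interrupts, enables paging and releases the stall; disable is the inverse (stall, disable paging, mask interrupts, clear DTE_ADDR, unstall). A sketch of the enable path, assuming to_rk_domain() and the rk_domain->dt_dma directory-table address come from surrounding driver code not shown in this listing:

static int rk_iommu_enable(struct rk_iommu *iommu)
{
        struct iommu_domain *domain = iommu->domain;
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        int ret, i;

        ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
        if (ret)
                return ret;

        /* Stall the MMU so it is safe to reprogram. */
        ret = rk_iommu_enable_stall(iommu);
        if (ret)
                goto out_disable_clocks;

        ret = rk_iommu_force_reset(iommu);
        if (ret)
                goto out_disable_stall;

        /* Point every instance at the directory table, drop stale TLB
         * entries and unmask the interrupts this driver handles. */
        for (i = 0; i < iommu->num_mmu; i++) {
                rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
                               rk_domain->dt_dma);
                rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
                rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
        }

        ret = rk_iommu_enable_paging(iommu);

out_disable_stall:
        rk_iommu_disable_stall(iommu);
out_disable_clocks:
        clk_bulk_disable(iommu->num_clocks, iommu->clocks);
        return ret;
}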
986 struct rk_iommu *iommu; in rk_iommu_detach_device() local
992 iommu = rk_iommu_from_dev(dev); in rk_iommu_detach_device()
993 if (!iommu) in rk_iommu_detach_device()
999 if (iommu->domain != domain) in rk_iommu_detach_device()
1002 iommu->domain = NULL; in rk_iommu_detach_device()
1005 list_del_init(&iommu->node); in rk_iommu_detach_device()
1008 ret = pm_runtime_get_if_in_use(iommu->dev); in rk_iommu_detach_device()
1011 rk_iommu_disable(iommu); in rk_iommu_detach_device()
1012 pm_runtime_put(iommu->dev); in rk_iommu_detach_device()
1019 struct rk_iommu *iommu; in rk_iommu_attach_device() local
1028 iommu = rk_iommu_from_dev(dev); in rk_iommu_attach_device()
1029 if (!iommu) in rk_iommu_attach_device()
1035 if (iommu->domain == domain) in rk_iommu_attach_device()
1038 if (iommu->domain) in rk_iommu_attach_device()
1039 rk_iommu_detach_device(iommu->domain, dev); in rk_iommu_attach_device()
1041 iommu->domain = domain; in rk_iommu_attach_device()
1044 list_add_tail(&iommu->node, &rk_domain->iommus); in rk_iommu_attach_device()
1047 ret = pm_runtime_get_if_in_use(iommu->dev); in rk_iommu_attach_device()
1051 ret = rk_iommu_enable(iommu); in rk_iommu_attach_device()
1053 rk_iommu_detach_device(iommu->domain, dev); in rk_iommu_attach_device()
1055 pm_runtime_put(iommu->dev); in rk_iommu_attach_device()
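Attach/detach keep the hardware work conditional on runtime PM: the bookkeeping (iommu->domain and the domain's iommus list) is always updated, but rk_iommu_enable()/rk_iommu_disable() run only if the IOMMU is currently powered; otherwise the actual programming is deferred to rk_iommu_resume()/rk_iommu_suspend() at the bottom of the file. A sketch of the attach path under those assumptions:

static int rk_iommu_attach_device(struct iommu_domain *domain,
                                  struct device *dev)
{
        struct rk_iommu *iommu = rk_iommu_from_dev(dev);
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
        int ret;

        /* Devices without an IOMMU behind them are simply not translated. */
        if (!iommu)
                return 0;

        if (iommu->domain == domain)
                return 0;

        if (iommu->domain)
                rk_iommu_detach_device(iommu->domain, dev);

        iommu->domain = domain;

        spin_lock_irqsave(&rk_domain->iommus_lock, flags);
        list_add_tail(&iommu->node, &rk_domain->iommus);
        spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

        /* If the IOMMU is powered down, defer the hardware setup to
         * rk_iommu_resume(), which calls rk_iommu_enable(). */
        ret = pm_runtime_get_if_in_use(iommu->dev);
        if (!ret || WARN_ON_ONCE(ret < 0))
                return 0;

        ret = rk_iommu_enable(iommu);
        if (ret)
                rk_iommu_detach_device(iommu->domain, dev);

        pm_runtime_put(iommu->dev);
        return ret;
}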
1136 struct rk_iommu *iommu; in rk_iommu_probe_device() local
1142 iommu = rk_iommu_from_dev(dev); in rk_iommu_probe_device()
1144 data->link = device_link_add(dev, iommu->dev, in rk_iommu_probe_device()
1147 return &iommu->iommu; in rk_iommu_probe_device()
1159 struct rk_iommu *iommu; in rk_iommu_device_group() local
1161 iommu = rk_iommu_from_dev(dev); in rk_iommu_device_group()
1163 return iommu_group_ref_get(iommu->group); in rk_iommu_device_group()
1178 data->iommu = platform_get_drvdata(iommu_dev); in rk_iommu_of_xlate()
1205 struct rk_iommu *iommu; in rk_iommu_probe() local
1211 iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); in rk_iommu_probe()
1212 if (!iommu) in rk_iommu_probe()
1215 platform_set_drvdata(pdev, iommu); in rk_iommu_probe()
1216 iommu->dev = dev; in rk_iommu_probe()
1217 iommu->num_mmu = 0; in rk_iommu_probe()
1230 iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases), in rk_iommu_probe()
1232 if (!iommu->bases) in rk_iommu_probe()
1239 iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res); in rk_iommu_probe()
1240 if (IS_ERR(iommu->bases[i])) in rk_iommu_probe()
1242 iommu->num_mmu++; in rk_iommu_probe()
1244 if (iommu->num_mmu == 0) in rk_iommu_probe()
1245 return PTR_ERR(iommu->bases[0]); in rk_iommu_probe()
1247 iommu->num_irq = platform_irq_count(pdev); in rk_iommu_probe()
1248 if (iommu->num_irq < 0) in rk_iommu_probe()
1249 return iommu->num_irq; in rk_iommu_probe()
1251 iommu->reset_disabled = device_property_read_bool(dev, in rk_iommu_probe()
1254 iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks); in rk_iommu_probe()
1255 iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks, in rk_iommu_probe()
1256 sizeof(*iommu->clocks), GFP_KERNEL); in rk_iommu_probe()
1257 if (!iommu->clocks) in rk_iommu_probe()
1260 for (i = 0; i < iommu->num_clocks; ++i) in rk_iommu_probe()
1261 iommu->clocks[i].id = rk_iommu_clocks[i]; in rk_iommu_probe()
1268 err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks); in rk_iommu_probe()
1270 iommu->num_clocks = 0; in rk_iommu_probe()
1274 err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks); in rk_iommu_probe()
1278 iommu->group = iommu_group_alloc(); in rk_iommu_probe()
1279 if (IS_ERR(iommu->group)) { in rk_iommu_probe()
1280 err = PTR_ERR(iommu->group); in rk_iommu_probe()
1284 err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev)); in rk_iommu_probe()
1288 err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev); in rk_iommu_probe()
1302 for (i = 0; i < iommu->num_irq; i++) { in rk_iommu_probe()
1308 err = devm_request_irq(iommu->dev, irq, rk_iommu_irq, in rk_iommu_probe()
1309 IRQF_SHARED, dev_name(dev), iommu); in rk_iommu_probe()
1320 iommu_device_sysfs_remove(&iommu->iommu); in rk_iommu_probe()
1322 iommu_group_put(iommu->group); in rk_iommu_probe()
1324 clk_bulk_unprepare(iommu->num_clocks, iommu->clocks); in rk_iommu_probe()
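probe() maps one MMIO block per "reg" entry and counts the successfully mapped instances in iommu->num_mmu; individual entries may be missing, but zero mapped instances is fatal. It then reads the interrupt count, the rockchip,disable-mmu-reset property and the fixed clock list before registering with the IOMMU core. A condensed sketch of the early part of probe (num_res standing in for the platform device's resource count; later steps and error paths abridged):

static int rk_iommu_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct rk_iommu *iommu;
        struct resource *res;
        int num_res = pdev->num_resources;
        int i;

        iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        platform_set_drvdata(pdev, iommu);
        iommu->dev = dev;
        iommu->num_mmu = 0;

        /* One MMIO block per possible MMU instance; entries that fail to
         * map are skipped, but at least one must succeed. */
        iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
                                    GFP_KERNEL);
        if (!iommu->bases)
                return -ENOMEM;

        for (i = 0; i < num_res; i++) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, i);
                if (!res)
                        continue;
                iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
                if (IS_ERR(iommu->bases[i]))
                        continue;
                iommu->num_mmu++;
        }
        if (iommu->num_mmu == 0)
                return PTR_ERR(iommu->bases[0]);

        /* ... IRQ count, reset_disabled property, clock bulk get/prepare,
         * group allocation, sysfs add and iommu_device_register() follow,
         * as in the listed lines above. */
        return 0;
}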
1330 struct rk_iommu *iommu = platform_get_drvdata(pdev); in rk_iommu_shutdown() local
1333 for (i = 0; i < iommu->num_irq; i++) { in rk_iommu_shutdown()
1336 devm_free_irq(iommu->dev, irq, iommu); in rk_iommu_shutdown()
1344 struct rk_iommu *iommu = dev_get_drvdata(dev); in rk_iommu_suspend() local
1346 if (!iommu->domain) in rk_iommu_suspend()
1349 rk_iommu_disable(iommu); in rk_iommu_suspend()
1355 struct rk_iommu *iommu = dev_get_drvdata(dev); in rk_iommu_resume() local
1357 if (!iommu->domain) in rk_iommu_resume()
1360 return rk_iommu_enable(iommu); in rk_iommu_resume()
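The suspend/resume hooks are what make the deferred enable in attach/detach work: if a domain is attached, runtime suspend tears the translation down and runtime resume reprograms the hardware from the attached domain. A sketch, assuming these are wired up as the driver's runtime-PM callbacks:

static int __maybe_unused rk_iommu_suspend(struct device *dev)
{
        struct rk_iommu *iommu = dev_get_drvdata(dev);

        /* Nothing to tear down if no domain is attached. */
        if (!iommu->domain)
                return 0;

        rk_iommu_disable(iommu);
        return 0;
}

static int __maybe_unused rk_iommu_resume(struct device *dev)
{
        struct rk_iommu *iommu = dev_get_drvdata(dev);

        if (!iommu->domain)
                return 0;

        /* Re-enable translation using the attached domain's page tables. */
        return rk_iommu_enable(iommu);
}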