Lines Matching refs:xe

76 	struct xe_device *xe = to_xe_device(dev);  in xe_file_open()  local
94 xef->xe = xe; in xe_file_open()
157 struct xe_device *xe = to_xe_device(dev); in xe_file_close() local
163 xe_pm_runtime_get(xe); in xe_file_close()
182 xe_pm_runtime_put(xe); in xe_file_close()
208 struct xe_device *xe = to_xe_device(file_priv->minor->dev); in xe_drm_ioctl() local
211 if (xe_device_wedged(xe)) in xe_drm_ioctl()
214 ret = xe_pm_runtime_get_ioctl(xe); in xe_drm_ioctl()
217 xe_pm_runtime_put(xe); in xe_drm_ioctl()
226 struct xe_device *xe = to_xe_device(file_priv->minor->dev); in xe_drm_compat_ioctl() local
229 if (xe_device_wedged(xe)) in xe_drm_compat_ioctl()
232 ret = xe_pm_runtime_get_ioctl(xe); in xe_drm_compat_ioctl()
235 xe_pm_runtime_put(xe); in xe_drm_compat_ioctl()
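
The ioctl entries above (and the open/close pair before them) share one gatekeeping pattern: refuse new work on a wedged device, then bracket the dispatch in a runtime-PM reference. A minimal sketch of that pattern, assuming pm_runtime_get-style semantics where the usage count is held even when the get fails; my_dispatch() is a hypothetical stand-in for drm_ioctl()/drm_compat_ioctl():

#include <drm/drm_device.h>
#include <drm/drm_file.h>

static long my_xe_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = file->private_data;
	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
	long ret;

	/* A wedged device accepts no new ioctls. */
	if (xe_device_wedged(xe))
		return -ECANCELED;

	ret = xe_pm_runtime_get_ioctl(xe);
	if (ret >= 0)
		ret = my_dispatch(file, cmd, arg);
	/* Assumption: the usage count is held even on a failed get, so always put. */
	xe_pm_runtime_put(xe);

	return ret;
}
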
310 struct xe_device *xe = to_xe_device(dev); in xe_pci_barrier_mmap() local
312 if (!IS_DGFX(xe)) in xe_pci_barrier_mmap()
395 struct xe_device *xe = to_xe_device(dev); in xe_device_destroy() local
397 xe_bo_dev_fini(&xe->bo_device); in xe_device_destroy()
399 if (xe->preempt_fence_wq) in xe_device_destroy()
400 destroy_workqueue(xe->preempt_fence_wq); in xe_device_destroy()
402 if (xe->ordered_wq) in xe_device_destroy()
403 destroy_workqueue(xe->ordered_wq); in xe_device_destroy()
405 if (xe->unordered_wq) in xe_device_destroy()
406 destroy_workqueue(xe->unordered_wq); in xe_device_destroy()
408 if (xe->destroy_wq) in xe_device_destroy()
409 destroy_workqueue(xe->destroy_wq); in xe_device_destroy()
411 ttm_device_fini(&xe->ttm); in xe_device_destroy()
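
The destroy path above tears down only what creation actually allocated, which is why every workqueue is NULL-checked first: destroy_workqueue() does not tolerate a NULL argument. A small sketch of that defensive teardown, with hypothetical field names:

#include <linux/workqueue.h>

/* Hypothetical container mirroring the workqueue fields torn down above. */
struct my_dev {
	struct workqueue_struct *ordered_wq;
	struct workqueue_struct *unordered_wq;
};

static void my_dev_destroy(struct my_dev *d)
{
	/*
	 * Creation may have failed part-way, so destroy only the queues
	 * that were actually allocated.
	 */
	if (d->ordered_wq)
		destroy_workqueue(d->ordered_wq);
	if (d->unordered_wq)
		destroy_workqueue(d->unordered_wq);
}
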
417 struct xe_device *xe; in xe_device_create() local
426 xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm); in xe_device_create()
427 if (IS_ERR(xe)) in xe_device_create()
428 return xe; in xe_device_create()
430 err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev, in xe_device_create()
431 xe->drm.anon_inode->i_mapping, in xe_device_create()
432 xe->drm.vma_offset_manager, false, false); in xe_device_create()
436 xe_bo_dev_init(&xe->bo_device); in xe_device_create()
437 err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL); in xe_device_create()
441 err = xe_shrinker_create(xe); in xe_device_create()
445 xe->info.devid = pdev->device; in xe_device_create()
446 xe->info.revid = pdev->revision; in xe_device_create()
447 xe->info.force_execlist = xe_modparam.force_execlist; in xe_device_create()
448 xe->atomic_svm_timeslice_ms = 5; in xe_device_create()
450 err = xe_irq_init(xe); in xe_device_create()
454 init_waitqueue_head(&xe->ufence_wq); in xe_device_create()
456 init_rwsem(&xe->usm.lock); in xe_device_create()
458 xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC); in xe_device_create()
465 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL, in xe_device_create()
467 &xe->usm.next_asid, GFP_KERNEL); in xe_device_create()
468 drm_WARN_ON(&xe->drm, err); in xe_device_create()
470 xa_erase(&xe->usm.asid_to_vm, asid); in xe_device_create()
473 err = xe_bo_pinned_init(xe); in xe_device_create()
477 xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", in xe_device_create()
479 xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0); in xe_device_create()
480 xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0); in xe_device_create()
481 xe->destroy_wq = alloc_workqueue("xe-destroy-wq", 0, 0); in xe_device_create()
482 if (!xe->ordered_wq || !xe->unordered_wq || in xe_device_create()
483 !xe->preempt_fence_wq || !xe->destroy_wq) { in xe_device_create()
488 drm_err(&xe->drm, "Failed to allocate xe workqueues\n"); in xe_device_create()
493 err = drmm_mutex_init(&xe->drm, &xe->pmt.lock); in xe_device_create()
497 return xe; in xe_device_create()
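
xe_device_create() leans on DRM's managed-resource infrastructure: the device is allocated with devm_drm_dev_alloc(), and cleanup is registered as a drmm_ action that runs when the last drm_device reference drops. A compressed sketch of that skeleton, assuming only the DRM managed API; my_device/my_device_destroy are hypothetical:

#include <linux/err.h>
#include <linux/pci.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

struct my_device {
	struct drm_device drm;	/* member name must match the alloc below */
};

static void my_device_destroy(struct drm_device *drm, void *unused)
{
	/* Invoked automatically once the last drm_device reference drops. */
}

static struct my_device *my_device_create(struct pci_dev *pdev,
					  const struct drm_driver *driver)
{
	struct my_device *md;
	int err;

	/* Allocation is device-managed; no explicit kfree() on any path. */
	md = devm_drm_dev_alloc(&pdev->dev, driver, struct my_device, drm);
	if (IS_ERR(md))
		return md;

	/* "or_reset": on registration failure the action runs right away. */
	err = drmm_add_action_or_reset(&md->drm, my_device_destroy, NULL);
	if (err)
		return ERR_PTR(err);

	return md;
}
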
504 static bool xe_driver_flr_disabled(struct xe_device *xe) in xe_driver_flr_disabled() argument
506 if (IS_SRIOV_VF(xe)) in xe_driver_flr_disabled()
509 if (xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) { in xe_driver_flr_disabled()
510 drm_info(&xe->drm, "Driver-FLR disabled by BIOS\n"); in xe_driver_flr_disabled()
530 static void __xe_driver_flr(struct xe_device *xe) in __xe_driver_flr() argument
533 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in __xe_driver_flr()
536 drm_dbg(&xe->drm, "Triggering Driver-FLR\n"); in __xe_driver_flr()
549 drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret); in __xe_driver_flr()
560 drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret); in __xe_driver_flr()
568 drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret); in __xe_driver_flr()
576 static void xe_driver_flr(struct xe_device *xe) in xe_driver_flr() argument
578 if (xe_driver_flr_disabled(xe)) in xe_driver_flr()
581 __xe_driver_flr(xe); in xe_driver_flr()
586 struct xe_device *xe = arg; in xe_driver_flr_fini() local
588 if (xe->needs_flr_on_fini) in xe_driver_flr_fini()
589 xe_driver_flr(xe); in xe_driver_flr_fini()
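
The Driver-FLR references above split into a BIOS-disable check, the actual reset sequence, and a teardown hook. The hook is armed lazily through needs_flr_on_fini (set, for example, by the wedged path near the end of this file) and is tied to unbind via devm. A sketch of that hook, under the assumption that xe_driver_flr() internally skips itself when FLR is BIOS-disabled, as the refs suggest:

#include <linux/device.h>

static void my_driver_flr_fini(void *arg)
{
	struct xe_device *xe = arg;

	/* Only reset on teardown if some runtime path requested it. */
	if (xe->needs_flr_on_fini)
		xe_driver_flr(xe);	/* no-op when BIOS disabled Driver-FLR */
}

/* In probe, mirroring line 808 of the listing: */
/*	err = devm_add_action_or_reset(xe->drm.dev, my_driver_flr_fini, xe); */
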
594 struct xe_device *xe = arg; in xe_device_sanitize() local
598 for_each_gt(gt, xe, id) in xe_device_sanitize()
602 static int xe_set_dma_info(struct xe_device *xe) in xe_set_dma_info() argument
604 unsigned int mask_size = xe->info.dma_mask_size; in xe_set_dma_info()
607 dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev)); in xe_set_dma_info()
609 err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size)); in xe_set_dma_info()
613 err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size)); in xe_set_dma_info()
620 drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err); in xe_set_dma_info()
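
xe_set_dma_info() configures the streaming and coherent DMA masks from a per-device mask width, failing probe if either cannot be satisfied (the real path also caps the scatterlist segment size via dma_set_max_seg_size()). A minimal sketch of the two-step mask setup, assuming mask_size comes from device info as above:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int my_set_dma_info(struct device *dev, unsigned int mask_size)
{
	int err;

	err = dma_set_mask(dev, DMA_BIT_MASK(mask_size));
	if (err)
		goto mask_err;

	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(mask_size));
	if (err)
		goto mask_err;

	return 0;

mask_err:
	dev_err(dev, "Can't set DMA mask/consistent mask (%d)\n", err);
	return err;
}
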
624 static bool verify_lmem_ready(struct xe_device *xe) in verify_lmem_ready() argument
626 u32 val = xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT; in verify_lmem_ready()
631 static int wait_for_lmem_ready(struct xe_device *xe) in wait_for_lmem_ready() argument
635 if (!IS_DGFX(xe)) in wait_for_lmem_ready()
638 if (IS_SRIOV_VF(xe)) in wait_for_lmem_ready()
641 if (verify_lmem_ready(xe)) in wait_for_lmem_ready()
644 drm_dbg(&xe->drm, "Waiting for lmem initialization\n"); in wait_for_lmem_ready()
664 drm_dbg(&xe->drm, "lmem not initialized by firmware\n"); in wait_for_lmem_ready()
670 } while (!verify_lmem_ready(xe)); in wait_for_lmem_ready()
672 drm_dbg(&xe->drm, "lmem ready after %ums", in wait_for_lmem_ready()
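
wait_for_lmem_ready() only applies to discrete parts (and not to VFs, whose LMEM is initialized by the PF). The refs show a poll on a readiness bit with a debug trace of the elapsed time. A sketch of that poll loop, reusing verify_lmem_ready() from the listing; the 60 s budget and 20 ms sleep are assumptions for illustration:

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <drm/drm_print.h>

static int my_wait_for_lmem_ready(struct xe_device *xe)
{
	unsigned long start = jiffies;
	unsigned long timeout = start + msecs_to_jiffies(60 * 1000);

	while (!verify_lmem_ready(xe)) {
		if (time_after(jiffies, timeout)) {
			/* Firmware never flipped LMEM_INIT in GU_CNTL. */
			drm_dbg(&xe->drm, "lmem not initialized by firmware\n");
			return -ETIMEDOUT;
		}
		msleep(20);
	}

	drm_dbg(&xe->drm, "lmem ready after %ums",
		jiffies_to_msecs(jiffies - start));
	return 0;
}
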
679 static void sriov_update_device_info(struct xe_device *xe) in sriov_update_device_info() argument
682 if (IS_SRIOV_VF(xe)) { in sriov_update_device_info()
683 xe->info.probe_display = 0; in sriov_update_device_info()
684 xe->info.has_heci_cscfi = 0; in sriov_update_device_info()
685 xe->info.has_heci_gscfi = 0; in sriov_update_device_info()
686 xe->info.skip_guc_pc = 1; in sriov_update_device_info()
687 xe->info.skip_pcode = 1; in sriov_update_device_info()
701 int xe_device_probe_early(struct xe_device *xe) in xe_device_probe_early() argument
705 xe_wa_device_init(xe); in xe_device_probe_early()
706 xe_wa_process_device_oob(xe); in xe_device_probe_early()
708 err = xe_mmio_probe_early(xe); in xe_device_probe_early()
712 xe_sriov_probe_early(xe); in xe_device_probe_early()
714 sriov_update_device_info(xe); in xe_device_probe_early()
716 err = xe_pcode_probe_early(xe); in xe_device_probe_early()
717 if (err || xe_survivability_mode_is_requested(xe)) { in xe_device_probe_early()
725 err = xe_survivability_mode_enable(xe); in xe_device_probe_early()
732 err = wait_for_lmem_ready(xe); in xe_device_probe_early()
736 xe->wedged.mode = xe_modparam.wedged_mode; in xe_device_probe_early()
742 static int probe_has_flat_ccs(struct xe_device *xe) in probe_has_flat_ccs() argument
749 if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs || IS_SRIOV_VF(xe)) in probe_has_flat_ccs()
752 gt = xe_root_mmio_gt(xe); in probe_has_flat_ccs()
759 xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE); in probe_has_flat_ccs()
761 if (!xe->info.has_flat_ccs) in probe_has_flat_ccs()
762 drm_dbg(&xe->drm, in probe_has_flat_ccs()
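
probe_has_flat_ccs() shows a common probe idiom: a capability claimed by the static device-info table can still be fused off or disabled by firmware, so the flag is re-checked against what the hardware actually reports. A sketch of that downgrade; my_read_flat_ccs_enable() is a hypothetical stand-in for the forcewake-protected register read in the real function:

static void my_probe_has_flat_ccs(struct xe_device *xe)
{
	/* Nothing to probe: platform too old, flag already off, or a VF. */
	if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs || IS_SRIOV_VF(xe))
		return;

	/* Downgrade the info flag to what the hardware actually enabled. */
	xe->info.has_flat_ccs = my_read_flat_ccs_enable(xe_root_mmio_gt(xe));
	if (!xe->info.has_flat_ccs)
		drm_dbg(&xe->drm, "Flat CCS disabled by firmware\n");
}
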
770 int xe_device_probe(struct xe_device *xe) in xe_device_probe() argument
777 xe_pat_init_early(xe); in xe_device_probe()
779 err = xe_sriov_init(xe); in xe_device_probe()
783 xe->info.mem_region_mask = 1; in xe_device_probe()
785 err = xe_set_dma_info(xe); in xe_device_probe()
789 err = xe_mmio_probe_tiles(xe); in xe_device_probe()
793 for_each_gt(gt, xe, id) { in xe_device_probe()
799 for_each_tile(tile, xe, id) { in xe_device_probe()
808 err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe); in xe_device_probe()
812 err = probe_has_flat_ccs(xe); in xe_device_probe()
816 err = xe_vram_probe(xe); in xe_device_probe()
820 for_each_tile(tile, xe, id) { in xe_device_probe()
830 err = xe_ttm_sys_mgr_init(xe); in xe_device_probe()
835 err = xe_ttm_stolen_mgr_init(xe); in xe_device_probe()
845 err = xe_display_init_early(xe); in xe_device_probe()
849 for_each_tile(tile, xe, id) { in xe_device_probe()
855 err = xe_irq_install(xe); in xe_device_probe()
859 for_each_gt(gt, xe, id) { in xe_device_probe()
865 if (xe->tiles->media_gt && in xe_device_probe()
866 XE_WA(xe->tiles->media_gt, 15015404425_disable)) in xe_device_probe()
867 XE_DEVICE_WA_DISABLE(xe, 15015404425); in xe_device_probe()
869 err = xe_devcoredump_init(xe); in xe_device_probe()
873 xe_nvm_init(xe); in xe_device_probe()
875 err = xe_heci_gsc_init(xe); in xe_device_probe()
879 err = xe_oa_init(xe); in xe_device_probe()
883 err = xe_display_init(xe); in xe_device_probe()
887 err = xe_pxp_init(xe); in xe_device_probe()
891 err = drm_dev_register(&xe->drm, 0); in xe_device_probe()
895 xe_display_register(xe); in xe_device_probe()
897 err = xe_oa_register(xe); in xe_device_probe()
901 err = xe_pmu_register(&xe->pmu); in xe_device_probe()
905 err = xe_device_sysfs_init(xe); in xe_device_probe()
909 xe_debugfs_register(xe); in xe_device_probe()
911 err = xe_hwmon_register(xe); in xe_device_probe()
915 err = xe_i2c_probe(xe); in xe_device_probe()
919 for_each_gt(gt, xe, id) in xe_device_probe()
922 xe_vsec_init(xe); in xe_device_probe()
924 return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe); in xe_device_probe()
927 xe_display_unregister(xe); in xe_device_probe()
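
Note how xe_device_probe() ends: instead of returning 0, it returns devm_add_action_or_reset(..., xe_device_sanitize, xe), so final hardware sanitizing is armed as a device-managed action before probe can report success. A sketch of that tail idiom, with hypothetical names:

#include <linux/device.h>

static void my_sanitize(void *arg)
{
	/* Last-ditch hardware quiescing; runs on unbind. */
}

static int my_probe_tail(struct device *dev, void *drvdata)
{
	/*
	 * "or_reset": if registration fails, the action is invoked
	 * immediately and the error is returned, so probe cannot
	 * succeed without cleanup armed.
	 */
	return devm_add_action_or_reset(dev, my_sanitize, drvdata);
}
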
932 void xe_device_remove(struct xe_device *xe) in xe_device_remove() argument
934 xe_display_unregister(xe); in xe_device_remove()
936 xe_nvm_fini(xe); in xe_device_remove()
938 drm_dev_unplug(&xe->drm); in xe_device_remove()
940 xe_bo_pci_dev_remove_all(xe); in xe_device_remove()
943 void xe_device_shutdown(struct xe_device *xe) in xe_device_shutdown() argument
948 drm_dbg(&xe->drm, "Shutting down device\n"); in xe_device_shutdown()
950 if (xe_driver_flr_disabled(xe)) { in xe_device_shutdown()
951 xe_display_pm_shutdown(xe); in xe_device_shutdown()
953 xe_irq_suspend(xe); in xe_device_shutdown()
955 for_each_gt(gt, xe, id) in xe_device_shutdown()
958 xe_display_pm_shutdown_late(xe); in xe_device_shutdown()
961 __xe_driver_flr(xe); in xe_device_shutdown()
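
The shutdown refs show a two-way split: when Driver-FLR is fused off, each block (display, interrupts, GTs) must be quiesced by hand; otherwise a single FLR resets the device wholesale. A sketch of that branching, reusing names from the listing; xe_gt_shutdown() as the per-GT hook is an assumption here:

static void my_device_shutdown(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;

	drm_dbg(&xe->drm, "Shutting down device\n");

	if (xe_driver_flr_disabled(xe)) {
		/* No FLR available: quiesce each block explicitly. */
		xe_display_pm_shutdown(xe);
		xe_irq_suspend(xe);
		for_each_gt(gt, xe, id)
			xe_gt_shutdown(gt);	/* assumed per-GT hook */
		xe_display_pm_shutdown_late(xe);
	} else {
		/* One Driver-FLR resets everything at once. */
		__xe_driver_flr(xe);
	}
}
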
974 void xe_device_wmb(struct xe_device *xe) in xe_device_wmb() argument
977 if (IS_DGFX(xe)) in xe_device_wmb()
978 xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0); in xe_device_wmb()
984 static void tdf_request_sync(struct xe_device *xe) in tdf_request_sync() argument
990 for_each_gt(gt, xe, id) { in tdf_request_sync()
1015 void xe_device_l2_flush(struct xe_device *xe) in xe_device_l2_flush() argument
1020 gt = xe_root_mmio_gt(xe); in xe_device_l2_flush()
1058 void xe_device_td_flush(struct xe_device *xe) in xe_device_td_flush() argument
1062 if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20) in xe_device_td_flush()
1065 root_gt = xe_root_mmio_gt(xe); in xe_device_td_flush()
1068 xe_device_l2_flush(xe); in xe_device_td_flush()
1071 tdf_request_sync(xe); in xe_device_td_flush()
1076 u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size) in xe_device_ccs_bytes() argument
1078 return xe_device_has_flat_ccs(xe) ? in xe_device_ccs_bytes()
1079 DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0; in xe_device_ccs_bytes()
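
xe_device_ccs_bytes() sizes the compression metadata: one CCS byte covers a fixed number of main-memory bytes, and the division rounds up so partial blocks still get coverage. A worked userspace example of the arithmetic; the 256:1 ratio is an assumption taken from the pre-Xe2 NUM_BYTES_PER_CCS_BYTE value, so treat the constant as illustrative:

#include <stdint.h>
#include <stdio.h>

#define BYTES_PER_CCS_BYTE 256ULL	/* assumed ratio, platform-dependent */

static uint64_t ccs_bytes(uint64_t size)
{
	/* Equivalent of DIV_ROUND_UP_ULL(size, BYTES_PER_CCS_BYTE). */
	return (size + BYTES_PER_CCS_BYTE - 1) / BYTES_PER_CCS_BYTE;
}

int main(void)
{
	/* A 2 MiB buffer needs 8192 CCS bytes at a 256:1 ratio. */
	printf("%llu\n", (unsigned long long)ccs_bytes(2ULL << 20));
	return 0;
}
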
1093 void xe_device_assert_mem_access(struct xe_device *xe) in xe_device_assert_mem_access() argument
1095 xe_assert(xe, !xe_pm_runtime_suspended(xe)); in xe_device_assert_mem_access()
1098 void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p) in xe_device_snapshot_print() argument
1103 drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid); in xe_device_snapshot_print()
1104 drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid); in xe_device_snapshot_print()
1106 for_each_gt(gt, xe, id) { in xe_device_snapshot_print()
1119 u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address) in xe_device_canonicalize_addr() argument
1121 return sign_extend64(address, xe->info.va_bits - 1); in xe_device_canonicalize_addr()
1124 u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address) in xe_device_uncanonicalize_addr() argument
1126 return address & GENMASK_ULL(xe->info.va_bits - 1, 0); in xe_device_uncanonicalize_addr()
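
The canonicalize/uncanonicalize pair implements the usual canonical-address scheme: the top bits of a virtual address must be copies of bit (va_bits - 1), so sign_extend64() produces the canonical form and GENMASK_ULL() strips it back off. A self-contained userspace illustration for a hypothetical 48-bit VA space:

#include <stdint.h>
#include <stdio.h>

#define VA_BITS 48

static uint64_t canonicalize(uint64_t addr)
{
	/* Sign-extend from bit (VA_BITS - 1), like sign_extend64(). */
	uint64_t sign = 1ULL << (VA_BITS - 1);
	return (addr ^ sign) - sign;
}

static uint64_t uncanonicalize(uint64_t addr)
{
	/* Mask to the low VA_BITS, like GENMASK_ULL(VA_BITS - 1, 0). */
	return addr & ((1ULL << VA_BITS) - 1);
}

int main(void)
{
	uint64_t a = 0x0000800000000000ULL;	/* bit 47 set */

	/* Upper bits become copies of bit 47: 0xffff800000000000. */
	printf("%016llx\n", (unsigned long long)canonicalize(a));
	/* Round-trips back to the original: 0x0000800000000000. */
	printf("%016llx\n", (unsigned long long)uncanonicalize(canonicalize(a)));
	return 0;
}
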
1131 struct xe_device *xe = arg; in xe_device_wedged_fini() local
1133 xe_pm_runtime_put(xe); in xe_device_wedged_fini()
1151 void xe_device_declare_wedged(struct xe_device *xe) in xe_device_declare_wedged() argument
1156 if (xe->wedged.mode == 0) { in xe_device_declare_wedged()
1157 drm_dbg(&xe->drm, "Wedged mode is forcibly disabled\n"); in xe_device_declare_wedged()
1161 xe_pm_runtime_get_noresume(xe); in xe_device_declare_wedged()
1163 if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) { in xe_device_declare_wedged()
1164 drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n"); in xe_device_declare_wedged()
1168 if (!atomic_xchg(&xe->wedged.flag, 1)) { in xe_device_declare_wedged()
1169 xe->needs_flr_on_fini = true; in xe_device_declare_wedged()
1170 drm_err(&xe->drm, in xe_device_declare_wedged()
1174 dev_name(xe->drm.dev)); in xe_device_declare_wedged()
1177 drm_dev_wedged_event(&xe->drm, in xe_device_declare_wedged()
1182 for_each_gt(gt, xe, id) in xe_device_declare_wedged()
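
xe_device_declare_wedged() can be reached from several failure paths, so atomic_xchg() makes declaring the wedge a one-shot: only the first caller sees the old value 0 and performs the heavyweight work (arming FLR-on-fini, logging, sending the wedged uevent). A sketch of that idiom, with a shortened log message:

#include <linux/atomic.h>
#include <drm/drm_print.h>

static void my_declare_wedged(struct xe_device *xe)
{
	/* First caller wins; every later caller sees the flag already set. */
	if (!atomic_xchg(&xe->wedged.flag, 1)) {
		xe->needs_flr_on_fini = true;
		drm_err(&xe->drm, "CRITICAL: device %s is wedged\n",
			dev_name(xe->drm.dev));
	}
}
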