Lines matching refs:xe — each entry shows the original source line number, the code on that line, and the enclosing function; declaration lines are additionally tagged "argument" (function parameter) or "local" (local variable).
92 bool xe_rpm_reclaim_safe(const struct xe_device *xe) in xe_rpm_reclaim_safe() argument
94 return !xe->d3cold.capable; in xe_rpm_reclaim_safe()
97 static void xe_rpm_lockmap_acquire(const struct xe_device *xe) in xe_rpm_lockmap_acquire() argument
99 lock_map_acquire(xe_rpm_reclaim_safe(xe) ? in xe_rpm_lockmap_acquire()
104 static void xe_rpm_lockmap_release(const struct xe_device *xe) in xe_rpm_lockmap_release() argument
106 lock_map_release(xe_rpm_reclaim_safe(xe) ? in xe_rpm_lockmap_release()
117 int xe_pm_suspend(struct xe_device *xe) in xe_pm_suspend() argument
123 drm_dbg(&xe->drm, "Suspending device\n"); in xe_pm_suspend()
124 trace_xe_pm_suspend(xe, __builtin_return_address(0)); in xe_pm_suspend()
126 err = xe_pxp_pm_suspend(xe->pxp); in xe_pm_suspend()
130 for_each_gt(gt, xe, id) in xe_pm_suspend()
133 xe_display_pm_suspend(xe); in xe_pm_suspend()
136 err = xe_bo_evict_all(xe); in xe_pm_suspend()
140 for_each_gt(gt, xe, id) { in xe_pm_suspend()
146 xe_irq_suspend(xe); in xe_pm_suspend()
148 xe_display_pm_suspend_late(xe); in xe_pm_suspend()
150 xe_i2c_pm_suspend(xe); in xe_pm_suspend()
152 drm_dbg(&xe->drm, "Device suspended\n"); in xe_pm_suspend()
156 xe_display_pm_resume(xe); in xe_pm_suspend()
157 xe_pxp_pm_resume(xe->pxp); in xe_pm_suspend()
159 drm_dbg(&xe->drm, "Device suspend failed %d\n", err); in xe_pm_suspend()
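The xe_pm_suspend() references above (lines 117-159) imply a strict ordering with a two-stage error unwind. A hedged reconstruction of the control flow; the goto labels, loop bodies, and early-return shape are assumptions filled in from the visible line numbering, and the trace call is omitted:

int xe_pm_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Suspending device\n");

	err = xe_pxp_pm_suspend(xe->pxp);
	if (err)
		goto err;

	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);	/* assumed loop body */

	xe_display_pm_suspend(xe);

	/* Evict everything from VRAM before power is cut. */
	err = xe_bo_evict_all(xe);
	if (err)
		goto err_display;

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);	/* assumed loop body */
		if (err)
			goto err_display;
	}

	xe_irq_suspend(xe);
	xe_display_pm_suspend_late(xe);
	xe_i2c_pm_suspend(xe);

	drm_dbg(&xe->drm, "Device suspended\n");
	return 0;

err_display:
	xe_display_pm_resume(xe);
	xe_pxp_pm_resume(xe->pxp);
err:
	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
	return err;
}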
169 int xe_pm_resume(struct xe_device *xe) in xe_pm_resume() argument
176 drm_dbg(&xe->drm, "Resuming device\n"); in xe_pm_resume()
177 trace_xe_pm_resume(xe, __builtin_return_address(0)); in xe_pm_resume()
179 for_each_tile(tile, xe, id) in xe_pm_resume()
182 err = xe_pcode_ready(xe, true); in xe_pm_resume()
186 xe_display_pm_resume_early(xe); in xe_pm_resume()
192 err = xe_bo_restore_early(xe); in xe_pm_resume()
196 xe_i2c_pm_resume(xe, xe->d3cold.allowed); in xe_pm_resume()
198 xe_irq_resume(xe); in xe_pm_resume()
200 for_each_gt(gt, xe, id) in xe_pm_resume()
203 xe_display_pm_resume(xe); in xe_pm_resume()
205 err = xe_bo_restore_late(xe); in xe_pm_resume()
209 xe_pxp_pm_resume(xe->pxp); in xe_pm_resume()
211 drm_dbg(&xe->drm, "Device resumed\n"); in xe_pm_resume()
214 drm_dbg(&xe->drm, "Device resume failed %d\n", err); in xe_pm_resume()
218 static bool xe_pm_pci_d3cold_capable(struct xe_device *xe) in xe_pm_pci_d3cold_capable() argument
220 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in xe_pm_pci_d3cold_capable()
229 drm_dbg(&xe->drm, "d3cold: PME# not supported\n"); in xe_pm_pci_d3cold_capable()
235 drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n"); in xe_pm_pci_d3cold_capable()
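The two debug messages at lines 229 and 235 suggest the usual PCI/ACPI gate for D3cold: the root port must support PME# signalling from D3cold and the platform must expose ACPI _PR3. A sketch using the standard PCI helpers; the pcie_find_root_port() step is an assumption, since the listing only shows the failure messages:

#include <linux/pci.h>

static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *root_pdev;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return false;

	/* D3cold requires PME# wakeup capability from the D3cold state... */
	if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
		drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
		return false;
	}

	/* ...and platform power control advertised via ACPI _PR3. */
	if (!pci_pr3_present(root_pdev)) {
		drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
		return false;
	}

	return true;
}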
242 static void xe_pm_runtime_init(struct xe_device *xe) in xe_pm_runtime_init() argument
244 struct device *dev = xe->drm.dev; in xe_pm_runtime_init()
254 if (IS_DGFX(xe)) in xe_pm_runtime_init()
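xe_pm_runtime_init() (lines 242-254) looks like standard runtime-PM bring-up plus a discrete-GPU-only quirk at line 254. A sketch of that shape; the DPM flag and the 1000 ms autosuspend delay are assumptions, since the listing shows neither:

static void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	/*
	 * Assumed: discrete cards opt out of the system-suspend
	 * "direct complete" optimization so the full suspend path
	 * always runs and VRAM eviction is guaranteed.
	 */
	if (IS_DGFX(xe))
		dev->power.driver_flags |= DPM_FLAG_NO_DIRECT_COMPLETE;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);	/* assumed delay */
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);
}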
265 int xe_pm_init_early(struct xe_device *xe) in xe_pm_init_early() argument
269 INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list); in xe_pm_init_early()
271 err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock); in xe_pm_init_early()
275 err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock); in xe_pm_init_early()
279 xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe); in xe_pm_init_early()
284 static u32 vram_threshold_value(struct xe_device *xe) in vram_threshold_value() argument
287 if (xe->info.platform == XE_BATTLEMAGE) in vram_threshold_value()
296 struct xe_device *xe = container_of(nb, struct xe_device, pm_notifier); in xe_pm_notifier_callback() local
302 xe_pm_runtime_get(xe); in xe_pm_notifier_callback()
303 err = xe_bo_evict_all_user(xe); in xe_pm_notifier_callback()
305 drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err); in xe_pm_notifier_callback()
306 xe_pm_runtime_put(xe); in xe_pm_notifier_callback()
310 err = xe_bo_notifier_prepare_all_pinned(xe); in xe_pm_notifier_callback()
312 drm_dbg(&xe->drm, "Notifier prepare pin failed (%d)\n", err); in xe_pm_notifier_callback()
313 xe_pm_runtime_put(xe); in xe_pm_notifier_callback()
318 xe_bo_notifier_unprepare_all_pinned(xe); in xe_pm_notifier_callback()
319 xe_pm_runtime_put(xe); in xe_pm_notifier_callback()
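Lines 296-319 trace a system PM notifier that pre-evicts user BOs before suspend/hibernate and unwinds afterwards, holding a runtime-PM reference across the whole suspend window (the reference taken on PREPARE is only dropped on POST or on failure). A reconstruction; the switch cases and the NOTIFY_DONE return are assumptions based on standard pm-notifier usage, and real code may translate err into NOTIFY_BAD:

#include <linux/suspend.h>
#include <linux/notifier.h>

static int xe_pm_notifier_callback(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct xe_device *xe = container_of(nb, struct xe_device, pm_notifier);
	int err = 0;

	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		/* Keep the device awake while evicting; drop on failure. */
		xe_pm_runtime_get(xe);
		err = xe_bo_evict_all_user(xe);
		if (err) {
			drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err);
			xe_pm_runtime_put(xe);
			break;
		}

		err = xe_bo_notifier_prepare_all_pinned(xe);
		if (err) {
			drm_dbg(&xe->drm, "Notifier prepare pin failed (%d)\n", err);
			xe_pm_runtime_put(xe);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		xe_bo_notifier_unprepare_all_pinned(xe);
		xe_pm_runtime_put(xe);
		break;
	}

	return NOTIFY_DONE;
}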
337 int xe_pm_init(struct xe_device *xe) in xe_pm_init() argument
342 xe->pm_notifier.notifier_call = xe_pm_notifier_callback; in xe_pm_init()
343 err = register_pm_notifier(&xe->pm_notifier); in xe_pm_init()
348 if (!xe_device_uc_enabled(xe)) in xe_pm_init()
351 if (xe->d3cold.capable) { in xe_pm_init()
352 vram_threshold = vram_threshold_value(xe); in xe_pm_init()
353 err = xe_pm_set_vram_threshold(xe, vram_threshold); in xe_pm_init()
358 xe_pm_runtime_init(xe); in xe_pm_init()
362 unregister_pm_notifier(&xe->pm_notifier); in xe_pm_init()
366 static void xe_pm_runtime_fini(struct xe_device *xe) in xe_pm_runtime_fini() argument
368 struct device *dev = xe->drm.dev; in xe_pm_runtime_fini()
378 void xe_pm_fini(struct xe_device *xe) in xe_pm_fini() argument
380 if (xe_device_uc_enabled(xe)) in xe_pm_fini()
381 xe_pm_runtime_fini(xe); in xe_pm_fini()
383 unregister_pm_notifier(&xe->pm_notifier); in xe_pm_fini()
386 static void xe_pm_write_callback_task(struct xe_device *xe, in xe_pm_write_callback_task() argument
389 WRITE_ONCE(xe->pm_callback_task, task); in xe_pm_write_callback_task()
400 struct task_struct *xe_pm_read_callback_task(struct xe_device *xe) in xe_pm_read_callback_task() argument
404 return READ_ONCE(xe->pm_callback_task); in xe_pm_read_callback_task()
418 bool xe_pm_runtime_suspended(struct xe_device *xe) in xe_pm_runtime_suspended() argument
420 return pm_runtime_suspended(xe->drm.dev); in xe_pm_runtime_suspended()
429 int xe_pm_runtime_suspend(struct xe_device *xe) in xe_pm_runtime_suspend() argument
436 trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0)); in xe_pm_runtime_suspend()
438 xe_pm_write_callback_task(xe, current); in xe_pm_runtime_suspend()
461 xe_rpm_lockmap_acquire(xe); in xe_pm_runtime_suspend()
463 err = xe_pxp_pm_suspend(xe->pxp); in xe_pm_runtime_suspend()
471 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_pm_runtime_suspend()
473 &xe->mem_access.vram_userfault.list, vram_userfault_link) in xe_pm_runtime_suspend()
475 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_pm_runtime_suspend()
477 xe_display_pm_runtime_suspend(xe); in xe_pm_runtime_suspend()
479 if (xe->d3cold.allowed) { in xe_pm_runtime_suspend()
480 err = xe_bo_evict_all(xe); in xe_pm_runtime_suspend()
485 for_each_gt(gt, xe, id) { in xe_pm_runtime_suspend()
491 xe_irq_suspend(xe); in xe_pm_runtime_suspend()
493 xe_display_pm_runtime_suspend_late(xe); in xe_pm_runtime_suspend()
495 xe_i2c_pm_suspend(xe); in xe_pm_runtime_suspend()
497 xe_rpm_lockmap_release(xe); in xe_pm_runtime_suspend()
498 xe_pm_write_callback_task(xe, NULL); in xe_pm_runtime_suspend()
502 xe_display_pm_runtime_resume(xe); in xe_pm_runtime_suspend()
503 xe_pxp_pm_resume(xe->pxp); in xe_pm_runtime_suspend()
505 xe_rpm_lockmap_release(xe); in xe_pm_runtime_suspend()
506 xe_pm_write_callback_task(xe, NULL); in xe_pm_runtime_suspend()
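Both runtime callbacks bracket their work the same way (lines 438, 461, 497-506): record current as the callback task so re-entrant xe_pm_runtime_get() calls can be detected, wrap the body in the lockdep map, and clear both on every exit path. The skeleton, with the actual suspend work elided into a hypothetical helper:

int xe_pm_runtime_suspend(struct xe_device *xe)
{
	int err;

	/* Let xe_pm_runtime_get()/put() recognize re-entry from this task. */
	xe_pm_write_callback_task(xe, current);

	/* Model the runtime-PM transition as a lock for lockdep. */
	xe_rpm_lockmap_acquire(xe);

	err = do_runtime_suspend_work(xe);	/* hypothetical placeholder */

	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);

	return err;
}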
516 int xe_pm_runtime_resume(struct xe_device *xe) in xe_pm_runtime_resume() argument
522 trace_xe_pm_runtime_resume(xe, __builtin_return_address(0)); in xe_pm_runtime_resume()
524 xe_pm_write_callback_task(xe, current); in xe_pm_runtime_resume()
526 xe_rpm_lockmap_acquire(xe); in xe_pm_runtime_resume()
528 if (xe->d3cold.allowed) { in xe_pm_runtime_resume()
529 err = xe_pcode_ready(xe, true); in xe_pm_runtime_resume()
533 xe_display_pm_resume_early(xe); in xe_pm_runtime_resume()
539 err = xe_bo_restore_early(xe); in xe_pm_runtime_resume()
544 xe_i2c_pm_resume(xe, xe->d3cold.allowed); in xe_pm_runtime_resume()
546 xe_irq_resume(xe); in xe_pm_runtime_resume()
548 for_each_gt(gt, xe, id) in xe_pm_runtime_resume()
551 xe_display_pm_runtime_resume(xe); in xe_pm_runtime_resume()
553 if (xe->d3cold.allowed) { in xe_pm_runtime_resume()
554 err = xe_bo_restore_late(xe); in xe_pm_runtime_resume()
559 xe_pxp_pm_resume(xe->pxp); in xe_pm_runtime_resume()
562 xe_rpm_lockmap_release(xe); in xe_pm_runtime_resume()
563 xe_pm_write_callback_task(xe, NULL); in xe_pm_runtime_resume()
581 static void xe_rpm_might_enter_cb(const struct xe_device *xe) in xe_rpm_might_enter_cb() argument
583 xe_rpm_lockmap_acquire(xe); in xe_rpm_might_enter_cb()
584 xe_rpm_lockmap_release(xe); in xe_rpm_might_enter_cb()
613 void xe_pm_runtime_get(struct xe_device *xe) in xe_pm_runtime_get() argument
615 trace_xe_pm_runtime_get(xe, __builtin_return_address(0)); in xe_pm_runtime_get()
616 pm_runtime_get_noresume(xe->drm.dev); in xe_pm_runtime_get()
618 if (xe_pm_read_callback_task(xe) == current) in xe_pm_runtime_get()
621 xe_rpm_might_enter_cb(xe); in xe_pm_runtime_get()
622 pm_runtime_resume(xe->drm.dev); in xe_pm_runtime_get()
629 void xe_pm_runtime_put(struct xe_device *xe) in xe_pm_runtime_put() argument
631 trace_xe_pm_runtime_put(xe, __builtin_return_address(0)); in xe_pm_runtime_put()
632 if (xe_pm_read_callback_task(xe) == current) { in xe_pm_runtime_put()
633 pm_runtime_put_noidle(xe->drm.dev); in xe_pm_runtime_put()
635 pm_runtime_mark_last_busy(xe->drm.dev); in xe_pm_runtime_put()
636 pm_runtime_put(xe->drm.dev); in xe_pm_runtime_put()
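The get/put pair (lines 613-636) is asymmetric on purpose: when called from inside the driver's own PM callback, the code must only adjust the reference count and never trigger a resume or idle, since that would deadlock against the callback in progress. A sketch matching the lines above, trace calls omitted:

void xe_pm_runtime_get(struct xe_device *xe)
{
	pm_runtime_get_noresume(xe->drm.dev);

	/* Re-entry from our own suspend/resume callback: ref only. */
	if (xe_pm_read_callback_task(xe) == current)
		return;

	xe_rpm_might_enter_cb(xe);
	pm_runtime_resume(xe->drm.dev);
}

void xe_pm_runtime_put(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		pm_runtime_put_noidle(xe->drm.dev);
	} else {
		pm_runtime_mark_last_busy(xe->drm.dev);
		pm_runtime_put(xe->drm.dev);
	}
}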
647 int xe_pm_runtime_get_ioctl(struct xe_device *xe) in xe_pm_runtime_get_ioctl() argument
649 trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0)); in xe_pm_runtime_get_ioctl()
650 if (WARN_ON(xe_pm_read_callback_task(xe) == current)) in xe_pm_runtime_get_ioctl()
653 xe_rpm_might_enter_cb(xe); in xe_pm_runtime_get_ioctl()
654 return pm_runtime_get_sync(xe->drm.dev); in xe_pm_runtime_get_ioctl()
664 bool xe_pm_runtime_get_if_active(struct xe_device *xe) in xe_pm_runtime_get_if_active() argument
666 return pm_runtime_get_if_active(xe->drm.dev) > 0; in xe_pm_runtime_get_if_active()
676 bool xe_pm_runtime_get_if_in_use(struct xe_device *xe) in xe_pm_runtime_get_if_in_use() argument
678 if (xe_pm_read_callback_task(xe) == current) { in xe_pm_runtime_get_if_in_use()
680 pm_runtime_get_noresume(xe->drm.dev); in xe_pm_runtime_get_if_in_use()
684 return pm_runtime_get_if_in_use(xe->drm.dev) > 0; in xe_pm_runtime_get_if_in_use()
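Both *_get_if_* variants return true only when they actually took a reference (hence the "> 0" checks at lines 666 and 684), so callers use them for opportunistic hardware access. A hypothetical usage sketch; example_flush_if_awake() and do_hw_flush() are made up for illustration:

static void example_flush_if_awake(struct xe_device *xe)
{
	/* Touch the hardware only if it is already powered up. */
	if (!xe_pm_runtime_get_if_active(xe))
		return;

	do_hw_flush(xe);	/* hypothetical work done while awake */

	xe_pm_runtime_put(xe);
}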
691 static bool xe_pm_suspending_or_resuming(struct xe_device *xe) in xe_pm_suspending_or_resuming() argument
694 struct device *dev = xe->drm.dev; in xe_pm_suspending_or_resuming()
714 void xe_pm_runtime_get_noresume(struct xe_device *xe) in xe_pm_runtime_get_noresume() argument
718 ref = xe_pm_runtime_get_if_in_use(xe); in xe_pm_runtime_get_noresume()
721 pm_runtime_get_noresume(xe->drm.dev); in xe_pm_runtime_get_noresume()
722 drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe), in xe_pm_runtime_get_noresume()
733 bool xe_pm_runtime_resume_and_get(struct xe_device *xe) in xe_pm_runtime_resume_and_get() argument
735 if (xe_pm_read_callback_task(xe) == current) { in xe_pm_runtime_resume_and_get()
737 pm_runtime_get_noresume(xe->drm.dev); in xe_pm_runtime_resume_and_get()
741 xe_rpm_might_enter_cb(xe); in xe_pm_runtime_resume_and_get()
742 return pm_runtime_resume_and_get(xe->drm.dev) >= 0; in xe_pm_runtime_resume_and_get()
749 void xe_pm_assert_unbounded_bridge(struct xe_device *xe) in xe_pm_assert_unbounded_bridge() argument
751 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in xe_pm_assert_unbounded_bridge()
758 drm_warn(&xe->drm, "unbounded parent pci bridge, device won't support any PM support.\n"); in xe_pm_assert_unbounded_bridge()
772 int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold) in xe_pm_set_vram_threshold() argument
779 man = ttm_manager_type(&xe->ttm, i); in xe_pm_set_vram_threshold()
784 drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb); in xe_pm_set_vram_threshold()
789 mutex_lock(&xe->d3cold.lock); in xe_pm_set_vram_threshold()
790 xe->d3cold.vram_threshold = threshold; in xe_pm_set_vram_threshold()
791 mutex_unlock(&xe->d3cold.lock); in xe_pm_set_vram_threshold()
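Line 779's loop over ttm_manager_type() suggests the threshold is validated against total VRAM across all VRAM placements before being stored under the d3cold lock. A reconstruction; the placement range XE_PL_VRAM0..XE_PL_VRAM1, the DIV_ROUND_UP_ULL rounding, and the -EINVAL check are assumptions:

int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
	struct ttm_resource_manager *man;
	u32 vram_total_mb = 0;
	int i;

	/* Sum the sizes of all VRAM managers (placement range assumed). */
	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man)
			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
	}

	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);

	if (threshold > vram_total_mb)
		return -EINVAL;	/* assumed validation */

	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.vram_threshold = threshold;
	mutex_unlock(&xe->d3cold.lock);

	return 0;
}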
803 void xe_pm_d3cold_allowed_toggle(struct xe_device *xe) in xe_pm_d3cold_allowed_toggle() argument
810 if (!xe->d3cold.capable) { in xe_pm_d3cold_allowed_toggle()
811 xe->d3cold.allowed = false; in xe_pm_d3cold_allowed_toggle()
816 man = ttm_manager_type(&xe->ttm, i); in xe_pm_d3cold_allowed_toggle()
823 mutex_lock(&xe->d3cold.lock); in xe_pm_d3cold_allowed_toggle()
825 if (total_vram_used_mb < xe->d3cold.vram_threshold) in xe_pm_d3cold_allowed_toggle()
826 xe->d3cold.allowed = true; in xe_pm_d3cold_allowed_toggle()
828 xe->d3cold.allowed = false; in xe_pm_d3cold_allowed_toggle()
830 mutex_unlock(&xe->d3cold.lock); in xe_pm_d3cold_allowed_toggle()
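The toggle mirrors xe_pm_set_vram_threshold(): sum current usage per VRAM manager, then allow D3cold only while usage stays below the threshold, so a runtime suspend never has to evict an unbounded amount of VRAM. A reconstruction with the same assumed placement range, using ttm_resource_manager_usage():

void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
	struct ttm_resource_manager *man;
	u32 total_vram_used_mb = 0;
	u64 vram_used;
	int i;

	if (!xe->d3cold.capable) {
		xe->d3cold.allowed = false;
		return;
	}

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			vram_used = ttm_resource_manager_usage(man);
			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
		}
	}

	mutex_lock(&xe->d3cold.lock);

	if (total_vram_used_mb < xe->d3cold.vram_threshold)
		xe->d3cold.allowed = true;
	else
		xe->d3cold.allowed = false;

	mutex_unlock(&xe->d3cold.lock);
}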