Lines matching refs: ptdev

154 struct panthor_device *ptdev; member
525 struct panthor_device *ptdev; member
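
The two "member" hits above (source lines 154 and 525) show that both the scheduler object and the group object carry a back-pointer to the owning device; nearly every other match in this listing dereferences one of the two. A minimal sketch of that ownership pattern, in which every field other than ptdev is an illustrative assumption:

    /* Hedged sketch of the back-pointer pattern implied by the "member" hits.
     * Only the ptdev members come from the listing; the rest is assumed. */
    struct panthor_scheduler {
            struct panthor_device *ptdev;   /* device that owns this scheduler */
            /* ... lock, wq, fw_events, csg_slots[], group lists (assumed) ... */
    };

    struct panthor_group {
            struct panthor_device *ptdev;   /* device that owns this group */
            /* ... csg_id, queues[], run_node/wait_node (assumed) ... */
    };
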
678 if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \
692 !panthor_device_reset_is_pending((sched)->ptdev)) \
708 !panthor_device_reset_is_pending((sched)->ptdev)) \
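
Source lines 678, 692 and 708 are fragments of the work-queuing helper macros: the group variant pairs queue_work() with a reference drop when queuing fails, and the scheduler variant refuses to queue while a reset is in flight. A plausible reconstruction of the scheduler-side macro from those fragments (the do/while wrapper and the reset.in_progress check are assumptions consistent with the matched lines):

    /* Hedged reconstruction: skip queuing work while a reset is pending. */
    #define sched_queue_work(sched, wname) \
            do { \
                    if (!atomic_read(&(sched)->reset.in_progress) && \
                        !panthor_device_reset_is_pending((sched)->ptdev)) \
                            queue_work((sched)->wq, &(sched)->wname ## _work); \
            } while (0)
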
797 struct panthor_device *ptdev = group->ptdev; in panthor_queue_get_syncwait_obj() local
808 if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(bo))) in panthor_queue_get_syncwait_obj()
813 if (drm_WARN_ON(&ptdev->base, ret)) in panthor_queue_get_syncwait_obj()
817 if (drm_WARN_ON(&ptdev->base, !queue->syncwait.kmap)) in panthor_queue_get_syncwait_obj()
872 struct panthor_device *ptdev = group->ptdev; in group_release() local
874 drm_WARN_ON(&ptdev->base, group->csg_id >= 0); in group_release()
875 drm_WARN_ON(&ptdev->base, !list_empty(&group->run_node)); in group_release()
876 drm_WARN_ON(&ptdev->base, !list_empty(&group->wait_node)); in group_release()
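
panthor_queue_get_syncwait_obj() and group_release() both lean on drm_WARN_ON(&ptdev->base, ...), which logs against the DRM device and evaluates to the tested condition, so it doubles as an early-out guard. A condensed sketch of the release-time invariant checks; the kref destructor shape and the refcount member name are assumptions, while the three WARNs match the listing:

    /* Hedged sketch: invariants asserted before a group is freed. */
    static void group_release(struct kref *kref)
    {
            struct panthor_group *group = container_of(kref, struct panthor_group,
                                                       refcount);
            struct panthor_device *ptdev = group->ptdev;

            /* Must be unbound from any CSG slot and off the run/wait lists. */
            drm_WARN_ON(&ptdev->base, group->csg_id >= 0);
            drm_WARN_ON(&ptdev->base, !list_empty(&group->run_node));
            drm_WARN_ON(&ptdev->base, !list_empty(&group->wait_node));

            /* ... free queues, suspend buffers, syncobjs (elided) ... */
    }
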
906 struct panthor_device *ptdev = group->ptdev; in group_bind_locked() local
910 lockdep_assert_held(&ptdev->scheduler->lock); in group_bind_locked()
912 if (drm_WARN_ON(&ptdev->base, group->csg_id != -1 || csg_id >= MAX_CSGS || in group_bind_locked()
913 ptdev->scheduler->csg_slots[csg_id].group)) in group_bind_locked()
920 csg_slot = &ptdev->scheduler->csg_slots[csg_id]; in group_bind_locked()
948 struct panthor_device *ptdev = group->ptdev; in group_unbind_locked() local
951 lockdep_assert_held(&ptdev->scheduler->lock); in group_unbind_locked()
953 if (drm_WARN_ON(&ptdev->base, group->csg_id < 0 || group->csg_id >= MAX_CSGS)) in group_unbind_locked()
956 if (drm_WARN_ON(&ptdev->base, group->state == PANTHOR_CS_GROUP_ACTIVE)) in group_unbind_locked()
959 slot = &ptdev->scheduler->csg_slots[group->csg_id]; in group_unbind_locked()
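
group_bind_locked() and group_unbind_locked() pair a group with a CSG slot. Both assert the scheduler lock with lockdep_assert_held() and bounds-check csg_id before touching ptdev->scheduler->csg_slots[]. A trimmed sketch of the bind side; the return type, error code, and slot-assignment details are assumptions:

    /* Hedged sketch of the locked bind step. */
    static int group_bind_locked(struct panthor_group *group, u32 csg_id)
    {
            struct panthor_device *ptdev = group->ptdev;
            struct panthor_csg_slot *csg_slot;

            lockdep_assert_held(&ptdev->scheduler->lock);

            /* Already bound, slot out of range, or slot occupied: refuse. */
            if (drm_WARN_ON(&ptdev->base, group->csg_id != -1 || csg_id >= MAX_CSGS ||
                            ptdev->scheduler->csg_slots[csg_id].group))
                    return -EINVAL;

            csg_slot = &ptdev->scheduler->csg_slots[csg_id];
            csg_slot->group = group;        /* assumed: the slot records the group */
            group->csg_id = csg_id;
            return 0;
    }
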
988 cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id) in cs_slot_prog_locked() argument
990 struct panthor_queue *queue = ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id]; in cs_slot_prog_locked()
991 struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); in cs_slot_prog_locked()
993 lockdep_assert_held(&ptdev->scheduler->lock); in cs_slot_prog_locked()
996 drm_WARN_ON(&ptdev->base, queue->iface.input->insert < queue->iface.input->extract); in cs_slot_prog_locked()
1032 cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id) in cs_slot_reset_locked() argument
1034 struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); in cs_slot_reset_locked()
1035 struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group; in cs_slot_reset_locked()
1038 lockdep_assert_held(&ptdev->scheduler->lock); in cs_slot_reset_locked()
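
cs_slot_prog_locked() programs one command-stream ring into its firmware interface, first checking the ring invariant that the insert pointer never trails extract (source line 996); cs_slot_reset_locked() is its teardown counterpart. A sketch of the programming side, with everything past the WARN summarized in comments since only the lookups and the check appear in the listing:

    /* Hedged sketch: per-CS slot programming under the scheduler lock. */
    static void cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
    {
            struct panthor_queue *queue =
                    ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id];
            struct panthor_fw_cs_iface *cs_iface =
                    panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);

            lockdep_assert_held(&ptdev->scheduler->lock);

            /* A healthy ring never has insert behind extract. */
            drm_WARN_ON(&ptdev->base,
                        queue->iface.input->insert < queue->iface.input->extract);

            /* ... write ring base/size and doorbell id into cs_iface (assumed) ... */
    }
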
1066 csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id) in csg_slot_sync_priority_locked() argument
1068 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; in csg_slot_sync_priority_locked()
1071 lockdep_assert_held(&ptdev->scheduler->lock); in csg_slot_sync_priority_locked()
1073 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); in csg_slot_sync_priority_locked()
1086 cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id) in cs_slot_sync_queue_state_locked() argument
1088 struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group; in cs_slot_sync_queue_state_locked()
1091 panthor_fw_get_cs_iface(group->ptdev, csg_id, cs_id); in cs_slot_sync_queue_state_locked()
1105 &group->ptdev->scheduler->groups.waiting); in cs_slot_sync_queue_state_locked()
1137 csg_slot_sync_queues_state_locked(struct panthor_device *ptdev, u32 csg_id) in csg_slot_sync_queues_state_locked() argument
1139 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; in csg_slot_sync_queues_state_locked()
1143 lockdep_assert_held(&ptdev->scheduler->lock); in csg_slot_sync_queues_state_locked()
1150 cs_slot_sync_queue_state_locked(ptdev, csg_id, i); in csg_slot_sync_queues_state_locked()
1155 csg_slot_sync_state_locked(struct panthor_device *ptdev, u32 csg_id) in csg_slot_sync_state_locked() argument
1157 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; in csg_slot_sync_state_locked()
1163 lockdep_assert_held(&ptdev->scheduler->lock); in csg_slot_sync_state_locked()
1165 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); in csg_slot_sync_state_locked()
1191 drm_err(&ptdev->base, "Invalid state on CSG %d (state=%d)", in csg_slot_sync_state_locked()
1204 panthor_device_schedule_reset(ptdev); in csg_slot_sync_state_locked()
1207 csg_slot_sync_queues_state_locked(ptdev, csg_id); in csg_slot_sync_state_locked()
1220 cs_slot_reset_locked(ptdev, csg_id, i); in csg_slot_sync_state_locked()
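
The csg_slot_sync_*_locked() helpers copy firmware-reported state back into the driver's software mirror; csg_slot_sync_state_locked() additionally treats an unrecognized state as fatal, logging it (source line 1191) and scheduling a device reset (line 1204). A sketch of the per-queue fan-out done by csg_slot_sync_queues_state_locked(); the group lookup and the queue_count bound are assumptions:

    /* Hedged sketch: sync every command stream of one CSG slot. */
    static void csg_slot_sync_queues_state_locked(struct panthor_device *ptdev,
                                                  u32 csg_id)
    {
            struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
            struct panthor_group *group = csg_slot->group;  /* lookup assumed */
            u32 i;

            lockdep_assert_held(&ptdev->scheduler->lock);

            for (i = 0; i < group->queue_count; i++)        /* bound assumed */
                    cs_slot_sync_queue_state_locked(ptdev, csg_id, i);
    }
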
1228 csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority) in csg_slot_prog_locked() argument
1235 lockdep_assert_held(&ptdev->scheduler->lock); in csg_slot_prog_locked()
1240 if (drm_WARN_ON(&ptdev->base, csg_id >= MAX_CSGS)) in csg_slot_prog_locked()
1243 csg_slot = &ptdev->scheduler->csg_slots[csg_id]; in csg_slot_prog_locked()
1248 csg_iface = panthor_fw_get_csg_iface(group->ptdev, csg_id); in csg_slot_prog_locked()
1252 cs_slot_prog_locked(ptdev, csg_id, i); in csg_slot_prog_locked()
1284 cs_slot_process_fatal_event_locked(struct panthor_device *ptdev, in cs_slot_process_fatal_event_locked() argument
1287 struct panthor_scheduler *sched = ptdev->scheduler; in cs_slot_process_fatal_event_locked()
1296 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); in cs_slot_process_fatal_event_locked()
1307 panthor_device_schedule_reset(ptdev); in cs_slot_process_fatal_event_locked()
1313 drm_warn(&ptdev->base, in cs_slot_process_fatal_event_locked()
1320 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fatal)), in cs_slot_process_fatal_event_locked()
1326 cs_slot_process_fault_event_locked(struct panthor_device *ptdev, in cs_slot_process_fault_event_locked() argument
1329 struct panthor_scheduler *sched = ptdev->scheduler; in cs_slot_process_fault_event_locked()
1340 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); in cs_slot_process_fault_event_locked()
1361 drm_warn(&ptdev->base, in cs_slot_process_fault_event_locked()
1368 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fault)), in cs_slot_process_fault_event_locked()
1375 struct panthor_device *ptdev = group->ptdev; in group_process_tiler_oom() local
1376 struct panthor_scheduler *sched = ptdev->scheduler; in group_process_tiler_oom()
1388 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); in group_process_tiler_oom()
1422 drm_warn(&ptdev->base, "Failed to extend the tiler heap\n"); in group_process_tiler_oom()
1434 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); in group_process_tiler_oom()
1435 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); in group_process_tiler_oom()
1441 panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id)); in group_process_tiler_oom()
1476 cs_slot_process_tiler_oom_event_locked(struct panthor_device *ptdev, in cs_slot_process_tiler_oom_event_locked() argument
1479 struct panthor_scheduler *sched = ptdev->scheduler; in cs_slot_process_tiler_oom_event_locked()
1485 if (drm_WARN_ON(&ptdev->base, !group)) in cs_slot_process_tiler_oom_event_locked()
1498 static bool cs_slot_process_irq_locked(struct panthor_device *ptdev, in cs_slot_process_irq_locked() argument
1504 lockdep_assert_held(&ptdev->scheduler->lock); in cs_slot_process_irq_locked()
1506 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); in cs_slot_process_irq_locked()
1512 cs_slot_process_fatal_event_locked(ptdev, csg_id, cs_id); in cs_slot_process_irq_locked()
1515 cs_slot_process_fault_event_locked(ptdev, csg_id, cs_id); in cs_slot_process_irq_locked()
1518 cs_slot_process_tiler_oom_event_locked(ptdev, csg_id, cs_id); in cs_slot_process_irq_locked()
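
cs_slot_process_irq_locked() is the per-stream demultiplexer: it fetches the CS firmware interface and routes FATAL, FAULT, and TILER_OOM events to the dedicated handlers matched above. A sketch of that dispatch; deriving the pending-event mask from the interface's req/ack words and the event bit names are assumptions:

    /* Hedged sketch: route pending per-CS events to their handlers. */
    static bool cs_slot_process_irq_locked(struct panthor_device *ptdev,
                                           u32 csg_id, u32 cs_id)
    {
            struct panthor_fw_cs_iface *cs_iface;
            u32 events;

            lockdep_assert_held(&ptdev->scheduler->lock);

            cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
            events = cs_iface->input->req ^ cs_iface->output->ack;  /* assumed */

            if (events & CS_FATAL)          /* bit names assumed */
                    cs_slot_process_fatal_event_locked(ptdev, csg_id, cs_id);
            if (events & CS_FAULT)
                    cs_slot_process_fault_event_locked(ptdev, csg_id, cs_id);
            if (events & CS_TILER_OOM)
                    cs_slot_process_tiler_oom_event_locked(ptdev, csg_id, cs_id);

            return events != 0;
    }
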
1528 static void csg_slot_sync_idle_state_locked(struct panthor_device *ptdev, u32 csg_id) in csg_slot_sync_idle_state_locked() argument
1530 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; in csg_slot_sync_idle_state_locked()
1533 lockdep_assert_held(&ptdev->scheduler->lock); in csg_slot_sync_idle_state_locked()
1535 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); in csg_slot_sync_idle_state_locked()
1539 static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id) in csg_slot_process_idle_event_locked() argument
1541 struct panthor_scheduler *sched = ptdev->scheduler; in csg_slot_process_idle_event_locked()
1554 static void csg_slot_sync_update_locked(struct panthor_device *ptdev, in csg_slot_sync_update_locked() argument
1557 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; in csg_slot_sync_update_locked()
1560 lockdep_assert_held(&ptdev->scheduler->lock); in csg_slot_sync_update_locked()
1565 sched_queue_work(ptdev->scheduler, sync_upd); in csg_slot_sync_update_locked()
1569 csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 csg_id) in csg_slot_process_progress_timer_event_locked() argument
1571 struct panthor_scheduler *sched = ptdev->scheduler; in csg_slot_process_progress_timer_event_locked()
1577 drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id); in csg_slot_process_progress_timer_event_locked()
1580 if (!drm_WARN_ON(&ptdev->base, !group)) in csg_slot_process_progress_timer_event_locked()
1586 static void sched_process_csg_irq_locked(struct panthor_device *ptdev, u32 csg_id) in sched_process_csg_irq_locked() argument
1592 lockdep_assert_held(&ptdev->scheduler->lock); in sched_process_csg_irq_locked()
1594 if (drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count)) in sched_process_csg_irq_locked()
1597 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); in sched_process_csg_irq_locked()
1622 csg_slot_process_idle_event_locked(ptdev, csg_id); in sched_process_csg_irq_locked()
1625 csg_slot_process_progress_timer_event_locked(ptdev, csg_id); in sched_process_csg_irq_locked()
1631 if (cs_slot_process_irq_locked(ptdev, csg_id, cs_id)) in sched_process_csg_irq_locked()
1638 csg_slot_sync_update_locked(ptdev, csg_id); in sched_process_csg_irq_locked()
1643 panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id)); in sched_process_csg_irq_locked()
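
sched_process_csg_irq_locked() walks every command stream of the slot and only rings the group's doorbell (source line 1643) when at least one per-CS handler reported work, so the firmware is notified once per CSG rather than once per stream. A sketch of that loop; the local mask name and the loop bound are assumptions:

    /* Hedged sketch: batch per-CS acks into a single CSG doorbell ring. */
    u32 cs_id, ring_db = 0;                 /* local names assumed */

    for (cs_id = 0; cs_id < sched->cs_slot_count; cs_id++) {  /* bound assumed */
            if (cs_slot_process_irq_locked(ptdev, csg_id, cs_id))
                    ring_db |= BIT(cs_id);
    }

    if (ring_db)
            panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
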
1646 static void sched_process_idle_event_locked(struct panthor_device *ptdev) in sched_process_idle_event_locked() argument
1648 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); in sched_process_idle_event_locked()
1650 lockdep_assert_held(&ptdev->scheduler->lock); in sched_process_idle_event_locked()
1654 sched_queue_delayed_work(ptdev->scheduler, tick, 0); in sched_process_idle_event_locked()
1661 static void sched_process_global_irq_locked(struct panthor_device *ptdev) in sched_process_global_irq_locked() argument
1663 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); in sched_process_global_irq_locked()
1666 lockdep_assert_held(&ptdev->scheduler->lock); in sched_process_global_irq_locked()
1673 sched_process_idle_event_locked(ptdev); in sched_process_global_irq_locked()
1681 struct panthor_device *ptdev = sched->ptdev; in process_fw_events_work() local
1686 sched_process_global_irq_locked(ptdev); in process_fw_events_work()
1693 sched_process_csg_irq_locked(ptdev, csg_id); in process_fw_events_work()
1703 void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events) in panthor_sched_report_fw_events() argument
1705 if (!ptdev->scheduler) in panthor_sched_report_fw_events()
1708 atomic_or(events, &ptdev->scheduler->fw_events); in panthor_sched_report_fw_events()
1709 sched_queue_work(ptdev->scheduler, fw_events); in panthor_sched_report_fw_events()
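
panthor_sched_report_fw_events() is the IRQ-side entry point, reconstructed almost verbatim from the matches at source lines 1703-1709: it accumulates event bits with atomic_or() and defers the actual handling to process_fw_events_work(), which runs under the scheduler lock. Only the worker-side atomic_xchg() consumption is an assumption:

    /* Near-verbatim from the listing: lock-free IRQ-to-worker hand-off. */
    void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events)
    {
            if (!ptdev->scheduler)
                    return;

            atomic_or(events, &ptdev->scheduler->fw_events);
            sched_queue_work(ptdev->scheduler, fw_events);
    }

    /* Worker side (assumed): consume the accumulated mask in one shot. */
    u32 events = atomic_xchg(&sched->fw_events, 0);
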
1741 static void csgs_upd_ctx_queue_reqs(struct panthor_device *ptdev, in csgs_upd_ctx_queue_reqs() argument
1745 if (drm_WARN_ON(&ptdev->base, !mask) || in csgs_upd_ctx_queue_reqs()
1746 drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count)) in csgs_upd_ctx_queue_reqs()
1754 static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev, in csgs_upd_ctx_apply_locked() argument
1757 struct panthor_scheduler *sched = ptdev->scheduler; in csgs_upd_ctx_apply_locked()
1770 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); in csgs_upd_ctx_apply_locked()
1776 panthor_fw_ring_csg_doorbells(ptdev, ctx->update_mask); in csgs_upd_ctx_apply_locked()
1786 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); in csgs_upd_ctx_apply_locked()
1788 ret = panthor_fw_csg_wait_acks(ptdev, csg_id, req_mask, &acked, 100); in csgs_upd_ctx_apply_locked()
1791 csg_slot_sync_priority_locked(ptdev, csg_id); in csgs_upd_ctx_apply_locked()
1794 csg_slot_sync_state_locked(ptdev, csg_id); in csgs_upd_ctx_apply_locked()
1797 csg_slot_sync_queues_state_locked(ptdev, csg_id); in csgs_upd_ctx_apply_locked()
1798 csg_slot_sync_idle_state_locked(ptdev, csg_id); in csgs_upd_ctx_apply_locked()
1803 drm_err(&ptdev->base, "CSG %d update request timedout", csg_id); in csgs_upd_ctx_apply_locked()
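
The csgs_upd_ctx_* pair batches slot updates: csgs_upd_ctx_queue_reqs() stages request bits per slot, and csgs_upd_ctx_apply_locked() writes them, rings all the doorbells in one panthor_fw_ring_csg_doorbells() call (source line 1776), then waits on each slot's ack with panthor_fw_csg_wait_acks() and re-syncs the mirrored state. A usage sketch; the context type, init helper, and request constants are assumptions:

    /* Hedged usage sketch of the batched CSG-update context. */
    struct panthor_csg_slots_upd_ctx upd_ctx;       /* type name assumed */
    int ret;

    csgs_upd_ctx_init(&upd_ctx);                    /* init helper assumed */
    csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
                            CSG_STATE_START,        /* req/mask values assumed */
                            CSG_STATE_MASK);
    ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
    if (ret)
            panthor_device_schedule_reset(ptdev);   /* escalation as at 1987/2162 */
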
1836 struct panthor_device *ptdev = group->ptdev; in group_is_idle() local
1840 return ptdev->scheduler->csg_slots[group->csg_id].idle; in group_is_idle()
1943 struct panthor_device *ptdev = sched->ptdev; in tick_ctx_init() local
1965 csg_iface = panthor_fw_get_csg_iface(ptdev, i); in tick_ctx_init()
1972 sched_process_csg_irq_locked(ptdev, i); in tick_ctx_init()
1980 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i, in tick_ctx_init()
1985 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); in tick_ctx_init()
1987 panthor_device_schedule_reset(ptdev); in tick_ctx_init()
2002 if (drm_WARN_ON(&group->ptdev->base, group_can_run(group))) in group_term_post_processing()
2033 sched_queue_work(group->ptdev->scheduler, sync_upd); in group_term_post_processing()
2056 struct panthor_device *ptdev = sched->ptdev; in tick_ctx_cleanup() local
2065 drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask && in tick_ctx_cleanup()
2088 drm_WARN_ON(&ptdev->base, in tick_ctx_cleanup()
2109 struct panthor_device *ptdev = sched->ptdev; in tick_ctx_apply() local
2124 if (drm_WARN_ON(&ptdev->base, csg_id < 0)) in tick_ctx_apply()
2128 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, in tick_ctx_apply()
2144 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); in tick_ctx_apply()
2153 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, in tick_ctx_apply()
2160 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); in tick_ctx_apply()
2162 panthor_device_schedule_reset(ptdev); in tick_ctx_apply()
2175 sched_process_csg_irq_locked(ptdev, group->csg_id); in tick_ctx_apply()
2201 if (drm_WARN_ON(&ptdev->base, csg_id < 0)) in tick_ctx_apply()
2204 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); in tick_ctx_apply()
2207 csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--); in tick_ctx_apply()
2208 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, in tick_ctx_apply()
2212 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, in tick_ctx_apply()
2219 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); in tick_ctx_apply()
2221 panthor_device_schedule_reset(ptdev); in tick_ctx_apply()
2275 if (drm_WARN_ON(&sched->ptdev->base, ctx->min_priority >= PANTHOR_CSG_PRIORITY_COUNT)) in tick_ctx_update_resched_target()
2301 struct panthor_device *ptdev = sched->ptdev; in tick_work() local
2307 if (!drm_dev_enter(&ptdev->base, &cookie)) in tick_work()
2310 ret = pm_runtime_resume_and_get(ptdev->base.dev); in tick_work()
2311 if (drm_WARN_ON(&ptdev->base, ret)) in tick_work()
2318 if (panthor_device_reset_is_pending(sched->ptdev)) in tick_work()
2367 panthor_devfreq_record_idle(sched->ptdev); in tick_work()
2369 pm_runtime_put_autosuspend(ptdev->base.dev); in tick_work()
2373 panthor_devfreq_record_busy(sched->ptdev); in tick_work()
2375 pm_runtime_get(ptdev->base.dev); in tick_work()
2393 pm_runtime_mark_last_busy(ptdev->base.dev); in tick_work()
2394 pm_runtime_put_autosuspend(ptdev->base.dev); in tick_work()
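
tick_work() brackets a scheduling pass with drm_dev_enter() and runtime PM: it bails out when the device is unplugged or cannot be resumed, records devfreq busy/idle transitions as groups come and go, and ends with mark_last_busy + put_autosuspend so the GPU can autosuspend once ticks stop. A sketch of that bracket; the label name is an assumption:

    /* Hedged sketch: hotplug + runtime-PM bracket around one tick. */
    int cookie, ret;

    if (!drm_dev_enter(&ptdev->base, &cookie))
            return;                                 /* device is gone */

    ret = pm_runtime_resume_and_get(ptdev->base.dev);
    if (drm_WARN_ON(&ptdev->base, ret))
            goto out_dev_exit;                      /* label assumed */

    /* ... evaluate runnable groups, rotate CSG slots (elided) ... */

    pm_runtime_mark_last_busy(ptdev->base.dev);
    pm_runtime_put_autosuspend(ptdev->base.dev);

    out_dev_exit:
    drm_dev_exit(cookie);
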
2447 drm_WARN_ON(&group->ptdev->base, ret < 0); in sync_upd_work()
2476 struct panthor_device *ptdev = group->ptdev; in group_schedule_locked() local
2477 struct panthor_scheduler *sched = ptdev->scheduler; in group_schedule_locked()
2554 struct panthor_scheduler *sched = group->ptdev->scheduler; in panthor_group_stop()
2567 struct panthor_scheduler *sched = group->ptdev->scheduler; in panthor_group_start()
2569 lockdep_assert_held(&group->ptdev->scheduler->reset.lock); in panthor_group_start()
2588 static void panthor_sched_immediate_tick(struct panthor_device *ptdev) in panthor_sched_immediate_tick() argument
2590 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_sched_immediate_tick()
2598 void panthor_sched_report_mmu_fault(struct panthor_device *ptdev) in panthor_sched_report_mmu_fault() argument
2601 if (ptdev->scheduler) in panthor_sched_report_mmu_fault()
2602 panthor_sched_immediate_tick(ptdev); in panthor_sched_report_mmu_fault()
2605 void panthor_sched_resume(struct panthor_device *ptdev) in panthor_sched_resume() argument
2608 panthor_sched_immediate_tick(ptdev); in panthor_sched_resume()
2611 void panthor_sched_suspend(struct panthor_device *ptdev) in panthor_sched_suspend() argument
2613 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_sched_suspend()
2625 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i, in panthor_sched_suspend()
2634 csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); in panthor_sched_suspend()
2640 drm_err(&ptdev->base, "CSG suspend failed, escalating to termination"); in panthor_sched_suspend()
2651 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, in panthor_sched_suspend()
2657 csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); in panthor_sched_suspend()
2681 if (panthor_gpu_flush_caches(ptdev, CACHE_CLEAN, CACHE_CLEAN, 0)) in panthor_sched_suspend()
2691 csg_slot_sync_update_locked(ptdev, csg_id); in panthor_sched_suspend()
2707 sched_process_csg_irq_locked(ptdev, group->csg_id); in panthor_sched_suspend()
2711 drm_WARN_ON(&group->ptdev->base, !list_empty(&group->run_node)); in panthor_sched_suspend()
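
panthor_sched_suspend() is a two-phase shutdown: it queues SUSPEND requests for every bound slot (source line 2625), and if any ack times out it logs the failure (line 2640) and re-queues those slots with a TERMINATE request (line 2651) before flushing GPU caches (line 2681). A condensed sketch; the request constants, mask names, and timed-out tracking are assumptions:

    /* Hedged sketch: suspend every slot, escalate stragglers to terminate. */
    csgs_upd_ctx_init(&upd_ctx);                            /* helper assumed */
    for (i = 0; i < sched->csg_slot_count; i++) {
            if (slot_mask & BIT(i))                         /* mask name assumed */
                    csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
                                            CSG_STATE_SUSPEND, /* value assumed */
                                            CSG_STATE_MASK);
    }
    csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);

    if (upd_ctx.timedout_mask) {                            /* field assumed */
            drm_err(&ptdev->base,
                    "CSG suspend failed, escalating to termination");
            /* ... re-queue timed-out slots with a TERMINATE request ... */
    }
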
2728 void panthor_sched_pre_reset(struct panthor_device *ptdev) in panthor_sched_pre_reset() argument
2730 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_sched_pre_reset()
2743 panthor_sched_suspend(ptdev); in panthor_sched_pre_reset()
2750 drm_WARN_ON(&ptdev->base, !list_empty(&sched->groups.runnable[i])); in panthor_sched_pre_reset()
2763 void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed) in panthor_sched_post_reset() argument
2765 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_sched_post_reset()
2838 struct panthor_device *ptdev = group->ptdev; in queue_run_job() local
2839 struct panthor_scheduler *sched = ptdev->scheduler; in queue_run_job()
2842 u64 addr_reg = ptdev->csif_info.cs_reg_count - in queue_run_job()
2843 ptdev->csif_info.unpreserved_cs_reg_count; in queue_run_job()
2901 ret = pm_runtime_resume_and_get(ptdev->base.dev); in queue_run_job()
2902 if (drm_WARN_ON(&ptdev->base, ret)) in queue_run_job()
2950 gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1); in queue_run_job()
2953 pm_runtime_get(ptdev->base.dev); in queue_run_job()
2956 panthor_devfreq_record_busy(sched->ptdev); in queue_run_job()
2967 pm_runtime_mark_last_busy(ptdev->base.dev); in queue_run_job()
2968 pm_runtime_put_autosuspend(ptdev->base.dev); in queue_run_job()
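
queue_run_job() derives the window of unpreserved CS registers from ptdev->csif_info (source lines 2842-2843), then kicks the firmware by writing the queue's doorbell register directly (line 2950), all inside the same resume_and_get/put_autosuspend bracket seen in tick_work(). The core of the submission path, with the ring-buffer copy elided:

    /* Hedged sketch: compute the scratch-register window, then ring the bell. */
    u64 addr_reg = ptdev->csif_info.cs_reg_count -
                   ptdev->csif_info.unpreserved_cs_reg_count;

    /* ... emit the call sequence into queue->ringbuf (elided) ... */

    gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1);
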
2978 struct panthor_device *ptdev = group->ptdev; in queue_timedout_job() local
2979 struct panthor_scheduler *sched = ptdev->scheduler; in queue_timedout_job()
2982 drm_warn(&ptdev->base, "job timeout\n"); in queue_timedout_job()
2984 drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress)); in queue_timedout_job()
2991 sched_queue_delayed_work(ptdev->scheduler, tick, 0); in queue_timedout_job()
3048 queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm, in group_create_queue()
3063 queue->iface.mem = panthor_fw_alloc_queue_iface_mem(group->ptdev, in group_create_queue()
3074 group->ptdev->scheduler->wq, 1, in group_create_queue()
3077 group->ptdev->reset.wq, in group_create_queue()
3078 NULL, "panthor-queue", group->ptdev->base.dev); in group_create_queue()
3098 struct panthor_device *ptdev = pfile->ptdev; in panthor_group_create() local
3100 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_group_create()
3101 struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0); in panthor_group_create()
3112 if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) || in panthor_group_create()
3113 (group_args->fragment_core_mask & ~ptdev->gpu_info.shader_present) || in panthor_group_create()
3114 (group_args->tiler_core_mask & ~ptdev->gpu_info.tiler_present)) in panthor_group_create()
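
panthor_group_create() rejects a group up front if it asks for cores the hardware does not expose, comparing each requested mask against the present-mask in gpu_info. The check is nearly verbatim from the matched lines; only the error code is an assumption:

    /* Near-verbatim: refuse core masks outside what the GPU reports. */
    if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) ||
        (group_args->fragment_core_mask & ~ptdev->gpu_info.shader_present) ||
        (group_args->tiler_core_mask & ~ptdev->gpu_info.tiler_present))
            return -EINVAL;                         /* error code assumed */
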
3131 group->ptdev = ptdev; in panthor_group_create()
3154 group->suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size); in panthor_group_create()
3162 group->protm_suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size); in panthor_group_create()
3169 group->syncobjs = panthor_kernel_bo_create(ptdev, group->vm, in panthor_group_create()
3226 struct panthor_device *ptdev = pfile->ptdev; in panthor_group_destroy() local
3227 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_group_destroy()
3275 struct panthor_device *ptdev = pfile->ptdev; in panthor_group_get_state() local
3276 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_group_get_state()
3335 drm_WARN_ON(&job->group->ptdev->base, !list_empty(&job->node)); in job_release()
3462 void panthor_sched_unplug(struct panthor_device *ptdev) in panthor_sched_unplug() argument
3464 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_sched_unplug()
3470 pm_runtime_put(ptdev->base.dev); in panthor_sched_unplug()
3500 int panthor_sched_init(struct panthor_device *ptdev) in panthor_sched_init() argument
3502 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); in panthor_sched_init()
3503 struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0); in panthor_sched_init()
3504 struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, 0, 0); in panthor_sched_init()
3509 sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL); in panthor_sched_init()
3531 gpu_as_count = hweight32(ptdev->gpu_info.as_present & GENMASK(31, 1)); in panthor_sched_init()
3533 drm_err(&ptdev->base, "Not enough AS (%d, expected at least 2)", in panthor_sched_init()
3538 sched->ptdev = ptdev; in panthor_sched_init()
3543 ptdev->csif_info.csg_slot_count = sched->csg_slot_count; in panthor_sched_init()
3544 ptdev->csif_info.cs_slot_count = sched->cs_slot_count; in panthor_sched_init()
3545 ptdev->csif_info.scoreboard_slot_count = sched->sb_slot_count; in panthor_sched_init()
3554 ret = drmm_mutex_init(&ptdev->base, &sched->lock); in panthor_sched_init()
3564 ret = drmm_mutex_init(&ptdev->base, &sched->reset.lock); in panthor_sched_init()
3588 panthor_sched_fini(&ptdev->base, sched); in panthor_sched_init()
3589 drm_err(&ptdev->base, "Failed to allocate the workqueues"); in panthor_sched_init()
3593 ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched); in panthor_sched_init()
3597 ptdev->scheduler = sched; in panthor_sched_init()
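
panthor_sched_init() wires everything up with DRM-managed resources: drmm_kzalloc() for the scheduler itself, drmm_mutex_init() for its locks, and drmm_add_action_or_reset() to register panthor_sched_fini() as teardown, so cleanup happens automatically when the DRM device is released. It also requires at least two address spaces after masking out AS0 with GENMASK(31, 1) (source lines 3531-3535). A sketch of the skeleton; the error paths are assumptions:

    /* Hedged sketch: DRM-managed scheduler setup. */
    sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL);
    if (!sched)
            return -ENOMEM;                         /* error path assumed */

    sched->ptdev = ptdev;

    ret = drmm_mutex_init(&ptdev->base, &sched->lock);
    if (ret)
            return ret;

    ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched);
    if (ret)
            return ret;

    ptdev->scheduler = sched;
    return 0;
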