Lines Matching refs:ptdev

153 struct panthor_device *ptdev; member
536 struct panthor_device *ptdev; member
726 if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \
740 !panthor_device_reset_is_pending((sched)->ptdev)) \
756 !panthor_device_reset_is_pending((sched)->ptdev)) \
854 struct panthor_device *ptdev = group->ptdev; in panthor_queue_get_syncwait_obj() local
865 if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(bo))) in panthor_queue_get_syncwait_obj()
870 if (drm_WARN_ON(&ptdev->base, ret)) in panthor_queue_get_syncwait_obj()
874 if (drm_WARN_ON(&ptdev->base, !queue->syncwait.kmap)) in panthor_queue_get_syncwait_obj()
930 struct panthor_device *ptdev = group->ptdev; in group_release() local
932 drm_WARN_ON(&ptdev->base, group->csg_id >= 0); in group_release()
933 drm_WARN_ON(&ptdev->base, !list_empty(&group->run_node)); in group_release()
934 drm_WARN_ON(&ptdev->base, !list_empty(&group->wait_node)); in group_release()
964 struct panthor_device *ptdev = group->ptdev; in group_bind_locked() local
968 lockdep_assert_held(&ptdev->scheduler->lock); in group_bind_locked()
970 if (drm_WARN_ON(&ptdev->base, group->csg_id != -1 || csg_id >= MAX_CSGS || in group_bind_locked()
971 ptdev->scheduler->csg_slots[csg_id].group)) in group_bind_locked()
978 csg_slot = &ptdev->scheduler->csg_slots[csg_id]; in group_bind_locked()
1006 struct panthor_device *ptdev = group->ptdev; in group_unbind_locked() local
1009 lockdep_assert_held(&ptdev->scheduler->lock); in group_unbind_locked()
1011 if (drm_WARN_ON(&ptdev->base, group->csg_id < 0 || group->csg_id >= MAX_CSGS)) in group_unbind_locked()
1014 if (drm_WARN_ON(&ptdev->base, group->state == PANTHOR_CS_GROUP_ACTIVE)) in group_unbind_locked()
1017 slot = &ptdev->scheduler->csg_slots[group->csg_id]; in group_unbind_locked()
1046 cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id) in cs_slot_prog_locked() argument
1048 struct panthor_queue *queue = ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id]; in cs_slot_prog_locked()
1049 struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); in cs_slot_prog_locked()
1051 lockdep_assert_held(&ptdev->scheduler->lock); in cs_slot_prog_locked()
1054 drm_WARN_ON(&ptdev->base, queue->iface.input->insert < queue->iface.input->extract); in cs_slot_prog_locked()
1090 cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id) in cs_slot_reset_locked() argument
1092 struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); in cs_slot_reset_locked()
1093 struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group; in cs_slot_reset_locked()
1096 lockdep_assert_held(&ptdev->scheduler->lock); in cs_slot_reset_locked()
1124 csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id) in csg_slot_sync_priority_locked() argument
1126 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; in csg_slot_sync_priority_locked()
1129 lockdep_assert_held(&ptdev->scheduler->lock); in csg_slot_sync_priority_locked()
1131 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); in csg_slot_sync_priority_locked()
1144 cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id) in cs_slot_sync_queue_state_locked() argument
1146 struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group; in cs_slot_sync_queue_state_locked()
1149 panthor_fw_get_cs_iface(group->ptdev, csg_id, cs_id); in cs_slot_sync_queue_state_locked()
1163 &group->ptdev->scheduler->groups.waiting); in cs_slot_sync_queue_state_locked()
1195 csg_slot_sync_queues_state_locked(struct panthor_device *ptdev, u32 csg_id) in csg_slot_sync_queues_state_locked() argument
1197 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; in csg_slot_sync_queues_state_locked()
1201 lockdep_assert_held(&ptdev->scheduler->lock); in csg_slot_sync_queues_state_locked()
1208 cs_slot_sync_queue_state_locked(ptdev, csg_id, i); in csg_slot_sync_queues_state_locked()
1213 csg_slot_sync_state_locked(struct panthor_device *ptdev, u32 csg_id) in csg_slot_sync_state_locked() argument
1215 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; in csg_slot_sync_state_locked()
1221 lockdep_assert_held(&ptdev->scheduler->lock); in csg_slot_sync_state_locked()
1223 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); in csg_slot_sync_state_locked()
1249 drm_err(&ptdev->base, "Invalid state on CSG %d (state=%d)", in csg_slot_sync_state_locked()
1262 panthor_device_schedule_reset(ptdev); in csg_slot_sync_state_locked()
1265 csg_slot_sync_queues_state_locked(ptdev, csg_id); in csg_slot_sync_state_locked()
1278 cs_slot_reset_locked(ptdev, csg_id, i); in csg_slot_sync_state_locked()
1286 csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority) in csg_slot_prog_locked() argument
1293 lockdep_assert_held(&ptdev->scheduler->lock); in csg_slot_prog_locked()
1298 if (drm_WARN_ON(&ptdev->base, csg_id >= MAX_CSGS)) in csg_slot_prog_locked()
1301 csg_slot = &ptdev->scheduler->csg_slots[csg_id]; in csg_slot_prog_locked()
1306 csg_iface = panthor_fw_get_csg_iface(group->ptdev, csg_id); in csg_slot_prog_locked()
1310 cs_slot_prog_locked(ptdev, csg_id, i); in csg_slot_prog_locked()
1342 cs_slot_process_fatal_event_locked(struct panthor_device *ptdev, in cs_slot_process_fatal_event_locked() argument
1345 struct panthor_scheduler *sched = ptdev->scheduler; in cs_slot_process_fatal_event_locked()
1354 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); in cs_slot_process_fatal_event_locked()
1365 panthor_device_schedule_reset(ptdev); in cs_slot_process_fatal_event_locked()
1371 drm_warn(&ptdev->base, in cs_slot_process_fatal_event_locked()
1378 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fatal)), in cs_slot_process_fatal_event_locked()
1384 cs_slot_process_fault_event_locked(struct panthor_device *ptdev, in cs_slot_process_fault_event_locked() argument
1387 struct panthor_scheduler *sched = ptdev->scheduler; in cs_slot_process_fault_event_locked()
1398 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); in cs_slot_process_fault_event_locked()
1419 drm_warn(&ptdev->base, in cs_slot_process_fault_event_locked()
1426 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fault)), in cs_slot_process_fault_event_locked()
1433 struct panthor_device *ptdev = group->ptdev; in group_process_tiler_oom() local
1434 struct panthor_scheduler *sched = ptdev->scheduler; in group_process_tiler_oom()
1446 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); in group_process_tiler_oom()
1480 drm_warn(&ptdev->base, "Failed to extend the tiler heap\n"); in group_process_tiler_oom()
1492 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); in group_process_tiler_oom()
1493 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); in group_process_tiler_oom()
1499 panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id)); in group_process_tiler_oom()
1534 cs_slot_process_tiler_oom_event_locked(struct panthor_device *ptdev, in cs_slot_process_tiler_oom_event_locked() argument
1537 struct panthor_scheduler *sched = ptdev->scheduler; in cs_slot_process_tiler_oom_event_locked()
1543 if (drm_WARN_ON(&ptdev->base, !group)) in cs_slot_process_tiler_oom_event_locked()
1556 static bool cs_slot_process_irq_locked(struct panthor_device *ptdev, in cs_slot_process_irq_locked() argument
1562 lockdep_assert_held(&ptdev->scheduler->lock); in cs_slot_process_irq_locked()
1564 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); in cs_slot_process_irq_locked()
1570 cs_slot_process_fatal_event_locked(ptdev, csg_id, cs_id); in cs_slot_process_irq_locked()
1573 cs_slot_process_fault_event_locked(ptdev, csg_id, cs_id); in cs_slot_process_irq_locked()
1576 cs_slot_process_tiler_oom_event_locked(ptdev, csg_id, cs_id); in cs_slot_process_irq_locked()
1586 static void csg_slot_sync_idle_state_locked(struct panthor_device *ptdev, u32 csg_id) in csg_slot_sync_idle_state_locked() argument
1588 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; in csg_slot_sync_idle_state_locked()
1591 lockdep_assert_held(&ptdev->scheduler->lock); in csg_slot_sync_idle_state_locked()
1593 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); in csg_slot_sync_idle_state_locked()
1597 static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id) in csg_slot_process_idle_event_locked() argument
1599 struct panthor_scheduler *sched = ptdev->scheduler; in csg_slot_process_idle_event_locked()
1612 static void csg_slot_sync_update_locked(struct panthor_device *ptdev, in csg_slot_sync_update_locked() argument
1615 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; in csg_slot_sync_update_locked()
1618 lockdep_assert_held(&ptdev->scheduler->lock); in csg_slot_sync_update_locked()
1623 sched_queue_work(ptdev->scheduler, sync_upd); in csg_slot_sync_update_locked()
1627 csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 csg_id) in csg_slot_process_progress_timer_event_locked() argument
1629 struct panthor_scheduler *sched = ptdev->scheduler; in csg_slot_process_progress_timer_event_locked()
1635 drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id); in csg_slot_process_progress_timer_event_locked()
1638 if (!drm_WARN_ON(&ptdev->base, !group)) in csg_slot_process_progress_timer_event_locked()
1644 static void sched_process_csg_irq_locked(struct panthor_device *ptdev, u32 csg_id) in sched_process_csg_irq_locked() argument
1650 lockdep_assert_held(&ptdev->scheduler->lock); in sched_process_csg_irq_locked()
1652 if (drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count)) in sched_process_csg_irq_locked()
1655 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); in sched_process_csg_irq_locked()
1680 csg_slot_process_idle_event_locked(ptdev, csg_id); in sched_process_csg_irq_locked()
1683 csg_slot_process_progress_timer_event_locked(ptdev, csg_id); in sched_process_csg_irq_locked()
1689 if (cs_slot_process_irq_locked(ptdev, csg_id, cs_id)) in sched_process_csg_irq_locked()
1696 csg_slot_sync_update_locked(ptdev, csg_id); in sched_process_csg_irq_locked()
1701 panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id)); in sched_process_csg_irq_locked()
1704 static void sched_process_idle_event_locked(struct panthor_device *ptdev) in sched_process_idle_event_locked() argument
1706 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); in sched_process_idle_event_locked()
1708 lockdep_assert_held(&ptdev->scheduler->lock); in sched_process_idle_event_locked()
1712 sched_queue_delayed_work(ptdev->scheduler, tick, 0); in sched_process_idle_event_locked()
1719 static void sched_process_global_irq_locked(struct panthor_device *ptdev) in sched_process_global_irq_locked() argument
1721 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); in sched_process_global_irq_locked()
1724 lockdep_assert_held(&ptdev->scheduler->lock); in sched_process_global_irq_locked()
1731 sched_process_idle_event_locked(ptdev); in sched_process_global_irq_locked()
1739 struct panthor_device *ptdev = sched->ptdev; in process_fw_events_work() local
1744 sched_process_global_irq_locked(ptdev); in process_fw_events_work()
1751 sched_process_csg_irq_locked(ptdev, csg_id); in process_fw_events_work()
1761 void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events) in panthor_sched_report_fw_events() argument
1763 if (!ptdev->scheduler) in panthor_sched_report_fw_events()
1766 atomic_or(events, &ptdev->scheduler->fw_events); in panthor_sched_report_fw_events()
1767 sched_queue_work(ptdev->scheduler, fw_events); in panthor_sched_report_fw_events()
1799 static void csgs_upd_ctx_queue_reqs(struct panthor_device *ptdev, in csgs_upd_ctx_queue_reqs() argument
1803 if (drm_WARN_ON(&ptdev->base, !mask) || in csgs_upd_ctx_queue_reqs()
1804 drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count)) in csgs_upd_ctx_queue_reqs()
1812 static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev, in csgs_upd_ctx_apply_locked() argument
1815 struct panthor_scheduler *sched = ptdev->scheduler; in csgs_upd_ctx_apply_locked()
1828 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); in csgs_upd_ctx_apply_locked()
1834 panthor_fw_ring_csg_doorbells(ptdev, ctx->update_mask); in csgs_upd_ctx_apply_locked()
1844 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); in csgs_upd_ctx_apply_locked()
1846 ret = panthor_fw_csg_wait_acks(ptdev, csg_id, req_mask, &acked, 100); in csgs_upd_ctx_apply_locked()
1849 csg_slot_sync_priority_locked(ptdev, csg_id); in csgs_upd_ctx_apply_locked()
1852 csg_slot_sync_state_locked(ptdev, csg_id); in csgs_upd_ctx_apply_locked()
1855 csg_slot_sync_queues_state_locked(ptdev, csg_id); in csgs_upd_ctx_apply_locked()
1856 csg_slot_sync_idle_state_locked(ptdev, csg_id); in csgs_upd_ctx_apply_locked()
1861 drm_err(&ptdev->base, "CSG %d update request timedout", csg_id); in csgs_upd_ctx_apply_locked()
1894 struct panthor_device *ptdev = group->ptdev; in group_is_idle() local
1898 return ptdev->scheduler->csg_slots[group->csg_id].idle; in group_is_idle()
2001 struct panthor_device *ptdev = sched->ptdev; in tick_ctx_init() local
2023 csg_iface = panthor_fw_get_csg_iface(ptdev, i); in tick_ctx_init()
2030 sched_process_csg_irq_locked(ptdev, i); in tick_ctx_init()
2038 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i, in tick_ctx_init()
2043 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); in tick_ctx_init()
2045 panthor_device_schedule_reset(ptdev); in tick_ctx_init()
2058 if (drm_WARN_ON(&group->ptdev->base, group_can_run(group))) in group_term_post_processing()
2089 sched_queue_work(group->ptdev->scheduler, sync_upd); in group_term_post_processing()
2112 struct panthor_device *ptdev = sched->ptdev; in tick_ctx_cleanup() local
2121 drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask && in tick_ctx_cleanup()
2144 drm_WARN_ON(&ptdev->base, in tick_ctx_cleanup()
2165 struct panthor_device *ptdev = sched->ptdev; in tick_ctx_apply() local
2180 if (drm_WARN_ON(&ptdev->base, csg_id < 0)) in tick_ctx_apply()
2184 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, in tick_ctx_apply()
2200 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); in tick_ctx_apply()
2209 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, in tick_ctx_apply()
2216 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); in tick_ctx_apply()
2218 panthor_device_schedule_reset(ptdev); in tick_ctx_apply()
2231 sched_process_csg_irq_locked(ptdev, group->csg_id); in tick_ctx_apply()
2257 if (drm_WARN_ON(&ptdev->base, csg_id < 0)) in tick_ctx_apply()
2260 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); in tick_ctx_apply()
2263 csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--); in tick_ctx_apply()
2264 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, in tick_ctx_apply()
2268 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, in tick_ctx_apply()
2275 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); in tick_ctx_apply()
2277 panthor_device_schedule_reset(ptdev); in tick_ctx_apply()
2331 if (drm_WARN_ON(&sched->ptdev->base, ctx->min_priority >= PANTHOR_CSG_PRIORITY_COUNT)) in tick_ctx_update_resched_target()
2357 struct panthor_device *ptdev = sched->ptdev; in tick_work() local
2363 if (!drm_dev_enter(&ptdev->base, &cookie)) in tick_work()
2366 ret = panthor_device_resume_and_get(ptdev); in tick_work()
2367 if (drm_WARN_ON(&ptdev->base, ret)) in tick_work()
2374 if (panthor_device_reset_is_pending(sched->ptdev)) in tick_work()
2423 panthor_devfreq_record_idle(sched->ptdev); in tick_work()
2425 pm_runtime_put_autosuspend(ptdev->base.dev); in tick_work()
2429 panthor_devfreq_record_busy(sched->ptdev); in tick_work()
2431 pm_runtime_get(ptdev->base.dev); in tick_work()
2449 pm_runtime_mark_last_busy(ptdev->base.dev); in tick_work()
2450 pm_runtime_put_autosuspend(ptdev->base.dev); in tick_work()
2503 drm_WARN_ON(&group->ptdev->base, ret < 0); in sync_upd_work()
2532 struct panthor_device *ptdev = group->ptdev; in group_schedule_locked() local
2533 struct panthor_scheduler *sched = ptdev->scheduler; in group_schedule_locked()
2610 struct panthor_scheduler *sched = group->ptdev->scheduler; in panthor_group_stop()
2623 struct panthor_scheduler *sched = group->ptdev->scheduler; in panthor_group_start()
2625 lockdep_assert_held(&group->ptdev->scheduler->reset.lock); in panthor_group_start()
2644 static void panthor_sched_immediate_tick(struct panthor_device *ptdev) in panthor_sched_immediate_tick() argument
2646 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_sched_immediate_tick()
2654 void panthor_sched_report_mmu_fault(struct panthor_device *ptdev) in panthor_sched_report_mmu_fault() argument
2657 if (ptdev->scheduler) in panthor_sched_report_mmu_fault()
2658 panthor_sched_immediate_tick(ptdev); in panthor_sched_report_mmu_fault()
2661 void panthor_sched_resume(struct panthor_device *ptdev) in panthor_sched_resume() argument
2664 panthor_sched_immediate_tick(ptdev); in panthor_sched_resume()
2667 void panthor_sched_suspend(struct panthor_device *ptdev) in panthor_sched_suspend() argument
2669 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_sched_suspend()
2681 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i, in panthor_sched_suspend()
2690 csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); in panthor_sched_suspend()
2696 drm_err(&ptdev->base, "CSG suspend failed, escalating to termination"); in panthor_sched_suspend()
2713 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, in panthor_sched_suspend()
2719 csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); in panthor_sched_suspend()
2743 if (panthor_gpu_flush_caches(ptdev, CACHE_CLEAN, CACHE_CLEAN, 0)) in panthor_sched_suspend()
2753 csg_slot_sync_update_locked(ptdev, csg_id); in panthor_sched_suspend()
2769 sched_process_csg_irq_locked(ptdev, group->csg_id); in panthor_sched_suspend()
2773 drm_WARN_ON(&group->ptdev->base, !list_empty(&group->run_node)); in panthor_sched_suspend()
2790 void panthor_sched_pre_reset(struct panthor_device *ptdev) in panthor_sched_pre_reset() argument
2792 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_sched_pre_reset()
2805 panthor_sched_suspend(ptdev); in panthor_sched_pre_reset()
2812 drm_WARN_ON(&ptdev->base, !list_empty(&sched->groups.runnable[i])); in panthor_sched_pre_reset()
2825 void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed) in panthor_sched_post_reset() argument
2827 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_sched_post_reset()
2989 struct panthor_device *ptdev = group->ptdev; in get_job_cs_params() local
2990 struct panthor_scheduler *sched = ptdev->scheduler; in get_job_cs_params()
2992 params->addr_reg = ptdev->csif_info.cs_reg_count - in get_job_cs_params()
2993 ptdev->csif_info.unpreserved_cs_reg_count; in get_job_cs_params()
3118 struct panthor_device *ptdev = group->ptdev; in queue_run_job() local
3119 struct panthor_scheduler *sched = ptdev->scheduler; in queue_run_job()
3134 ret = panthor_device_resume_and_get(ptdev); in queue_run_job()
3135 if (drm_WARN_ON(&ptdev->base, ret)) in queue_run_job()
3189 gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1); in queue_run_job()
3192 pm_runtime_get(ptdev->base.dev); in queue_run_job()
3195 panthor_devfreq_record_busy(sched->ptdev); in queue_run_job()
3206 pm_runtime_mark_last_busy(ptdev->base.dev); in queue_run_job()
3207 pm_runtime_put_autosuspend(ptdev->base.dev); in queue_run_job()
3217 struct panthor_device *ptdev = group->ptdev; in queue_timedout_job() local
3218 struct panthor_scheduler *sched = ptdev->scheduler; in queue_timedout_job()
3221 drm_warn(&ptdev->base, "job timeout\n"); in queue_timedout_job()
3223 drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress)); in queue_timedout_job()
3230 sched_queue_delayed_work(ptdev->scheduler, tick, 0); in queue_timedout_job()
3259 static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev, in calc_profiling_ringbuf_num_slots() argument
3292 .submit_wq = group->ptdev->scheduler->wq, in group_create_queue()
3302 .timeout_wq = group->ptdev->reset.wq, in group_create_queue()
3304 .dev = group->ptdev->base.dev, in group_create_queue()
3330 queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm, in group_create_queue()
3346 queue->iface.mem = panthor_fw_alloc_queue_iface_mem(group->ptdev, in group_create_queue()
3357 calc_profiling_ringbuf_num_slots(group->ptdev, args->ringbuf_size); in group_create_queue()
3360 panthor_kernel_bo_create(group->ptdev, group->vm, in group_create_queue()
3392 static void add_group_kbo_sizes(struct panthor_device *ptdev, in add_group_kbo_sizes() argument
3398 if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(group))) in add_group_kbo_sizes()
3400 if (drm_WARN_ON(&ptdev->base, ptdev != group->ptdev)) in add_group_kbo_sizes()
3421 struct panthor_device *ptdev = pfile->ptdev; in panthor_group_create() local
3423 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_group_create()
3424 struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0); in panthor_group_create()
3435 if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) || in panthor_group_create()
3436 (group_args->fragment_core_mask & ~ptdev->gpu_info.shader_present) || in panthor_group_create()
3437 (group_args->tiler_core_mask & ~ptdev->gpu_info.tiler_present)) in panthor_group_create()
3454 group->ptdev = ptdev; in panthor_group_create()
3477 group->suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size); in panthor_group_create()
3485 group->protm_suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size); in panthor_group_create()
3492 group->syncobjs = panthor_kernel_bo_create(ptdev, group->vm, in panthor_group_create()
3540 add_group_kbo_sizes(group->ptdev, group); in panthor_group_create()
3553 struct panthor_device *ptdev = pfile->ptdev; in panthor_group_destroy() local
3554 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_group_destroy()
3602 struct panthor_device *ptdev = pfile->ptdev; in panthor_group_get_state() local
3603 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_group_get_state()
3691 drm_WARN_ON(&job->group->ptdev->base, !list_empty(&job->node)); in job_release()
3799 job->profiling.mask = pfile->ptdev->profile_mask; in panthor_job_create()
3827 void panthor_sched_unplug(struct panthor_device *ptdev) in panthor_sched_unplug() argument
3829 struct panthor_scheduler *sched = ptdev->scheduler; in panthor_sched_unplug()
3835 pm_runtime_put(ptdev->base.dev); in panthor_sched_unplug()
3865 int panthor_sched_init(struct panthor_device *ptdev) in panthor_sched_init() argument
3867 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); in panthor_sched_init()
3868 struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0); in panthor_sched_init()
3869 struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, 0, 0); in panthor_sched_init()
3874 sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL); in panthor_sched_init()
3896 gpu_as_count = hweight32(ptdev->gpu_info.as_present & GENMASK(31, 1)); in panthor_sched_init()
3898 drm_err(&ptdev->base, "Not enough AS (%d, expected at least 2)", in panthor_sched_init()
3903 sched->ptdev = ptdev; in panthor_sched_init()
3908 ptdev->csif_info.csg_slot_count = sched->csg_slot_count; in panthor_sched_init()
3909 ptdev->csif_info.cs_slot_count = sched->cs_slot_count; in panthor_sched_init()
3910 ptdev->csif_info.scoreboard_slot_count = sched->sb_slot_count; in panthor_sched_init()
3919 ret = drmm_mutex_init(&ptdev->base, &sched->lock); in panthor_sched_init()
3929 ret = drmm_mutex_init(&ptdev->base, &sched->reset.lock); in panthor_sched_init()
3953 panthor_sched_fini(&ptdev->base, sched); in panthor_sched_init()
3954 drm_err(&ptdev->base, "Failed to allocate the workqueues"); in panthor_sched_init()
3958 ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched); in panthor_sched_init()
3962 ptdev->scheduler = sched; in panthor_sched_init()
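
A pattern recurring across these hits is that scheduler objects keep a back-pointer to the owning device: helpers fetch ptdev from group->ptdev or sched->ptdev, then use &ptdev->base (the embedded DRM device) for drm_WARN_ON()/drm_err() reporting and ptdev->scheduler for slot lookups. The sketch below is illustrative only; the types are simplified stand-ins, not the real panthor definitions (those live in panthor_device.h and panthor_sched.c).

/*
 * Illustrative sketch of the ptdev back-pointer pattern visible in the
 * hits above. Simplified, assumed types; not the driver's actual layout.
 */
struct panthor_device;                    /* owns the DRM device and the scheduler */

struct panthor_scheduler {
	struct panthor_device *ptdev;     /* back-pointer (cf. the hit at line 536) */
	/* ... lock, wq, csg_slots[], ... */
};

struct panthor_group {
	struct panthor_device *ptdev;     /* back-pointer (cf. the hit at line 153) */
	int csg_id;
	/* ... queues[], state, run_node, ... */
};

/* Helpers first dereference the back-pointer, then touch device-wide
 * state, mirroring the group_*/cs_slot_* hits listed above. */
static void example_group_helper(struct panthor_group *group)
{
	struct panthor_device *ptdev = group->ptdev;

	/* e.g. lockdep_assert_held(&ptdev->scheduler->lock); */
	/* e.g. drm_WARN_ON(&ptdev->base, group->csg_id < 0); */
	(void)ptdev;
}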