
Searched refs:sched (Results 1 – 25 of 180) sorted by relevance

/drivers/gpu/drm/scheduler/
sched_main.c
189 rq->sched = sched; in drm_sched_rq_init()
348 queue_work(sched->submit_wq, &sched->work_run_job); in drm_sched_run_job_queue()
358 queue_work(sched->submit_wq, &sched->work_free_job); in __drm_sched_run_free_queue()
391 struct drm_gpu_scheduler *sched = s_fence->sched; in drm_sched_job_done() local
429 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout); in drm_sched_start_timeout()
523 struct drm_gpu_scheduler *sched = s_job->sched; in drm_sched_job_begin() local
633 if (bad && bad->sched == sched) in drm_sched_stop()
883 sched = entity->rq->sched; in drm_sched_job_arm()
885 job->sched = sched; in drm_sched_job_arm()
1367 drm_sched_rq_init(sched, sched->sched_rq[i]); in drm_sched_init()
[all …]
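
These hits trace the core drm_gpu_scheduler lifecycle: drm_sched_rq_init() binds each run queue to its scheduler, job-run and job-free work is queued on sched->submit_wq, and the TDR is armed via mod_delayed_work() on sched->timeout_wq. A minimal driver-side init sketch, assuming the args-based drm_sched_init() signature used by etnaviv and panfrost below (the ops table, name, and credit values are illustrative, and struct drm_sched_init_args field names may differ on older kernels):

    #include <linux/jiffies.h>
    #include <drm/gpu_scheduler.h>

    /* Hypothetical driver scheduler setup. */
    static int my_sched_init(struct drm_gpu_scheduler *sched,
                             const struct drm_sched_backend_ops *ops,
                             struct device *dev)
    {
            const struct drm_sched_init_args args = {
                    .ops = ops,
                    .submit_wq = NULL,      /* NULL: scheduler allocates its own */
                    .num_rqs = DRM_SCHED_PRIORITY_COUNT,
                    .credit_limit = 64,     /* example in-flight budget */
                    .hang_limit = 0,
                    .timeout = msecs_to_jiffies(500),
                    .name = "my-sched",
                    .dev = dev,
            };

            return drm_sched_init(sched, &args);
    }
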
sched_entity.c
182 job->sched->ops->free_job(job); in drm_sched_entity_kill_jobs_work()
280 struct drm_gpu_scheduler *sched; in drm_sched_entity_flush() local
287 sched = entity->rq->sched; in drm_sched_entity_flush()
295 sched->job_scheduled, in drm_sched_entity_flush()
396 struct drm_gpu_scheduler *sched = entity->rq->sched; in drm_sched_entity_add_dependency_cb() local
412 if (!fence->error && s_fence && s_fence->sched == sched && in drm_sched_entity_add_dependency_cb()
448 if (job->sched->ops->prepare_job) in drm_sched_job_dependency()
520 struct drm_gpu_scheduler *sched; in drm_sched_entity_select_rq() local
547 rq = sched ? sched->sched_rq[entity->priority] : NULL; in drm_sched_entity_select_rq()
608 sched = rq->sched; in drm_sched_entity_push_job()
[all …]
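
The entity side of the same API: drm_sched_entity_flush() waits on sched->job_scheduled, the dependency callback short-circuits fences from the same scheduler, and drm_sched_entity_select_rq() picks a run queue by entity priority before drm_sched_entity_push_job() hands the job over. A hedged sketch of the canonical submit flow over these entry points (my_job is a made-up wrapper; very recent kernels add a drm_client_id argument to drm_sched_job_init()):

    #include <drm/gpu_scheduler.h>

    struct my_job {
            struct drm_sched_job base;  /* must embed the scheduler job */
    };

    static int my_submit(struct my_job *job, struct drm_sched_entity *entity,
                         void *owner)
    {
            /* Bind the job to the entity; 1 is an example credit count. */
            int ret = drm_sched_job_init(&job->base, entity, 1, owner);

            if (ret)
                    return ret;

            /* Arm selects the run queue (drm_sched_entity_select_rq() above)
             * and initializes the scheduled fence. */
            drm_sched_job_arm(&job->base);

            /* From here on the scheduler owns the job. */
            drm_sched_entity_push_job(&job->base);
            return 0;
    }
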
sched_fence.c
95 return (const char *)fence->sched->name; in drm_sched_fence_get_timeline_name()
118 if (!WARN_ON_ONCE(fence->sched)) in drm_sched_fence_free()
230 fence->sched = entity->rq->sched; in drm_sched_fence_init()
/drivers/gpu/drm/xe/
xe_gpu_scheduler.c
11 queue_work(sched->base.submit_wq, &sched->work_process_msg); in xe_sched_process_msg_queue()
18 xe_sched_msg_lock(sched); in xe_sched_process_msg_queue_if_ready()
22 xe_sched_msg_unlock(sched); in xe_sched_process_msg_queue_if_ready()
30 xe_sched_msg_lock(sched); in xe_sched_get_msg()
35 xe_sched_msg_unlock(sched); in xe_sched_get_msg()
49 msg = xe_sched_get_msg(sched); in xe_sched_process_msg_work()
79 sched->ops = xe_ops; in xe_sched_init()
95 queue_work(sched->base.submit_wq, &sched->work_process_msg); in xe_sched_submission_start()
106 drm_sched_resume_timeout(&sched->base, sched->base.timeout); in xe_sched_submission_resume_tdr()
112 xe_sched_msg_lock(sched); in xe_sched_add_msg()
[all …]
xe_gpu_scheduler.h
12 int xe_sched_init(struct xe_gpu_scheduler *sched,
34 spin_lock(&sched->base.job_list_lock); in xe_sched_msg_lock()
39 spin_unlock(&sched->base.job_list_lock); in xe_sched_msg_unlock()
44 drm_sched_stop(&sched->base, NULL); in xe_sched_stop()
49 drm_sched_tdr_queue_imm(&sched->base); in xe_sched_tdr_queue_imm()
61 sched->base.ops->run_job(s_job); in xe_sched_resubmit_jobs()
74 spin_lock(&sched->base.job_list_lock); in xe_sched_add_pending_job()
76 spin_unlock(&sched->base.job_list_lock); in xe_sched_add_pending_job()
84 spin_lock(&sched->base.job_list_lock); in xe_sched_first_pending_job()
87 spin_unlock(&sched->base.job_list_lock); in xe_sched_first_pending_job()
[all …]
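
xe wraps the base scheduler and reuses its job_list_lock as the message-queue lock: xe_sched_msg_lock()/xe_sched_msg_unlock() are thin spin_lock wrappers, and new messages kick work_process_msg on the base submit workqueue. A sketch of that borrowed-lock pattern (the message struct and list are illustrative):

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>
    #include <drm/gpu_scheduler.h>

    struct my_sched_msg {
            struct list_head link;
            u32 opcode;
    };

    struct my_gpu_scheduler {
            struct drm_gpu_scheduler base;
            struct list_head msgs;
            struct work_struct work_process_msg;
    };

    static void my_sched_add_msg(struct my_gpu_scheduler *sched,
                                 struct my_sched_msg *msg)
    {
            /* Reuse the base scheduler's lock, as xe_sched_msg_lock() does. */
            spin_lock(&sched->base.job_list_lock);
            list_add_tail(&msg->link, &sched->msgs);
            spin_unlock(&sched->base.job_list_lock);

            /* Process on the scheduler's ordered submit workqueue. */
            queue_work(sched->base.submit_wq, &sched->work_process_msg);
    }
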
xe_guc_submit.c
814 struct xe_gpu_scheduler *sched = &q->guc->sched; in disable_scheduling_deregister() local
910 struct xe_gpu_scheduler *sched = &ge->sched; in xe_guc_exec_queue_lr_cleanup() local
1080 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_timedout_job() local
1500 sched = &ge->sched; in guc_exec_queue_init()
1517 xe_sched_stop(sched); in guc_exec_queue_init()
1645 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_suspend() local
1651 xe_sched_msg_lock(sched); in guc_exec_queue_suspend()
1688 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_resume() local
1725 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_stop() local
1831 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_start() local
[all …]
/drivers/gpu/drm/scheduler/tests/
mock_scheduler.c
34 drm_sched = &sched->base; in drm_mock_sched_entity_new()
172 &sched->lock, in mock_sched_run_job()
178 spin_lock_irq(&sched->lock); in mock_sched_run_job()
198 spin_unlock_irq(&sched->lock); in mock_sched_run_job()
217 struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched); in mock_sched_timedout_job() local
254 struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched); in mock_sched_cancel_job() local
302 sched = kunit_kzalloc(test, sizeof(*sched), GFP_KERNEL); in drm_mock_sched_new()
308 sched->test = test; in drm_mock_sched_new()
312 spin_lock_init(&sched->lock); in drm_mock_sched_new()
314 return sched; in drm_mock_sched_new()
[all …]
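
The mock scheduler ties its allocation to the test lifetime with kunit_kzalloc() and guards its fake timeline with a spinlock. A minimal fixture in the same style (names are hypothetical):

    #include <kunit/test.h>
    #include <linux/gfp.h>
    #include <linux/spinlock.h>

    struct my_mock_state {
            struct kunit *test;
            spinlock_t lock;
    };

    static struct my_mock_state *my_mock_new(struct kunit *test)
    {
            /* kunit-managed memory: freed automatically at test exit. */
            struct my_mock_state *state =
                    kunit_kzalloc(test, sizeof(*state), GFP_KERNEL);

            KUNIT_ASSERT_NOT_NULL(test, state);

            state->test = test;
            spin_lock_init(&state->lock);

            return state;
    }
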
tests_basic.c
28 drm_mock_sched_fini(sched); in drm_sched_basic_exit()
53 sched); in drm_sched_basic_submit()
134 sched); in drm_sched_basic_test()
172 sched); in drm_sched_basic_entity_cleanup()
223 sched); in drm_sched_basic_cancel()
233 drm_mock_sched_fini(sched); in drm_sched_basic_cancel()
265 sched); in drm_sched_basic_timeout()
306 sched); in drm_sched_skip_reset()
462 sched[i % ARRAY_SIZE(sched)]); in drm_sched_test_modify_sched()
518 sched); in drm_sched_test_credits()
[all …]
sched_tests.h
115 drm_sched_to_mock_sched(struct drm_gpu_scheduler *sched) in drm_sched_to_mock_sched() argument
117 return container_of(sched, struct drm_mock_scheduler, base); in drm_sched_to_mock_sched()
134 void drm_mock_sched_fini(struct drm_mock_scheduler *sched);
135 unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched,
141 struct drm_mock_scheduler *sched);
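
drm_sched_to_mock_sched() is the standard container_of() downcast from the embedded base scheduler to the driver wrapper; lima's to_lima_pipe() at the end of these results is the same idiom. In general (hypothetical names):

    #include <linux/container_of.h>
    #include <drm/gpu_scheduler.h>

    struct my_scheduler {
            struct drm_gpu_scheduler base;  /* embedded, not a pointer */
            int extra_state;
    };

    /* Recover the wrapper from the base pointer the core hands back,
     * e.g. via sched_job->sched inside run_job(). */
    static inline struct my_scheduler *
    to_my_scheduler(struct drm_gpu_scheduler *sched)
    {
            return container_of(sched, struct my_scheduler, base);
    }
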
/drivers/gpu/drm/panthor/
panthor_sched.c
741 queue_work((sched)->wq, &(sched)->wname ## _work); \
2339 u64 resched_target = sched->last_tick + sched->tick_period; in tick_ctx_update_resched_target()
2341 if (time_before64(sched->resched_target, sched->last_tick) || in tick_ctx_update_resched_target()
2345 return sched->resched_target - sched->last_tick; in tick_ctx_update_resched_target()
2573 if (sched->used_csg_slot_count < sched->csg_slot_count) in group_schedule_locked()
2583 sched->resched_target = sched->last_tick + sched->tick_period; in group_schedule_locked()
2584 if (sched->used_csg_slot_count == sched->csg_slot_count && in group_schedule_locked()
3846 if (!sched || !sched->csg_slot_count) in panthor_sched_fini()
3851 if (sched->wq) in panthor_sched_fini()
3875 if (!sched) in panthor_sched_init()
[all …]
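
panthor's tick logic keeps the next reschedule point at last_tick + tick_period and uses time_before64() to catch a target that has fallen behind the 64-bit tick counter. The arithmetic, isolated into a sketch (helper name is made up, and the real update has additional conditions elided here):

    #include <linux/jiffies.h>
    #include <linux/types.h>

    /* Returns the delay until the next tick, refreshing a stale target. */
    static u64 my_update_resched_target(u64 last_tick, u64 tick_period,
                                        u64 resched_target)
    {
            /* time_before64() is wraparound-safe for 64-bit counters. */
            if (time_before64(resched_target, last_tick))
                    resched_target = last_tick + tick_period;

            return resched_target - last_tick;
    }
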
/drivers/gpu/drm/nouveau/
nouveau_sched.c
30 struct nouveau_sched *sched = args->sched; in nouveau_job_init() local
37 job->sched = sched; in nouveau_job_init()
123 struct nouveau_sched *sched = job->sched; in nouveau_job_done() local
276 struct nouveau_sched *sched = job->sched; in nouveau_job_submit() local
371 struct drm_gpu_scheduler *sched = sched_job->sched; in nouveau_sched_timedout_job() local
469 if (sched->wq) in nouveau_sched_init()
481 sched = kzalloc(sizeof(*sched), GFP_KERNEL); in nouveau_sched_create()
482 if (!sched) in nouveau_sched_create()
487 kfree(sched); in nouveau_sched_create()
508 if (sched->wq) in nouveau_sched_fini()
[all …]
/drivers/slimbus/
sched.c
29 struct slim_sched *sched = &ctrl->sched; in slim_ctrl_clk_pause() local
38 mutex_lock(&sched->m_reconf); in slim_ctrl_clk_pause()
41 mutex_unlock(&sched->m_reconf); in slim_ctrl_clk_pause()
52 mutex_unlock(&sched->m_reconf); in slim_ctrl_clk_pause()
67 mutex_unlock(&sched->m_reconf); in slim_ctrl_clk_pause()
74 mutex_unlock(&sched->m_reconf); in slim_ctrl_clk_pause()
83 mutex_unlock(&sched->m_reconf); in slim_ctrl_clk_pause()
112 sched->clk_state = SLIM_CLK_ACTIVE; in slim_ctrl_clk_pause()
114 sched->clk_state = SLIM_CLK_PAUSED; in slim_ctrl_clk_pause()
115 complete(&sched->pause_comp); in slim_ctrl_clk_pause()
[all …]
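
slim_ctrl_clk_pause() serializes every clock-state transition under sched->m_reconf (note that each early-exit path unlocks) and signals waiters through pause_comp once the state reaches SLIM_CLK_PAUSED. The same mutex-plus-completion shape, reduced to a sketch (states and names are illustrative):

    #include <linux/completion.h>
    #include <linux/mutex.h>

    struct my_clk_sched {
            struct mutex m_reconf;
            struct completion pause_comp;
            int clk_state;  /* 0 = active, 1 = paused (illustrative) */
    };

    static void my_clk_pause(struct my_clk_sched *s, bool wakeup)
    {
            mutex_lock(&s->m_reconf);

            if (wakeup) {
                    s->clk_state = 0;       /* back to active */
                    mutex_unlock(&s->m_reconf);
                    return;
            }

            s->clk_state = 1;               /* paused */
            complete(&s->pause_comp);       /* wake anyone waiting on the pause */
            mutex_unlock(&s->m_reconf);
    }
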
/drivers/net/wireless/ath/ath9k/
channel.c
446 if (sc->sched.extend_absence) in ath_chanctx_set_periodic_noa()
455 sc->sched.extend_absence) in ath_chanctx_set_periodic_noa()
530 if (sc->sched.offchannel_pending && !sc->sched.wait_switch) { in ath_chanctx_event()
553 if (sc->sched.mgd_prepare_tx) in ath_chanctx_event()
660 sc->sched.beacon_pending) in ath_chanctx_event()
661 sc->sched.beacon_miss++; in ath_chanctx_event()
675 sc->sched.beacon_miss = 0; in ath_chanctx_event()
727 sc->sched.beacon_miss = 0; in ath_chanctx_event()
775 if (sc->sched.beacon_pending) in ath_chanctx_beacon_sent_ev()
809 sc->sched.wait_switch = true; in ath_chanctx_switch()
[all …]
/drivers/gpu/drm/amd/amdgpu/
amdgpu_job.c
90 struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); in amdgpu_job_timedout()
99 __func__, s_job->sched->name); in amdgpu_job_timedout()
118 s_job->sched->name); in amdgpu_job_timedout()
123 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), in amdgpu_job_timedout()
139 s_job->sched->name); in amdgpu_job_timedout()
144 ring->sched.name); in amdgpu_job_timedout()
173 drm_sched_suspend_timeout(&ring->sched); in amdgpu_job_timedout()
329 job->base.sched = &ring->sched; in amdgpu_job_submit_direct()
380 struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched); in amdgpu_job_run()
442 struct drm_sched_rq *rq = sched->sched_rq[i]; in amdgpu_job_stop_all_jobs_on_sched()
[all …]
amdgpu_sdma.c
381 ring->sched.ready = true; in amdgpu_debugfs_sdma_sched_mask_set()
383 ring->sched.ready = false; in amdgpu_debugfs_sdma_sched_mask_set()
387 page->sched.ready = true; in amdgpu_debugfs_sdma_sched_mask_set()
389 page->sched.ready = false; in amdgpu_debugfs_sdma_sched_mask_set()
420 if (ring->sched.ready) in amdgpu_debugfs_sdma_sched_mask_get()
426 if (page->sched.ready) in amdgpu_debugfs_sdma_sched_mask_get()
568 drm_sched_wqueue_stop(&gfx_ring->sched); in amdgpu_sdma_reset_engine()
571 drm_sched_wqueue_stop(&page_ring->sched); in amdgpu_sdma_reset_engine()
601 drm_sched_wqueue_start(&gfx_ring->sched); in amdgpu_sdma_reset_engine()
604 drm_sched_wqueue_start(&page_ring->sched); in amdgpu_sdma_reset_engine()
/drivers/net/wireless/mediatek/mt76/
sdio_txrx.c
48 sdio->sched.pse_mcu_quota += pse_mcu_quota; in mt76s_refill_sched_quota()
51 sdio->sched.pse_mcu_quota = sdio->pse_mcu_quota_max; in mt76s_refill_sched_quota()
53 sdio->sched.pse_data_quota += pse_data_quota; in mt76s_refill_sched_quota()
54 sdio->sched.ple_data_quota += ple_data_quota; in mt76s_refill_sched_quota()
193 pse_sz = DIV_ROUND_UP(buf_sz + sdio->sched.deficit, in mt76s_tx_pick_quota()
194 sdio->sched.pse_page_size); in mt76s_tx_pick_quota()
200 if (sdio->sched.pse_mcu_quota < *pse_size + pse_sz) in mt76s_tx_pick_quota()
204 sdio->sched.ple_data_quota < *ple_size + 1) in mt76s_tx_pick_quota()
219 sdio->sched.pse_mcu_quota -= pse_size; in mt76s_tx_update_quota()
221 sdio->sched.pse_data_quota -= pse_size; in mt76s_tx_update_quota()
[all …]
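
The SDIO TX path converts a frame into PSE pages with DIV_ROUND_UP() against pse_page_size (carrying a per-queue deficit) and rejects the frame when it would overdraw the remaining quota. The check, isolated (the struct is a stand-in for mt76's sched state):

    #include <linux/math.h>
    #include <linux/types.h>

    struct my_quota {
            int pse_data_quota;
            int pse_page_size;
            int deficit;
    };

    /* True if buf_sz bytes fit the remaining quota; *pse_size accumulates
     * pages claimed by earlier frames in the same batch. */
    static bool my_tx_pick_quota(struct my_quota *q, u32 buf_sz, int *pse_size)
    {
            int pse_sz = DIV_ROUND_UP(buf_sz + q->deficit, q->pse_page_size);

            if (q->pse_data_quota < *pse_size + pse_sz)
                    return false;

            *pse_size += pse_sz;
            return true;
    }
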
/drivers/gpu/drm/etnaviv/
etnaviv_sched.c
77 drm_sched_stop(&gpu->sched, sched_job); in etnaviv_sched_timedout_job()
86 drm_sched_resubmit_jobs(&gpu->sched); in etnaviv_sched_timedout_job()
88 drm_sched_start(&gpu->sched, 0); in etnaviv_sched_timedout_job()
153 return drm_sched_init(&gpu->sched, &args); in etnaviv_sched_init()
158 drm_sched_fini(&gpu->sched); in etnaviv_sched_fini()
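
etnaviv's timeout handler is the classic recovery sequence in three calls: drm_sched_stop() parks the scheduler and removes the bad job, drm_sched_resubmit_jobs() requeues the survivors, and drm_sched_start() resumes. As a hedged skeleton (the reset hook is the driver's own; drm_sched_resubmit_jobs() is deprecated in current kernels, and the exact status constant varies across versions):

    #include <drm/gpu_scheduler.h>

    static void my_hw_reset(void)
    {
            /* driver-specific GPU reset would go here */
    }

    static enum drm_gpu_sched_stat
    my_timedout_job(struct drm_sched_job *bad_job)
    {
            struct drm_gpu_scheduler *sched = bad_job->sched;

            drm_sched_stop(sched, bad_job);     /* park, pull the bad job out */
            my_hw_reset();                      /* recover the hardware */
            drm_sched_resubmit_jobs(sched);     /* requeue surviving jobs */
            drm_sched_start(sched, 0);          /* 0: restart without error */

            return DRM_GPU_SCHED_STAT_NOMINAL;
    }
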
/drivers/gpu/drm/i915/
i915_request.c
412 i915_sched_node_fini(&rq->sched); in i915_request_retire()
657 if (request->sched.semaphores && in __i915_request_submit()
737 request->sched.semaphores = 0; in __i915_request_unsubmit()
877 i915_sched_node_init(&rq->sched); in __i915_request_ctor()
970 i915_sched_node_reinit(&rq->sched); in __i915_request_create()
1239 to->sched.semaphores |= mask; in emit_semaphore_wait()
1324 &from->sched, in __i915_request_await_execution()
1482 &from->sched, in i915_request_await_request()
1651 &prev->sched, in __i915_request_ensure_parallel_ordering()
1706 &prev->sched, in __i915_request_ensure_ordering()
[all …]
/drivers/net/wireless/mediatek/mt76/mt7615/
sdio_mcu.c
24 sdio->sched.pse_data_quota = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP, in mt7663s_mcu_init_sched()
26 sdio->sched.pse_mcu_quota = mt76_get_field(dev, MT_PSE_PG_HIF1_GROUP, in mt7663s_mcu_init_sched()
28 sdio->sched.ple_data_quota = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP, in mt7663s_mcu_init_sched()
30 sdio->sched.pse_page_size = MT_PSE_PAGE_SZ; in mt7663s_mcu_init_sched()
33 sdio->sched.deficit = txdwcnt << 2; in mt7663s_mcu_init_sched()
/drivers/gpu/drm/msm/
msm_submitqueue.c
147 struct drm_gpu_scheduler *sched = &ring->sched; in get_sched_entity() local
152 ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL); in get_sched_entity()
216 struct drm_gpu_scheduler *sched = &to_msm_vm(msm_context_vm(drm, ctx))->sched; in msm_submitqueue_create() local
221 &sched, 1, NULL); in msm_submitqueue_create()
/drivers/gpu/drm/msm/adreno/
adreno_device.c
334 struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched; in suspend_scheduler() local
336 drm_sched_wqueue_stop(sched); in suspend_scheduler()
345 struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched; in resume_scheduler() local
347 drm_sched_wqueue_start(sched); in resume_scheduler()
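
Both adreno here and amdgpu_sdma above bracket hardware suspend or reset with drm_sched_wqueue_stop()/drm_sched_wqueue_start(), which quiesce the scheduler's workqueues without tearing down entity or job state. The bracket as a sketch (the scheduler array is illustrative):

    #include <drm/gpu_scheduler.h>

    static void my_suspend_schedulers(struct drm_gpu_scheduler **scheds, int n)
    {
            for (int i = 0; i < n; i++)
                    drm_sched_wqueue_stop(scheds[i]);   /* park submission */
    }

    static void my_resume_schedulers(struct drm_gpu_scheduler **scheds, int n)
    {
            for (int i = 0; i < n; i++)
                    drm_sched_wqueue_start(scheds[i]);  /* resume submission */
    }
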
/drivers/gpu/drm/imagination/
pvr_queue.c
755 struct pvr_queue *queue = container_of(job->base.sched, in pvr_queue_run_job()
811 struct drm_gpu_scheduler *sched = s_job->sched; in pvr_queue_timedout_job() local
812 struct pvr_queue *queue = container_of(sched, struct pvr_queue, scheduler); in pvr_queue_timedout_job()
817 dev_err(sched->dev, "Job timeout\n"); in pvr_queue_timedout_job()
833 drm_sched_stop(sched, s_job); in pvr_queue_timedout_job()
836 list_for_each_entry(job, &sched->pending_list, base.list) { in pvr_queue_timedout_job()
855 drm_sched_start(sched, 0); in pvr_queue_timedout_job()
899 sched_fence->sched->ops == &pvr_queue_sched_ops) in pvr_queue_fence_is_ufo_backed()
1239 struct drm_gpu_scheduler *sched; in pvr_queue_create() local
1277 sched = &queue->scheduler; in pvr_queue_create()
[all …]
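
pvr's handler stops the scheduler, walks sched->pending_list to re-account the in-flight jobs, then restarts; the base scheduler exposes pending_list precisely for this kind of recovery pass. A sketch (per-job bookkeeping is driver-specific and elided):

    #include <linux/list.h>
    #include <drm/gpu_scheduler.h>

    static void my_recover_pending(struct drm_gpu_scheduler *sched,
                                   struct drm_sched_job *bad)
    {
            struct drm_sched_job *job;
            unsigned int pending = 0;

            drm_sched_stop(sched, bad);

            /* Safe to walk: the scheduler is parked. */
            list_for_each_entry(job, &sched->pending_list, list)
                    pending++;  /* pvr re-arms its queue counters here */

            drm_sched_start(sched, 0);
    }
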
/drivers/gpu/drm/panfrost/
panfrost_job.c
31 struct drm_gpu_scheduler sched; member
481 drm_sched_fault(&pfdev->js->queue[js].sched); in panfrost_job_handle_err()
661 drm_sched_stop(&pfdev->js->queue[i].sched, bad); in panfrost_reset()
736 drm_sched_start(&pfdev->js->queue[i].sched, 0); in panfrost_reset()
886 ret = drm_sched_init(&js->queue[j].sched, &args); in panfrost_job_init()
899 drm_sched_fini(&js->queue[j].sched); in panfrost_job_init()
913 drm_sched_fini(&js->queue[j].sched); in panfrost_job_fini()
924 struct drm_gpu_scheduler *sched; in panfrost_job_open() local
928 sched = &js->queue[i].sched; in panfrost_job_open()
930 DRM_SCHED_PRIORITY_NORMAL, &sched, in panfrost_job_open()
[all …]
/drivers/gpu/drm/i915/gt/
intel_engine_heartbeat.c
41 if (rq && rq->sched.attr.priority >= I915_PRIORITY_BARRIER && in next_heartbeat()
112 rq->sched.attr.priority); in show_heartbeat()
185 rq->sched.attr.priority < I915_PRIORITY_BARRIER) { in heartbeat()
193 if (rq->sched.attr.priority >= attr.priority) in heartbeat()
195 if (rq->sched.attr.priority >= attr.priority) in heartbeat()
292 GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER); in __intel_engine_pulse()
/drivers/gpu/drm/lima/
lima_sched.c
108 static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sched) in to_lima_pipe() argument
110 return container_of(sched, struct lima_sched_pipe, base); in to_lima_pipe()
161 struct drm_gpu_scheduler *sched = &pipe->base; in lima_sched_context_init() local
164 &sched, 1, NULL); in lima_sched_context_init()
207 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); in lima_sched_run_job()
274 struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched); in lima_sched_build_error_task_list()
403 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); in lima_sched_timedout_job()
476 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); in lima_sched_free_job()

Completed in 911 milliseconds
