| /drivers/gpu/drm/scheduler/ |
| sched_main.c |
   390   struct drm_sched_fence *s_fence = s_job->s_fence;  in drm_sched_job_done() local
   396   trace_drm_sched_job_done(s_fence);  in drm_sched_job_done()
   398   dma_fence_get(&s_fence->finished);  in drm_sched_job_done()
   649   if (s_job->s_fence->parent &&  in drm_sched_stop()
   653   s_job->s_fence->parent = NULL;  in drm_sched_stop()
   763   struct drm_sched_fence *s_fence = s_job->s_fence;  in drm_sched_resubmit_jobs() local
   779   s_job->s_fence->parent = NULL;  in drm_sched_resubmit_jobs()
   848   if (!job->s_fence)  in drm_sched_job_init()
  1082   job->s_fence = NULL;  in drm_sched_job_cleanup()
  1244   struct drm_sched_fence *s_fence;  in drm_sched_run_job_work() local
  [all …]
|
| sched_entity.c |
   181   WARN_ON(job->s_fence->parent);  in drm_sched_entity_kill_jobs_work()
   199   if (s_fence && f == &s_fence->scheduled) {  in drm_sched_entity_kill_jobs_cb()
   205   f = dma_fence_get_rcu(&s_fence->finished);  in drm_sched_entity_kill_jobs_cb()
   211   dma_fence_put(&s_fence->scheduled);  in drm_sched_entity_kill_jobs_cb()
   247   struct drm_sched_fence *s_fence = job->s_fence;  in drm_sched_entity_kill() local
   249   dma_fence_get(&s_fence->finished);  in drm_sched_entity_kill()
   261   prev = &s_fence->finished;  in drm_sched_entity_kill()
   398   struct drm_sched_fence *s_fence;  in drm_sched_entity_add_dependency_cb() local
   411   s_fence = to_drm_sched_fence(fence);  in drm_sched_entity_add_dependency_cb()
   412   if (!fence->error && s_fence && s_fence->sched == sched &&  in drm_sched_entity_add_dependency_cb()
  [all …]
|
| gpu_scheduler_trace.h |
    76   __entry->fence_context = sched_job->s_fence->finished.context;
    77   __entry->fence_seqno = sched_job->s_fence->finished.seqno;
    78   __entry->client_id = sched_job->s_fence->drm_client_id;
   123   __entry->fence_context = sched_job->s_fence->finished.context;
   124   __entry->fence_seqno = sched_job->s_fence->finished.seqno;
   144   __entry->fence_context = sched_job->s_fence->finished.context;
   145   __entry->fence_seqno = sched_job->s_fence->finished.seqno;
|
| sched_fence.c |
    51   static void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,  in drm_sched_fence_set_parent() argument
    59   smp_store_release(&s_fence->parent, dma_fence_get(fence));  in drm_sched_fence_set_parent()
    61   &s_fence->finished.flags))  in drm_sched_fence_set_parent()
    62   dma_fence_set_deadline(fence, s_fence->deadline);  in drm_sched_fence_set_parent()
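The scheduler-core matches above cover the life cycle of a struct drm_sched_fence: drm_sched_job_init()/drm_sched_job_cleanup() own the allocation, sched_entity.c resolves dependencies against the "scheduled" fence, and drm_sched_fence_set_parent() links in the hardware fence once it exists. A minimal sketch of how a driver typically exposes the "finished" fence after arming a job; the example_* name is hypothetical and not part of the scheduler API:

```c
#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>

/*
 * Hypothetical helper, sketched from the listings above: after
 * drm_sched_job_arm() the job's scheduler fence is initialised, so a
 * driver can take a reference on "finished" and hand it out (e.g. as an
 * out-fence) before calling drm_sched_entity_push_job().
 */
static struct dma_fence *example_job_out_fence(struct drm_sched_job *job)
{
	/*
	 * job->s_fence is allocated by drm_sched_job_init() and carries
	 * both the "scheduled" and the "finished" dma_fence.
	 */
	return dma_fence_get(&job->s_fence->finished);
}
```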
|
| /drivers/gpu/drm/amd/amdgpu/ |
| amdgpu_job.c |
   254   if (job->base.s_fence && job->base.s_fence->finished.ops)  in amdgpu_job_free_resources()
   255   f = &job->base.s_fence->finished;  in amdgpu_job_free_resources()
   317   f = dma_fence_get(&job->base.s_fence->finished);  in amdgpu_job_submit()
   374   dma_fence_set_error(&job->base.s_fence->finished, r);  in amdgpu_job_prepare_job()
   387   finished = &job->base.s_fence->finished;  in amdgpu_job_run()
   446   struct drm_sched_fence *s_fence = s_job->s_fence;  in amdgpu_job_stop_all_jobs_on_sched() local
   448   dma_fence_signal(&s_fence->scheduled);  in amdgpu_job_stop_all_jobs_on_sched()
   450   dma_fence_signal(&s_fence->finished);  in amdgpu_job_stop_all_jobs_on_sched()
   458   struct drm_sched_fence *s_fence = s_job->s_fence;  in amdgpu_job_stop_all_jobs_on_sched() local
   460   dma_fence_set_error(&s_fence->finished, -EHWPOISON);  in amdgpu_job_stop_all_jobs_on_sched()
  [all …]
|
| amdgpu_sync.c |
    68   struct drm_sched_fence *s_fence = to_drm_sched_fence(f);  in amdgpu_sync_same_dev() local
    70   if (s_fence) {  in amdgpu_sync_same_dev()
    89   struct drm_sched_fence *s_fence;  in amdgpu_sync_get_owner() local
    95   s_fence = to_drm_sched_fence(f);  in amdgpu_sync_get_owner()
    96   if (s_fence)  in amdgpu_sync_get_owner()
    97   return s_fence->owner;  in amdgpu_sync_get_owner()
   325   struct drm_sched_fence *s_fence = to_drm_sched_fence(f);  in amdgpu_sync_peek_fence() local
   331   if (ring && s_fence) {  in amdgpu_sync_peek_fence()
   335   if (s_fence->sched == &ring->sched) {  in amdgpu_sync_peek_fence()
   336   if (dma_fence_is_signaled(&s_fence->scheduled))  in amdgpu_sync_peek_fence()
  [all …]
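amdgpu_sync.c leans on the fact that to_drm_sched_fence() returns NULL for fences that did not come from the DRM scheduler, so the same call works as a type check and as a way to recover the owning scheduler or owner. A hedged sketch of that pattern; the example_* name is illustrative only:

```c
#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>

/*
 * Illustrative only: to_drm_sched_fence() yields NULL for foreign fences,
 * so the sched field is only dereferenced for genuine scheduler fences.
 */
static bool example_fence_belongs_to(struct dma_fence *f,
				     struct drm_gpu_scheduler *sched)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

	return s_fence && s_fence->sched == sched;
}
```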
|
| amdgpu_ib.c |
   151   fence_ctx = job->base.s_fence ?  in amdgpu_ib_schedule()
   152   job->base.s_fence->scheduled.context : 0;  in amdgpu_ib_schedule()
   162   af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;  in amdgpu_ib_schedule()
|
| amdgpu_trace.h |
    36   job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
   180   __entry->context = job->base.s_fence->finished.context;
   181   __entry->seqno = job->base.s_fence->finished.seqno;
   203   __entry->context = job->base.s_fence->finished.context;
   204   __entry->seqno = job->base.s_fence->finished.seqno;
|
| amdgpu_ctx.c |
   172   struct drm_sched_fence *s_fence;  in amdgpu_ctx_fence_time() local
   178   s_fence = to_drm_sched_fence(fence);  in amdgpu_ctx_fence_time()
   179   if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags))  in amdgpu_ctx_fence_time()
   183   if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags))  in amdgpu_ctx_fence_time()
   184   return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);  in amdgpu_ctx_fence_time()
   186   return ktime_sub(s_fence->finished.timestamp,  in amdgpu_ctx_fence_time()
   187   s_fence->scheduled.timestamp);  in amdgpu_ctx_fence_time()
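The amdgpu_ctx_fence_time() matches implement per-context runtime accounting: once both fences carry a timestamp, finished minus scheduled approximates how long the job occupied the hardware, and if only "scheduled" has signalled the job is still running, so the elapsed time is measured against ktime_get(). A sketch of the same arithmetic with a hypothetical helper name:

```c
#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>
#include <linux/bitops.h>
#include <linux/ktime.h>

/* Illustrative sketch of the accounting pattern in amdgpu_ctx_fence_time(). */
static ktime_t example_job_runtime(struct drm_sched_fence *s_fence)
{
	/* Not scheduled yet: no runtime to account. */
	if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags))
		return 0;

	/* Scheduled but not finished: still running, count up to "now". */
	if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags))
		return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);

	/* Completed: runtime is the distance between the two timestamps. */
	return ktime_sub(s_fence->finished.timestamp,
			 s_fence->scheduled.timestamp);
}
```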
|
| amdgpu_cs.c |
   446   struct drm_sched_fence *s_fence;  in amdgpu_cs_p2_dependencies() local
   449   s_fence = to_drm_sched_fence(fence);  in amdgpu_cs_p2_dependencies()
   450   fence = dma_fence_get(&s_fence->scheduled);  in amdgpu_cs_p2_dependencies()
  1267   struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);  in amdgpu_cs_sync_rings() local
  1275   if (!s_fence || s_fence->sched != sched) {  in amdgpu_cs_sync_rings()
  1327   fence = &p->jobs[i]->base.s_fence->scheduled;  in amdgpu_cs_submit()
  1362   p->fence = dma_fence_get(&leader->base.s_fence->finished);  in amdgpu_cs_submit()
  1373   &p->jobs[i]->base.s_fence->finished,  in amdgpu_cs_submit()
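amdgpu_cs_p2_dependencies() shows an optimisation also visible in sched_entity.c: when a dependency was produced by the scheduler being submitted to, waiting for its "scheduled" fence is enough, because the ring itself serialises execution. A sketch of that substitution; the helper name is illustrative, not driver code:

```c
#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>

/*
 * Illustrative sketch: swap a dependency on "finished" for a dependency on
 * "scheduled" when the fence comes from the scheduler we submit to.
 * Consumes the caller's reference on @fence.
 */
static struct dma_fence *
example_optimize_dependency(struct dma_fence *fence,
			    struct drm_gpu_scheduler *sched)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);

	if (s_fence && s_fence->sched == sched) {
		struct dma_fence *scheduled =
			dma_fence_get(&s_fence->scheduled);

		dma_fence_put(fence);
		return scheduled;
	}

	return fence;
}
```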
|
| amdgpu_ids.c |
   316   r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished,  in amdgpu_vmid_grab_reserved()
   376   &job->base.s_fence->finished,  in amdgpu_vmid_grab_used()
   429   &job->base.s_fence->finished,  in amdgpu_vmid_grab()
|
| /drivers/gpu/drm/xe/ |
| xe_gpu_scheduler.h |
    57   struct drm_sched_fence *s_fence = s_job->s_fence;  in xe_sched_resubmit_jobs() local
    58   struct dma_fence *hw_fence = s_fence->parent;  in xe_sched_resubmit_jobs()
|
| xe_exec.c |
   312   drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,  in xe_exec_ioctl()
   317   xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished);  in xe_exec_ioctl()
   324   xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished);  in xe_exec_ioctl()
|
| xe_pxp_submit.c |
   305   fence = dma_fence_get(&job->drm.s_fence->finished);  in xe_pxp_submit_session_termination()
   368   fence = dma_fence_get(&job->drm.s_fence->finished);  in pxp_pkt_submit()
|
| xe_gsc_submit.c |
   208   fence = dma_fence_get(&job->drm.s_fence->finished);  in xe_gsc_pkt_submit_kernel()
|
| /drivers/gpu/drm/lima/ |
| lima_trace.h |
    23   __entry->context = task->base.s_fence->finished.context;
    24   __entry->seqno = task->base.s_fence->finished.seqno;
|
| lima_sched.c |
   175   struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);  in lima_sched_context_queue_task()
   213   if (job->s_fence->finished.error < 0)  in lima_sched_run_job()
|
| /drivers/gpu/drm/etnaviv/ |
| etnaviv_sched.c |
    26   if (likely(!sched_job->s_fence->finished.error))  in etnaviv_sched_run_job()
   121   submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);  in etnaviv_sched_push_job()
|
| /drivers/gpu/drm/imagination/ |
| pvr_queue.c |
   487   if (f == &job->base.s_fence->scheduled)  in pvr_queue_get_paired_frag_job_dep()
   635   &job->paired_job->base.s_fence->scheduled == fence)  in pvr_queue_submit_job_to_cccb()
   785   WARN_ON(job->base.s_fence->parent);  in pvr_queue_start()
   786   job->base.s_fence->parent = dma_fence_get(job->done_fence);  in pvr_queue_start()
   837   job->base.s_fence->parent = dma_fence_get(job->done_fence);  in pvr_queue_timedout_job()
  1135   return &job->base.s_fence->finished;  in pvr_queue_job_arm()
  1150   if (job->base.s_fence)  in pvr_queue_job_cleanup()
  1170   queue->last_queued_job_scheduled_fence = dma_fence_get(&job->base.s_fence->scheduled);  in pvr_queue_job_push()
|
| pvr_sync.c |
   231   struct drm_sched_fence *s_fence = to_drm_sched_fence(uf);  in pvr_sync_add_dep_to_job() local
   237   dma_fence_get(&s_fence->scheduled));  in pvr_sync_add_dep_to_job()
|
| pvr_job.c |
   586   dma_resv_add_fence(obj->resv, &job->base.s_fence->finished, usage);  in update_job_resvs()
   612   &geom_job->base.s_fence->scheduled);  in can_combine_jobs()
   627   return dma_fence_get(&job->base.s_fence->scheduled);  in get_last_queued_job_scheduled_fence()
|
| /drivers/gpu/drm/scheduler/tests/ |
| sched_tests.h |
   220   return dma_fence_wait_timeout(&job->base.s_fence->scheduled,  in drm_mock_sched_job_wait_scheduled()
|
| tests_basic.c |
   142   dma_fence_get(&prev->base.s_fence->finished));  in drm_sched_basic_test()
   180   dma_fence_get(&prev->base.s_fence->finished));  in drm_sched_basic_entity_cleanup()
|
| /drivers/gpu/drm/v3d/ |
| v3d_sched.c |
   233   if (unlikely(job->base.base.s_fence->finished.error)) {  in v3d_bin_job_run()
   292   if (unlikely(job->base.base.s_fence->finished.error)) {  in v3d_render_job_run()
   340   if (unlikely(job->base.base.s_fence->finished.error)) {  in v3d_tfu_job_run()
   388   if (unlikely(job->base.base.s_fence->finished.error)) {  in v3d_csd_job_run()
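lima, etnaviv and v3d all check finished.error at the top of their run_job() callbacks: if the job has already been flagged with an error (for example by a failed dependency), the driver skips the hardware submission entirely. A hedged sketch of that guard; example_run_job() is not real driver code:

```c
#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>
#include <linux/compiler.h>

/*
 * Illustrative run_job() skeleton mirroring the checks listed above for
 * lima, etnaviv and v3d: bail out early when the job is already marked
 * as failed, so nothing reaches the hardware.
 */
static struct dma_fence *example_run_job(struct drm_sched_job *sched_job)
{
	if (unlikely(sched_job->s_fence->finished.error))
		return NULL;	/* nothing submitted, no hardware fence */

	/* ... build and kick off the real hardware submission here ... */
	return NULL;
}
```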
|
| /drivers/gpu/drm/msm/ |
| msm_gem_submit.c |
    97   if (submit->base.s_fence)  in __msm_gem_submit_destroy()
   735   submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);  in msm_ioctl_gem_submit()
|