Lines Matching refs:submit

258 static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submit *submit)  in crashstate_get_bos()  argument
262 if (msm_context_is_vmbind(submit->queue->ctx)) { in crashstate_get_bos()
271 drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(submit->vm)); in crashstate_get_bos()
274 drm_gpuvm_for_each_va (vma, submit->vm) { in crashstate_get_bos()
285 drm_gpuvm_for_each_va (vma, submit->vm) in crashstate_get_bos()
290 drm_gpuvm_for_each_va (vma, submit->vm) { in crashstate_get_bos()
303 state->bos = kcalloc(submit->nr_bos, in crashstate_get_bos()
306 for (int i = 0; state->bos && i < submit->nr_bos; i++) { in crashstate_get_bos()
307 struct drm_gem_object *obj = submit->bos[i].obj; in crashstate_get_bos()
308 bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP); in crashstate_get_bos()
311 msm_gpu_crashstate_get_bo(state, obj, submit->bos[i].iova, in crashstate_get_bos()
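
The references above all fall in crashstate_get_bos(): for VM_BIND userspace the whole VM is walked with drm_gpuvm_for_each_va() under drm_exec locking, otherwise the submit's BO table is walked directly. A minimal sketch of that second path, stitched together from the fragments above; the element size passed to kcalloc() is an assumption, and the trailing arguments of msm_gpu_crashstate_get_bo() are cut off in the listing and left elided:

    state->bos = kcalloc(submit->nr_bos, sizeof(*state->bos), GFP_KERNEL);

    for (int i = 0; state->bos && i < submit->nr_bos; i++) {
            struct drm_gem_object *obj = submit->bos[i].obj;
            /* Dump contents only when userspace asked (or rd_full is set) */
            bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP);

            msm_gpu_crashstate_get_bo(state, obj, submit->bos[i].iova,
                                      dump, ... /* elided in the listing */);
    }
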
361 struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info, in msm_gpu_crashstate_capture() argument
384 if (submit && state->fault_info.ttbr0) { in msm_gpu_crashstate_capture()
386 struct msm_mmu *mmu = to_msm_vm(submit->vm)->mmu; in msm_gpu_crashstate_capture()
393 if (submit) { in msm_gpu_crashstate_capture()
394 crashstate_get_vm_logs(state, to_msm_vm(submit->vm)); in msm_gpu_crashstate_capture()
395 crashstate_get_bos(state, submit); in msm_gpu_crashstate_capture()
406 struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info, in msm_gpu_crashstate_capture() argument
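
msm_gpu_crashstate_capture() only dereferences the submit when one was found: the pagetable base for an iova fault is read back from the submit's VM, then the VM logs and BOs are captured. A hedged sketch of that portion; the msm_iommu_pagetable_params() helper is an assumption about how ttbr0/asid are read back:

    if (submit && state->fault_info.ttbr0) {
            struct msm_mmu *mmu = to_msm_vm(submit->vm)->mmu;
            phys_addr_t ttbr;
            int asid;

            /* Assumed helper: report the faulting VM's pagetable base/asid */
            msm_iommu_pagetable_params(mmu, &ttbr, &asid);
    }

    if (submit) {
            crashstate_get_vm_logs(state, to_msm_vm(submit->vm));
            crashstate_get_bos(state, submit);
    }

The second definition at line 406 is presumably the empty stub compiled when dev coredump support is disabled.
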
419 struct msm_gem_submit *submit; in find_submit() local
423 list_for_each_entry(submit, &ring->submits, node) { in find_submit()
424 if (submit->seqno == fence) { in find_submit()
426 return submit; in find_submit()
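
find_submit() scans a ring's in-flight list for the submit whose seqno matches a given fence value; callers pass memptrs->fence + 1, i.e. the first fence that has not yet signaled. A sketch reconstructed from the fragments above; the per-ring submit_lock and the reference taken before returning are assumptions based on the rest of the driver:

    static struct msm_gem_submit *
    find_submit(struct msm_ringbuffer *ring, uint32_t fence)
    {
            struct msm_gem_submit *submit;
            unsigned long flags;

            spin_lock_irqsave(&ring->submit_lock, flags);
            list_for_each_entry(submit, &ring->submits, node) {
                    if (submit->seqno == fence) {
                            /* Assumed: hold a reference for the caller */
                            msm_gem_submit_get(submit);
                            spin_unlock_irqrestore(&ring->submit_lock, flags);
                            return submit;
                    }
            }
            spin_unlock_irqrestore(&ring->submit_lock, flags);

            return NULL;
    }
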
436 static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd) in get_comm_cmdline() argument
438 struct msm_context *ctx = submit->queue->ctx; in get_comm_cmdline()
441 WARN_ON(!mutex_is_locked(&submit->gpu->lock)); in get_comm_cmdline()
447 task = get_pid_task(submit->pid, PIDTYPE_PID); in get_comm_cmdline()
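
get_comm_cmdline() resolves the submitting process so the hang report and coredump can name it; it requires gpu->lock to be held. A hedged sketch; the kstrdup_quotable()/kstrdup_quotable_cmdline() helpers are assumptions about how comm and cmdline are copied:

    struct task_struct *task;

    WARN_ON(!mutex_is_locked(&submit->gpu->lock));

    task = get_pid_task(submit->pid, PIDTYPE_PID);
    if (task) {
            *comm = kstrdup_quotable(task->comm, GFP_KERNEL);
            *cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
            put_task_struct(task);
    }
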
465 struct msm_gem_submit *submit; in recover_worker() local
474 submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1); in recover_worker()
480 if (!submit) in recover_worker()
484 submit->queue->faults++; in recover_worker()
485 if (submit->vm) { in recover_worker()
486 struct msm_gem_vm *vm = to_msm_vm(submit->vm); in recover_worker()
497 msm_gem_vm_unusable(submit->vm); in recover_worker()
500 get_comm_cmdline(submit, &comm, &cmd); in recover_worker()
506 msm_rd_dump_submit(priv->hangrd, submit, in recover_worker()
511 msm_rd_dump_submit(priv->hangrd, submit, NULL); in recover_worker()
516 msm_gpu_crashstate_capture(gpu, submit, NULL, comm, cmd); in recover_worker()
556 list_for_each_entry(submit, &ring->submits, node) in recover_worker()
557 gpu->funcs->submit(gpu, submit); in recover_worker()
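
recover_worker() ties these references together: the hung submit is the one whose fence (memptrs->fence + 1) never signaled, the fault is charged to its queue and VM, the submit is dumped to hangrd and into the crash state, and after the GPU is recovered everything still sitting on the rings is replayed through gpu->funcs->submit(). A compressed sketch of that flow; the condition for poisoning the VM and the hangrd annotation string are elided:

    submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
    if (submit) {
            submit->queue->faults++;

            /* A faulting VM is marked unusable so later submits against it
             * fail fast (the exact condition is elided here). */
            if (submit->vm)
                    msm_gem_vm_unusable(submit->vm);

            get_comm_cmdline(submit, &comm, &cmd);
            msm_rd_dump_submit(priv->hangrd, submit, NULL);
    }

    msm_gpu_crashstate_capture(gpu, submit, NULL, comm, cmd);

    /* ... reset/recover the GPU, then replay anything still queued ... */
    for (i = 0; i < gpu->nr_rings; i++) {
            struct msm_ringbuffer *ring = gpu->rb[i];

            list_for_each_entry(submit, &ring->submits, node)
                    gpu->funcs->submit(gpu, submit);
    }
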
572 struct msm_gem_submit *submit; in msm_gpu_fault_crashstate_capture() local
578 submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1); in msm_gpu_fault_crashstate_capture()
579 if (submit && submit->fault_dumped) in msm_gpu_fault_crashstate_capture()
582 if (submit) { in msm_gpu_fault_crashstate_capture()
583 get_comm_cmdline(submit, &comm, &cmd); in msm_gpu_fault_crashstate_capture()
589 submit->fault_dumped = true; in msm_gpu_fault_crashstate_capture()
594 msm_gpu_crashstate_capture(gpu, submit, fault_info, comm, cmd); in msm_gpu_fault_crashstate_capture()
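
msm_gpu_fault_crashstate_capture() is the iova-fault variant of the same flow: it finds the active submit the same way, skips submits that have already produced a dump (a single bad submit can fault many times), and passes the fault_info through to msm_gpu_crashstate_capture(). Sketch under those assumptions; the bare return is a simplification of however the real function unwinds:

    submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);

    /* Only dump the first fault a given submit triggers */
    if (submit && submit->fault_dumped)
            return;

    if (submit) {
            get_comm_cmdline(submit, &comm, &cmd);
            submit->fault_dumped = true;
    }

    msm_gpu_crashstate_capture(gpu, submit, fault_info, comm, cmd);
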
763 struct msm_gem_submit *submit) in retire_submit() argument
765 int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT; in retire_submit()
783 submit->queue->ctx->elapsed_ns += elapsed; in retire_submit()
784 submit->queue->ctx->cycles += cycles; in retire_submit()
786 trace_msm_gpu_submit_retired(submit, elapsed, clock, in retire_submit()
789 msm_submit_retire(submit); in retire_submit()
794 list_del(&submit->node); in retire_submit()
808 msm_gem_submit_put(submit); in retire_submit()
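
retire_submit() accounts the finished submit to its context (elapsed time and GPU cycles), emits the retire tracepoint, releases the per-BO state, unlinks the submit from the ring's in-flight list and drops the reference that list held. A hedged sketch of that sequence; how elapsed/clock/cycles are derived from the ring's shared stats slot is omitted, the tracepoint's remaining arguments are cut off in the listing, and the per-ring spinlock is an assumption:

    int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
    u64 elapsed, clock, cycles;
    unsigned long flags;

    /* ... fill elapsed/clock/cycles from the stats slot the GPU wrote into
     * ring->memptrs for this seqno (slot 'index'); details omitted ... */

    submit->queue->ctx->elapsed_ns += elapsed;
    submit->queue->ctx->cycles += cycles;

    trace_msm_gpu_submit_retired(submit, elapsed, clock, ... /* elided */);

    msm_submit_retire(submit);

    /* Unlink from the ring's in-flight list and drop that list's reference
     * (per-ring spinlock assumed, as in find_submit() above). */
    spin_lock_irqsave(&ring->submit_lock, flags);
    list_del(&submit->node);
    spin_unlock_irqrestore(&ring->submit_lock, flags);

    msm_gem_submit_put(submit);
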
820 struct msm_gem_submit *submit = NULL; in retire_submits() local
824 submit = list_first_entry_or_null(&ring->submits, in retire_submits()
833 if (submit && dma_fence_is_signaled(submit->hw_fence)) { in retire_submits()
834 retire_submit(gpu, ring, submit); in retire_submits()
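
retire_submits() drives that loop for every ring: peek the oldest in-flight submit, and keep calling retire_submit() as long as its hardware fence has signaled; the first still-pending submit stops the scan for that ring. A sketch of the per-ring inner loop, with the locking assumed as elsewhere:

    while (true) {
            struct msm_gem_submit *submit;
            unsigned long flags;

            spin_lock_irqsave(&ring->submit_lock, flags);
            submit = list_first_entry_or_null(&ring->submits,
                            struct msm_gem_submit, node);
            spin_unlock_irqrestore(&ring->submit_lock, flags);

            /* Submits retire strictly in order per ring */
            if (submit && dma_fence_is_signaled(submit->hw_fence))
                    retire_submit(gpu, ring, submit);
            else
                    break;
    }
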
864 void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in msm_gpu_submit() argument
866 struct msm_ringbuffer *ring = submit->ring; in msm_gpu_submit()
875 submit->seqno = submit->hw_fence->seqno; in msm_gpu_submit()
883 msm_gem_submit_get(submit); in msm_gpu_submit()
886 list_add_tail(&submit->node, &ring->submits); in msm_gpu_submit()
898 gpu->funcs->submit(gpu, submit); in msm_gpu_submit()
899 submit->ring->cur_ctx_seqno = submit->queue->ctx->seqno; in msm_gpu_submit()
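
msm_gpu_submit() is where a submit goes live: it inherits its seqno from the hardware fence, a reference is taken for the ring's in-flight list, the submit is queued on that list, handed to the chip-specific backend via gpu->funcs->submit(), and the ring records which context ran last. A sketch of that sequence; power management, devfreq and hangcheck scheduling done by the real function are omitted, and the locking is assumed:

    void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
    {
            struct msm_ringbuffer *ring = submit->ring;
            unsigned long flags;

            /* The value the GPU writes back on completion */
            submit->seqno = submit->hw_fence->seqno;

            /* Reference owned by ring->submits until retire_submit() */
            msm_gem_submit_get(submit);

            spin_lock_irqsave(&ring->submit_lock, flags);
            list_add_tail(&submit->node, &ring->submits);
            spin_unlock_irqrestore(&ring->submit_lock, flags);

            gpu->funcs->submit(gpu, submit);

            /* Lets the next submit decide whether a context switch is needed */
            submit->ring->cur_ctx_seqno = submit->queue->ctx->seqno;
    }
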