Cross-reference hits for the identifier "submit", grouped by directory and file. The number before each hit is its line number within that file; "in func()" names the enclosing definition, and the suffixes "argument", "local", and "member" classify the occurrence. Entries ending in [all …] are truncated.

/drivers/gpu/drm/msm/
msm_gem_submit.c
     49  if (!submit)  in submit_create()
     55  kfree(submit);  in submit_create()
     71  submit->cmd = (void *)&submit->bos[nr_bos];  in submit_create()
     82  return submit;  in submit_create()
    102  idr_remove(&submit->queue->fence_idr, submit->fence_id);  in __msm_gem_submit_destroy()
    284  drm_exec_init(&submit->exec, flags, submit->nr_bos);  in submit_lock_objects()
    301  drm_exec_init(&submit->exec, flags, submit->nr_bos);  in submit_lock_objects()
    408  drm_gpuvm_resv_add_fence(submit->vm, &submit->exec,  in submit_attach_object_fences()
    708  submit->cmd[i].nr_relocs, submit->cmd[i].relocs);  in msm_ioctl_gem_submit()
    745  &submit->fence_id, submit->fence_id,  in msm_ioctl_gem_submit()
    [all …]
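
The hit at line 71 is the classic single-allocation trick: the submit struct, its bos[] array, and its cmd[] array live in one buffer, with cmd[] starting exactly where bos[nr_bos] ends. A minimal userspace sketch of the pattern; the struct layouts and the calloc-based allocator are illustrative assumptions, not the driver's real definitions:

```c
#include <stdlib.h>

struct bo  { void *obj; unsigned long long iova; };
struct cmd { unsigned int type, size; };

struct submit {
	unsigned int nr_bos, nr_cmds;
	struct cmd *cmd;   /* points into the same allocation */
	struct bo bos[];   /* flexible array at the tail */
};

/* One allocation carries the struct plus both arrays; cmd[] begins
 * immediately after bos[nr_bos], mirroring the hit at line 71. */
static struct submit *submit_create(unsigned int nr_bos, unsigned int nr_cmds)
{
	size_t sz = sizeof(struct submit) +
		    nr_bos * sizeof(struct bo) +
		    nr_cmds * sizeof(struct cmd);
	struct submit *submit = calloc(1, sz);

	if (!submit)
		return NULL;

	submit->nr_bos = nr_bos;
	submit->nr_cmds = nr_cmds;
	submit->cmd = (void *)&submit->bos[nr_bos];
	return submit;
}
```

The same trick reappears in lima_drv.c further down, where bos and lbos share a single kvcalloc.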
|
msm_gpu.c
    393  if (submit) {  in msm_gpu_crashstate_capture()
    426  return submit;  in find_submit()
    480  if (!submit)  in recover_worker()
    485  if (submit->vm) {  in recover_worker()
    557  gpu->funcs->submit(gpu, submit);  in recover_worker()
    579  if (submit && submit->fault_dumped)  in msm_gpu_fault_crashstate_capture()
    582  if (submit) {  in msm_gpu_fault_crashstate_capture()
    833  if (submit && dma_fence_is_signaled(submit->hw_fence)) {  in retire_submits()
    875  submit->seqno = submit->hw_fence->seqno;  in msm_gpu_submit()
    898  gpu->funcs->submit(gpu, submit);  in msm_gpu_submit()
    [all …]
|
msm_ringbuffer.c
     18  struct msm_gpu *gpu = submit->gpu;  in msm_job_run()
     20  unsigned nr_cmds = submit->nr_cmds;  in msm_job_run()
     23  msm_fence_init(submit->hw_fence, fctx);  in msm_job_run()
     27  for (i = 0; i < submit->nr_bos; i++) {  in msm_job_run()
     33  submit->bos_pinned = false;  in msm_job_run()
     40  if (submit->queue->ctx->closed)  in msm_job_run()
     41  submit->nr_cmds = 0;  in msm_job_run()
     43  msm_gpu_submit(gpu, submit);  in msm_job_run()
     45  submit->nr_cmds = nr_cmds;  in msm_job_run()
     49  return dma_fence_get(submit->hw_fence);  in msm_job_run()
    [all …]
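
Every msm_ringbuffer.c hit lands inside msm_job_run(), the drm_gpu_scheduler run_job callback. Pieced together from the fragments, the flow is roughly the following; the per-BO loop body, locking, and tracing between the quoted lines are elided, and the origin of fctx is an assumption, so read this as a sketch rather than the verbatim function:

```c
static struct dma_fence *msm_job_run(struct drm_sched_job *job)
{
	struct msm_gem_submit *submit = to_msm_submit(job);
	struct msm_fence_context *fctx = submit->ring->fctx;  /* assumed origin of fctx */
	struct msm_gpu *gpu = submit->gpu;
	unsigned nr_cmds = submit->nr_cmds;
	int i;

	/* bind the preallocated hw fence to the ring's fence context */
	msm_fence_init(submit->hw_fence, fctx);

	for (i = 0; i < submit->nr_bos; i++) {
		/* per-BO unpin bookkeeping elided */
	}
	submit->bos_pinned = false;

	/* a closed context still gets its fence signaled, but runs nothing */
	if (submit->queue->ctx->closed)
		submit->nr_cmds = 0;

	msm_gpu_submit(gpu, submit);

	/* restore the real count for the retire/dump paths */
	submit->nr_cmds = nr_cmds;

	return dma_fence_get(submit->hw_fence);
}
```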
|
msm_rd.c
    362  task = pid_task(submit->pid, PIDTYPE_PID);  in msm_rd_dump_submit()
    366  pid_nr(submit->pid), submit->seqno);  in msm_rd_dump_submit()
    369  pid_nr(submit->pid), submit->seqno);  in msm_rd_dump_submit()
    378  drm_gpuvm_resv_assert_held(submit->vm);  in msm_rd_dump_submit()
    392  for (i = 0; i < submit->nr_bos; i++) {  in msm_rd_dump_submit()
    399  for (i = 0; i < submit->nr_cmds; i++) {  in msm_rd_dump_submit()
    401  int idx = submit->cmd[i].idx;  in msm_rd_dump_submit()
    407  size_t offset = submit->cmd[i].iova - submit->bos[idx].iova;  in msm_rd_dump_submit()
    415  for (i = 0; i < submit->nr_cmds; i++) {  in msm_rd_dump_submit()
    416  uint64_t iova = submit->cmd[i].iova;  in msm_rd_dump_submit()
    [all …]
|
msm_gpu_trace.h
     34  TP_PROTO(struct msm_gem_submit *submit, u64 ticks),
     35  TP_ARGS(submit, ticks),
     44  __entry->pid = pid_nr(submit->pid);
     45  __entry->id = submit->ident;
     46  __entry->ringid = submit->ring->id;
     47  __entry->seqno = submit->seqno;
     59  TP_ARGS(submit, elapsed, clock, start, end),
     71  __entry->pid = pid_nr(submit->pid);
     72  __entry->id = submit->ident;
     73  __entry->ringid = submit->ring->id;
    [all …]
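
The trace-header hits show the standard TRACE_EVENT anatomy: the submit pointer arrives via TP_PROTO/TP_ARGS and its fields are copied into the ring-buffer entry in TP_fast_assign. A hedged reconstruction of the first event's shape; the event name, field types, and printk format are assumptions filled in around the quoted assignments:

```c
TRACE_EVENT(msm_gpu_submit_flush,  /* name assumed from the ticks argument */
	TP_PROTO(struct msm_gem_submit *submit, u64 ticks),
	TP_ARGS(submit, ticks),
	TP_STRUCT__entry(
		__field(pid_t, pid)
		__field(u32, id)
		__field(u32, ringid)
		__field(u32, seqno)
		__field(u64, ticks)
	),
	TP_fast_assign(
		__entry->pid = pid_nr(submit->pid);
		__entry->id = submit->ident;
		__entry->ringid = submit->ring->id;
		__entry->seqno = submit->seqno;
		__entry->ticks = ticks;
	),
	TP_printk("id=%u pid=%d ring=%u seqno=%u ticks=%llu",
		  __entry->id, __entry->pid, __entry->ringid,
		  __entry->seqno, __entry->ticks)
);
```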
|
msm_gem.h
    489  static inline void msm_gem_submit_get(struct msm_gem_submit *submit)  in msm_gem_submit_get() argument
    491  kref_get(&submit->ref);  in msm_gem_submit_get()
    494  static inline void msm_gem_submit_put(struct msm_gem_submit *submit)  in msm_gem_submit_put() argument
    496  kref_put(&submit->ref, __msm_gem_submit_destroy);  in msm_gem_submit_put()
    499  void msm_submit_retire(struct msm_gem_submit *submit);
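
Unlike the truncated entries, these header hits are essentially complete: submit lifetime is the standard struct kref pattern, with __msm_gem_submit_destroy() as the release callback that fires when the last reference drops. Reassembled from the hits:

```c
static inline void msm_gem_submit_get(struct msm_gem_submit *submit)
{
	kref_get(&submit->ref);
}

static inline void msm_gem_submit_put(struct msm_gem_submit *submit)
{
	/* calls __msm_gem_submit_destroy() once the refcount reaches zero */
	kref_put(&submit->ref, __msm_gem_submit_destroy);
}
```

Any path that holds a submit beyond the ioctl (the scheduler, crash capture, the rd dumper) presumably pins it through these helpers.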
|
/drivers/gpu/drm/virtio/
virtgpu_submit.c
     51  u64 context = submit->fence_ctx + submit->ring_idx;  in virtio_gpu_do_fence_wait()
    331  virtio_gpu_reset_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);  in virtio_gpu_cleanup_submit()
    332  virtio_gpu_free_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);  in virtio_gpu_cleanup_submit()
    333  virtio_gpu_free_post_deps(submit->post_deps, submit->num_out_syncobjs);  in virtio_gpu_cleanup_submit()
    338  if (submit->buflist)  in virtio_gpu_cleanup_submit()
    353  virtio_gpu_cmd_submit(submit->vgdev, submit->buf, submit->exbuf->size,  in virtio_gpu_submit()
    354  submit->vfpriv->ctx_id, submit->buflist,  in virtio_gpu_submit()
    361  submit->buf = NULL;  in virtio_gpu_complete_submit()
    379  memset(submit, 0, sizeof(*submit));  in virtio_gpu_init_submit()
    460  submit->exbuf->fence_fd = submit->out_fence_fd;  in virtio_gpu_install_out_fence_fd()
    [all …]
|
/drivers/gpu/drm/etnaviv/
etnaviv_gem_submit.c
     34  size_t sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));  in submit_create()
     37  if (!submit)  in submit_create()
     42  if (!submit->pmrs) {  in submit_create()
     43  kfree(submit);  in submit_create()
     48  submit->gpu = gpu;  in submit_create()
     51  return submit;  in submit_create()
    104  submit->nr_bos = i;  in submit_lookup_objects()
    396  xa_erase(&submit->gpu->user_fences, submit->out_fence_id);  in submit_cleanup()
    403  kfree(submit);  in submit_cleanup()
    518  if (!submit) {  in etnaviv_ioctl_gem_submit()
    [all …]
|
etnaviv_sched.c
     27  fence = etnaviv_gpu_submit(submit);  in etnaviv_sched_run_job()
     38  struct etnaviv_gpu *gpu = submit->gpu;  in etnaviv_sched_timedout_job()
     56  if (submit->exec_state == ETNA_PIPE_3D) {  in etnaviv_sched_timedout_job()
     83  etnaviv_core_dump(submit);  in etnaviv_sched_timedout_job()
     84  etnaviv_gpu_recover_hang(submit);  in etnaviv_sched_timedout_job()
     98  etnaviv_submit_put(submit);  in etnaviv_sched_free_job()
    109  struct etnaviv_gpu *gpu = submit->gpu;  in etnaviv_sched_push_job()
    119  drm_sched_job_arm(&submit->sched_job);  in etnaviv_sched_push_job()
    121  submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);  in etnaviv_sched_push_job()
    123  submit->out_fence, xa_limit_32b,  in etnaviv_sched_push_job()
    [all …]
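
The etnaviv_sched_push_job() hits trace the usual drm_sched publishing sequence: arm the job, take a reference on its "finished" fence as the user-visible out-fence, then hand out an id for it. A sketch of that sequence; the wrapper function name and the xa_alloc_cyclic() arguments beyond those visible in the hits are assumptions:

```c
static int etnaviv_publish_out_fence(struct etnaviv_gpu *gpu,
				     struct etnaviv_gem_submit *submit)
{
	int ret;

	drm_sched_job_arm(&submit->sched_job);

	/* the scheduler's "finished" fence is what userspace will wait on */
	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);

	/* publish it under a 32-bit id so a wait ioctl can look it up later */
	ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
			      submit->out_fence, xa_limit_32b,
			      &gpu->next_user_fence, GFP_KERNEL);
	if (ret < 0)
		return ret;

	drm_sched_entity_push_job(&submit->sched_job);
	return 0;
}
```

The matching xa_erase() in submit_cleanup() above retires the id when the submit is torn down.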
|
etnaviv_dump.c
    120  struct etnaviv_gpu *gpu = submit->gpu;  in etnaviv_core_dump()
    133  mutex_lock(&submit->mmu_context->lock);  in etnaviv_core_dump()
    145  for (i = 0; i < submit->nr_bos; i++) {  in etnaviv_core_dump()
    146  obj = submit->bos[i].obj;  in etnaviv_core_dump()
    164  mutex_unlock(&submit->mmu_context->lock);  in etnaviv_core_dump()
    180  &submit->mmu_context->cmdbuf_mapping));  in etnaviv_core_dump()
    183  submit->cmdbuf.vaddr, submit->cmdbuf.size,  in etnaviv_core_dump()
    187  mutex_unlock(&submit->mmu_context->lock);  in etnaviv_core_dump()
    200  for (i = 0; i < submit->nr_bos; i++) {  in etnaviv_core_dump()
    205  obj = submit->bos[i].obj;  in etnaviv_core_dump()
    [all …]
|
etnaviv_gpu.c
   1338  const struct etnaviv_gem_submit *submit = event->submit;  in sync_point_perfmon_sample() local
   1369  const struct etnaviv_gem_submit *submit = event->submit;  in sync_point_perfmon_sample_post() local
   1407  if (submit->nr_pmrs)  in etnaviv_gpu_submit()
   1430  if (submit->prev_mmu_context)  in etnaviv_gpu_submit()
   1434  if (submit->nr_pmrs) {  in etnaviv_gpu_submit()
   1436  kref_get(&submit->refcount);  in etnaviv_gpu_submit()
   1437  gpu->event[event[1]].submit = submit;  in etnaviv_gpu_submit()
   1442  submit->cmdbuf.user_size = submit->cmdbuf.size - 8;  in etnaviv_gpu_submit()
   1443  etnaviv_buffer_queue(gpu, submit->exec_state, submit->mmu_context,  in etnaviv_gpu_submit()
   1446  if (submit->nr_pmrs) {  in etnaviv_gpu_submit()
    [all …]
|
etnaviv_gpu.h
     88  struct etnaviv_gem_submit *submit;  member
    216  void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit);
    223  struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
|
/drivers/dma/ti/
cppi41.c
    119  u16 submit;  member
    157  [ 0] = { .submit = 32, .complete = 93},
    158  [ 1] = { .submit = 34, .complete = 94},
    159  [ 2] = { .submit = 36, .complete = 95},
    160  [ 3] = { .submit = 38, .complete = 96},
    161  [ 4] = { .submit = 40, .complete = 97},
    162  [ 5] = { .submit = 42, .complete = 98},
    235  [0] = { .submit = 1, .complete = 26},
    236  [1] = { .submit = 3, .complete = 26},
    237  [2] = { .submit = 5, .complete = 26},
    [all …]
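
This is the section's one non-GPU DMA match: in cppi41.c, submit is a u16 pairing each DMA channel with its hardware submit and completion queue numbers. Reconstructed from the hits; the struct and array names are assumptions for illustration, while the numbers are the ones quoted above:

```c
struct chan_queues {
	u16 submit;    /* queue the driver pushes descriptors into */
	u16 complete;  /* queue the hardware posts completions to  */
};

/* Lines 157-162: six TX channels, each with its own submit/complete pair */
static const struct chan_queues usb_tx_queues[] = {
	[0] = { .submit = 32, .complete = 93 },
	[1] = { .submit = 34, .complete = 94 },
	[2] = { .submit = 36, .complete = 95 },
	[3] = { .submit = 38, .complete = 96 },
	[4] = { .submit = 40, .complete = 97 },
	[5] = { .submit = 42, .complete = 98 },
};
```

The second table (lines 235-237) shows a different topology: distinct submit queues all funneling into one shared completion queue, number 26.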
|
/drivers/gpu/drm/lima/
lima_gem.c
    285  if (!submit->in_sync[i])  in lima_gem_add_deps()
    289  submit->in_sync[i], 0);  in lima_gem_add_deps()
    305  struct lima_bo **bos = submit->lbos;  in lima_gem_submit()
    307  if (submit->out_sync) {  in lima_gem_submit()
    338  submit->nr_bos, &ctx);  in lima_gem_submit()
    343  submit->task, submit->ctx->context + submit->pipe,  in lima_gem_submit()
    354  submit->task, bos[i],  in lima_gem_submit()
    370  submit->nr_bos, &ctx);  in lima_gem_submit()
    372  for (i = 0; i < submit->nr_bos; i++)  in lima_gem_submit()
    385  lima_sched_task_fini(submit->task);  in lima_gem_submit()
    [all …]
|
lima_drv.c
    111  struct lima_submit submit = {0};  in lima_ioctl_gem_submit() local
    125  bos = kvcalloc(args->nr_bos, sizeof(*submit.bos) + sizeof(*submit.lbos), GFP_KERNEL);  in lima_ioctl_gem_submit()
    157  submit.pipe = args->pipe;  in lima_ioctl_gem_submit()
    158  submit.bos = bos;  in lima_ioctl_gem_submit()
    159  submit.lbos = (void *)bos + size;  in lima_ioctl_gem_submit()
    160  submit.nr_bos = args->nr_bos;  in lima_ioctl_gem_submit()
    161  submit.task = task;  in lima_ioctl_gem_submit()
    162  submit.ctx = ctx;  in lima_ioctl_gem_submit()
    163  submit.flags = args->flags;  in lima_ioctl_gem_submit()
    164  submit.in_sync[0] = args->in_sync[0];  in lima_ioctl_gem_submit()
    [all …]
|
/drivers/gpu/drm/msm/adreno/
a5xx_gpu.c
     75  for (i = 0; i < submit->nr_cmds; i++) {  in a5xx_submit_in_rb()
     76  switch (submit->cmd[i].type) {  in a5xx_submit_in_rb()
     85  obj = submit->bos[submit->cmd[i].idx].obj;  in a5xx_submit_in_rb()
     86  dwords = submit->cmd[i].size;  in a5xx_submit_in_rb()
    123  ring->memptrs->fence = submit->seqno;  in a5xx_submit_in_rb()
    138  a5xx_submit_in_rb(gpu, submit);  in a5xx_submit()
    172  switch (submit->cmd[i].type) {  in a5xx_submit()
    183  OUT_RING(ring, submit->cmd[i].size);  in a5xx_submit()
    217  OUT_RING(ring, submit->seqno);  in a5xx_submit()
    229  OUT_RING(ring, submit->seqno);  in a5xx_submit()
    [all …]
|
a2xx_gpu.c
     13  static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)  in a2xx_submit() argument
     15  struct msm_ringbuffer *ring = submit->ring;  in a2xx_submit()
     18  for (i = 0; i < submit->nr_cmds; i++) {  in a2xx_submit()
     19  switch (submit->cmd[i].type) {  in a2xx_submit()
     25  if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)  in a2xx_submit()
     30  OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));  in a2xx_submit()
     31  OUT_RING(ring, submit->cmd[i].size);  in a2xx_submit()
     38  OUT_RING(ring, submit->seqno);  in a2xx_submit()
     47  OUT_RING(ring, submit->seqno);  in a2xx_submit()
    497  .submit = a2xx_submit,
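
a2xx, a3xx, and a4xx share the same submit shape: walk submit->cmd[], emit one CP_INDIRECT_BUFFER packet per command buffer, skip the context-restore IB when the ring is already running that context, and finally write submit->seqno for retirement. A condensed sketch of the loop; the function name is hypothetical and the fence write-back packets are elided:

```c
static void adreno_legacy_submit(struct msm_gpu *gpu,
				 struct msm_gem_submit *submit)
{
	struct msm_ringbuffer *ring = submit->ring;
	unsigned int i;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* IB meant for another processor: skip it */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* no state restore needed if this context is current */
			if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
				break;
			fallthrough;
		case MSM_SUBMIT_CMD_BUF:
			/* point the CP at the userspace command stream */
			OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			break;
		}
	}

	/* fence write-back elided; it ends with OUT_RING(ring, submit->seqno)
	 * as the hits at file lines 38 and 47 show */
}
```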
|
a6xx_gpu.c
    254  switch (submit->cmd[i].type) {  in a6xx_submit()
    288  OUT_RING(ring, submit->seqno);  in a6xx_submit()
    299  OUT_RING(ring, submit->seqno);  in a6xx_submit()
    391  switch (submit->cmd[i].type) {  in a7xx_submit()
    430  OUT_RING(ring, submit->seqno);  in a7xx_submit()
    452  OUT_RING(ring, submit->seqno);  in a7xx_submit()
    466  OUT_RING(ring, submit->seqno);  in a7xx_submit()
    475  OUT_RING(ring, submit->seqno);  in a7xx_submit()
   2393  .submit = a6xx_submit,
   2424  .submit = a6xx_submit,
    [all …]
|
a3xx_gpu.c
     31  static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)  in a3xx_submit() argument
     33  struct msm_ringbuffer *ring = submit->ring;  in a3xx_submit()
     36  for (i = 0; i < submit->nr_cmds; i++) {  in a3xx_submit()
     37  switch (submit->cmd[i].type) {  in a3xx_submit()
     43  if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)  in a3xx_submit()
     48  OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));  in a3xx_submit()
     49  OUT_RING(ring, submit->cmd[i].size);  in a3xx_submit()
     56  OUT_RING(ring, submit->seqno);  in a3xx_submit()
     73  OUT_RING(ring, submit->seqno);  in a3xx_submit()
    519  .submit = a3xx_submit,
|
a4xx_gpu.c
     25  static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)  in a4xx_submit() argument
     27  struct msm_ringbuffer *ring = submit->ring;  in a4xx_submit()
     30  for (i = 0; i < submit->nr_cmds; i++) {  in a4xx_submit()
     31  switch (submit->cmd[i].type) {  in a4xx_submit()
     37  if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)  in a4xx_submit()
     42  OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));  in a4xx_submit()
     43  OUT_RING(ring, submit->cmd[i].size);  in a4xx_submit()
     50  OUT_RING(ring, submit->seqno);  in a4xx_submit()
     67  OUT_RING(ring, submit->seqno);  in a4xx_submit()
    638  .submit = a4xx_submit,
|
/drivers/gpu/drm/i915/selftests/
i915_active.c
     81  struct i915_sw_fence *submit;  in __live_active_setup() local
     90  submit = heap_fence_create(GFP_KERNEL);  in __live_active_setup()
     91  if (!submit) {  in __live_active_setup()
    109  err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,  in __live_active_setup()
    110  submit,  in __live_active_setup()
    135  i915_sw_fence_commit(submit);  in __live_active_setup()
    136  heap_fence_put(submit);  in __live_active_setup()
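
Here submit is not a GPU job but a bare i915_sw_fence used as a gate: each request's own submit fence awaits it, so nothing starts executing until the test commits the heap fence. A sketch of the gating pattern assembled from the fragments, with request allocation and error handling trimmed:

```c
struct i915_sw_fence *submit;
int err;

submit = heap_fence_create(GFP_KERNEL);
if (!submit)
	return -ENOMEM;

/* each request's submission is held back behind the shared gate */
err = i915_sw_fence_await_sw_fence_gfp(&rq->submit, submit, GFP_KERNEL);

/* ... queue further requests against the same gate ... */

/* open the gate: every queued request becomes submittable at once */
i915_sw_fence_commit(submit);
heap_fence_put(submit);
```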
|
i915_request.c
    348  if (!submit) {  in __igt_breadcrumbs_smoketest()
    355  i915_sw_fence_commit(submit);  in __igt_breadcrumbs_smoketest()
    356  heap_fence_put(submit);  in __igt_breadcrumbs_smoketest()
    381  submit,  in __igt_breadcrumbs_smoketest()
    400  i915_sw_fence_commit(submit);  in __igt_breadcrumbs_smoketest()
    434  heap_fence_put(submit);  in __igt_breadcrumbs_smoketest()
   2269  struct i915_sw_fence *submit;  in measure_inter_request() local
   2291  if (!submit) {  in measure_inter_request()
   2308  submit,  in measure_inter_request()
   2329  heap_fence_put(submit);  in measure_inter_request()
    [all …]
|
/drivers/gpu/drm/tegra/
firewall.c
      9  struct tegra_drm_submit_data *submit;  member
     31  for (i = 0; i < fw->submit->num_used_mappings; i++) {  in fw_check_addr_valid()
     32  struct tegra_drm_mapping *m = fw->submit->used_mappings[i].mapping;  in fw_check_addr_valid()
    145  u32 words, struct tegra_drm_submit_data *submit,  in tegra_drm_fw_validate() argument
    149  .submit = submit,  in tegra_drm_fw_validate()
|
/drivers/gpu/drm/i915/
i915_request.c
    132  i915_sw_fence_fini(&rq->submit);  in i915_fence_release()
    514  cb->fence = &rq->submit;  in __await_execution()
    775  container_of(fence, typeof(*request), submit);  in submit_notify()
   1205  struct i915_sw_fence *wait = &to->submit;  in emit_semaphore_wait()
   1461  &from->submit,  in await_request_submit()
   1629  i915_sw_fence_await_sw_fence(&rq->submit,  in i915_request_await_huc()
   1645  i915_sw_fence_await_sw_fence(&rq->submit,  in __i915_request_ensure_parallel_ordering()
   1646  &prev->submit,  in __i915_request_ensure_parallel_ordering()
   1697  i915_sw_fence_await_sw_fence(&rq->submit,  in __i915_request_ensure_ordering()
   1698  &prev->submit,  in __i915_request_ensure_ordering()
    [all …]
|
/drivers/md/
raid5.c
   1366  struct async_submit_ctl submit;  in async_copy_data() local
   1412  submit.depend_tx = tx;  in async_copy_data()
   1463  struct async_submit_ctl submit;  in ops_run_biofill() local
   1559  struct async_submit_ctl submit;  in ops_run_compute5() local
   1770  &submit);  in ops_run_compute6_2()
   1792  init_async_submit(&submit,  in ops_run_compute6_2()
   1798  &submit);  in ops_run_compute6_2()
   1806  &submit);  in ops_run_compute6_2()
   1817  blocks, offs, &submit);  in ops_run_compute6_2()
   1823  blocks, offs, &submit);  in ops_run_compute6_2()
    [all …]
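
Outside the GPU subsystem entirely: raid5.c's submit is a struct async_submit_ctl, the context object the async_tx offload API threads through chained operations, carrying the dependency descriptor, completion callback, and scratch space. A minimal sketch of the pattern, assuming the standard async_tx API; the wrapper name and flag choice are illustrative, not taken from raid5.c:

```c
#include <linux/async_tx.h>

/* Chain an XOR behind an earlier descriptor: init_async_submit() packs the
 * dependency (tx), completion callback, and scribble scratch space into the
 * async_submit_ctl that async_xor() consumes. */
static struct dma_async_tx_descriptor *
xor_after(struct dma_async_tx_descriptor *tx, struct page *dest,
	  struct page **srcs, int src_cnt, size_t len,
	  dma_async_tx_callback done, void *ctx, addr_conv_t *scribble)
{
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, tx, done, ctx, scribble);
	return async_xor(dest, srcs, 0, src_cnt, len, &submit);
}
```

The fragment at line 1412 shows the same dependency field being set directly (submit.depend_tx = tx) where a full re-init would be overkill.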
|