Lines matching refs: queue. Cross-reference search results for the PowerVR/Imagination DRM driver's queue implementation (pvr_queue.c). Each entry reads "<source line> <matching code> in <function>()"; a trailing "argument" or "local" marks whether "queue" is a function parameter or a local variable at that line.
116 pvr_context_put(fence->queue->ctx); in pvr_queue_fence_release()
125 switch (fence->queue->type) { in pvr_queue_job_fence_get_timeline_name()
148 switch (fence->queue->type) { in pvr_queue_cccb_fence_get_timeline_name()
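
The three callbacks above key off fence->queue: the release hook at 116 drops the context reference taken at init time, and the two get_timeline_name hooks switch on the queue type. A minimal sketch of that per-type naming, assuming the DRM_PVR_JOB_TYPE_* values from the PowerVR uapi; the returned strings are illustrative, not the driver's exact tables:

/* Sketch only: the name strings and the default case are assumptions. */
static const char *
pvr_queue_fence_timeline_name_sketch(struct pvr_queue_fence *fence)
{
        switch (fence->queue->type) {
        case DRM_PVR_JOB_TYPE_GEOMETRY:
                return "geometry";
        case DRM_PVR_JOB_TYPE_FRAGMENT:
                return "fragment";
        case DRM_PVR_JOB_TYPE_COMPUTE:
                return "compute";
        case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
                return "transfer";
        default:
                return "invalid";
        }
}
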
263 struct pvr_queue *queue, in pvr_queue_fence_init() argument
269 pvr_context_get(queue->ctx); in pvr_queue_fence_init()
270 fence->queue = queue; in pvr_queue_fence_init()
287 pvr_queue_cccb_fence_init(struct dma_fence *fence, struct pvr_queue *queue) in pvr_queue_cccb_fence_init() argument
289 pvr_queue_fence_init(fence, queue, &pvr_queue_cccb_fence_ops, in pvr_queue_cccb_fence_init()
290 &queue->cccb_fence_ctx.base); in pvr_queue_cccb_fence_init()
305 pvr_queue_job_fence_init(struct dma_fence *fence, struct pvr_queue *queue) in pvr_queue_job_fence_init() argument
307 pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops, in pvr_queue_job_fence_init()
308 &queue->job_fence_ctx); in pvr_queue_job_fence_init()
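
Both fence flavors funnel into pvr_queue_fence_init(), which pins the parent context (269) so the queue cannot go away under a live fence, then stamps the fence with the given ops and fence context. A sketch of that shared path, assuming the fence context carries a lock, an id, and a seqno counter (the field names are guesses):

static void
pvr_queue_fence_init_sketch(struct dma_fence *f, struct pvr_queue *queue,
                            const struct dma_fence_ops *fence_ops,
                            struct pvr_queue_fence_ctx *fence_ctx)
{
        struct pvr_queue_fence *fence =
                container_of(f, struct pvr_queue_fence, base);

        pvr_context_get(queue->ctx);    /* released in the fence release hook */
        fence->queue = queue;
        dma_fence_init(&fence->base, fence_ops, &fence_ctx->lock,
                       fence_ctx->id, atomic_inc_return(&fence_ctx->seqno));
}
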
388 pvr_queue_get_job_cccb_fence(struct pvr_queue *queue, struct pvr_job *job) in pvr_queue_get_job_cccb_fence() argument
399 mutex_lock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_get_job_cccb_fence()
403 if (pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) { in pvr_queue_get_job_cccb_fence()
412 if (WARN_ON(queue->cccb_fence_ctx.job)) in pvr_queue_get_job_cccb_fence()
413 pvr_job_put(queue->cccb_fence_ctx.job); in pvr_queue_get_job_cccb_fence()
415 queue->cccb_fence_ctx.job = pvr_job_get(job); in pvr_queue_get_job_cccb_fence()
419 if (!WARN_ON(cccb_fence->queue)) in pvr_queue_get_job_cccb_fence()
420 pvr_queue_cccb_fence_init(job->cccb_fence, queue); in pvr_queue_get_job_cccb_fence()
423 mutex_unlock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_get_job_cccb_fence()
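
Lines 388-423 implement back-pressure on the client CCCB (the per-context circular command buffer): if the job's command sequence fits (403), no internal dependency is returned; otherwise the job is parked on cccb_fence_ctx.job and its CCCB fence is lazily initialized so the scheduler waits for space. A condensed sketch, with the native-dependency accounting elided:

static struct dma_fence *
pvr_queue_get_job_cccb_fence_sketch(struct pvr_queue *queue,
                                    struct pvr_job *job)
{
        struct pvr_queue_fence *cccb_fence;

        mutex_lock(&queue->cccb_fence_ctx.job_lock);

        /* Enough CCCB space: no internal dependency needed.
         * (0 stands in for the remaining native dep count.) */
        if (pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, 0))) {
                dma_fence_put(job->cccb_fence);
                job->cccb_fence = NULL;
                goto out_unlock;
        }

        /* Park the job until pvr_queue_process() reclaims enough space. */
        queue->cccb_fence_ctx.job = pvr_job_get(job);

        /* Lazily initialize the CCCB fence on first use (419-420). */
        cccb_fence = container_of(job->cccb_fence, struct pvr_queue_fence, base);
        if (!cccb_fence->queue)
                pvr_queue_cccb_fence_init(job->cccb_fence, queue);

out_unlock:
        mutex_unlock(&queue->cccb_fence_ctx.job_lock);
        /* dma_fence_get() tolerates NULL, covering both paths. */
        return dma_fence_get(job->cccb_fence);
}
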
441 pvr_queue_get_job_kccb_fence(struct pvr_queue *queue, struct pvr_job *job) in pvr_queue_get_job_kccb_fence() argument
443 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_get_job_kccb_fence()
461 pvr_queue_get_paired_frag_job_dep(struct pvr_queue *queue, struct pvr_job *job) in pvr_queue_get_paired_frag_job_dep() argument
483 return frag_job->base.sched->ops->prepare_job(&frag_job->base, &queue->entity); in pvr_queue_get_paired_frag_job_dep()
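
Lines 461-483 cover geometry jobs that carry a paired fragment job: the fragment job's own prepare_job() hook is invoked against this queue's entity (483), so everything gating the fragment side also gates the geometry side. A condensed sketch, using the paired_job field seen in the driver:

static struct dma_fence *
pvr_queue_get_paired_frag_job_dep_sketch(struct pvr_queue *queue,
                                         struct pvr_job *job)
{
        struct pvr_job *frag_job = job->type == DRM_PVR_JOB_TYPE_GEOMETRY ?
                                   job->paired_job : NULL;

        if (!frag_job)
                return NULL;

        /* Re-use the fragment job's prepare hook so its remaining internal
         * dependencies gate the geometry job too. */
        return frag_job->base.sched->ops->prepare_job(&frag_job->base,
                                                      &queue->entity);
}
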
499 struct pvr_queue *queue = container_of(s_entity, struct pvr_queue, entity); in pvr_queue_prepare_job() local
525 pvr_queue_job_fence_init(job->done_fence, queue); in pvr_queue_prepare_job()
531 internal_dep = pvr_queue_get_job_cccb_fence(queue, job); in pvr_queue_prepare_job()
537 internal_dep = pvr_queue_get_job_kccb_fence(queue, job); in pvr_queue_prepare_job()
548 internal_dep = pvr_queue_get_paired_frag_job_dep(queue, job); in pvr_queue_prepare_job()
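
pvr_queue_prepare_job() (499-548) hands internal dependencies back to the DRM scheduler one at a time, in a fixed order: CCCB space, then a KCCB slot, then the paired fragment job. A condensed sketch of that staging (details such as paired-job handling of the done fence are omitted):

static struct dma_fence *
pvr_queue_prepare_job_sketch(struct drm_sched_job *sched_job,
                             struct drm_sched_entity *s_entity)
{
        struct pvr_job *job = container_of(sched_job, struct pvr_job, base);
        struct pvr_queue *queue = container_of(s_entity, struct pvr_queue, entity);
        struct dma_fence *internal_dep;

        /* The done fence signals when the timeline UFO reaches the job's
         * seqno (see pvr_queue_signal_done_fences() below). */
        pvr_queue_job_fence_init(job->done_fence, queue);

        /* 1. Wait until the CCCB has room for the command sequence. */
        internal_dep = pvr_queue_get_job_cccb_fence(queue, job);
        if (internal_dep)
                return internal_dep;

        /* 2. Wait for a free KCCB slot to kick the job with. */
        internal_dep = pvr_queue_get_job_kccb_fence(queue, job);
        if (internal_dep)
                return internal_dep;

        /* 3. For geometry jobs, make sure the paired fragment job is ready. */
        return pvr_queue_get_paired_frag_job_dep(queue, job);
}
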
560 static void pvr_queue_update_active_state_locked(struct pvr_queue *queue) in pvr_queue_update_active_state_locked() argument
562 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_update_active_state_locked()
570 if (list_empty(&queue->node)) in pvr_queue_update_active_state_locked()
573 if (!atomic_read(&queue->in_flight_job_count)) in pvr_queue_update_active_state_locked()
574 list_move_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_update_active_state_locked()
576 list_move_tail(&queue->node, &pvr_dev->queues.active); in pvr_queue_update_active_state_locked()
592 static void pvr_queue_update_active_state(struct pvr_queue *queue) in pvr_queue_update_active_state() argument
594 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_update_active_state()
597 pvr_queue_update_active_state_locked(queue); in pvr_queue_update_active_state()
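
Lines 560-597 are the bookkeeping that moves a queue between the device-wide idle and active lists according to its in-flight job count; the wrapper at 592 just takes pvr_dev->queues.lock around the locked variant. Sketch of the locked helper; the early return for an unlinked node (570) covers queues being destroyed:

static void pvr_queue_update_active_state_locked_sketch(struct pvr_queue *queue)
{
        struct pvr_device *pvr_dev = queue->ctx->pvr_dev;

        lockdep_assert_held(&pvr_dev->queues.lock);

        /* Unlinked node: the queue is on its way out, leave it alone. */
        if (list_empty(&queue->node))
                return;

        if (!atomic_read(&queue->in_flight_job_count))
                list_move_tail(&queue->node, &pvr_dev->queues.idle);
        else
                list_move_tail(&queue->node, &pvr_dev->queues.active);
}
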
603 struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler); in pvr_queue_submit_job_to_cccb() local
605 struct pvr_cccb *cccb = &queue->cccb; in pvr_queue_submit_job_to_cccb()
615 atomic_inc(&queue->in_flight_job_count); in pvr_queue_submit_job_to_cccb()
616 pvr_queue_update_active_state(queue); in pvr_queue_submit_job_to_cccb()
631 pvr_fw_object_get_fw_addr(jfence->queue->timeline_ufo.fw_obj, in pvr_queue_submit_job_to_cccb()
646 pvr_fw_object_get_fw_addr(jfence->queue->timeline_ufo.fw_obj, in pvr_queue_submit_job_to_cccb()
663 pvr_fw_object_get_fw_addr(queue->timeline_ufo.fw_obj, in pvr_queue_submit_job_to_cccb()
673 pvr_fw_object_get_fw_addr(queue->timeline_ufo.fw_obj, &ufos[0].addr); in pvr_queue_submit_job_to_cccb()
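
In the submit path (603-673), native dependencies become firmware UFO waits on other queues' timeline objects (631, 646), and completion is expressed as an update of this queue's own timeline UFO (663, 673). An illustrative sketch of filling one wait entry; struct pvr_ufo_sketch is an assumed stand-in for the firmware's UFO descriptor, not the driver's real type:

struct pvr_ufo_sketch {
        u32 addr;       /* FW address of a queue's 32-bit timeline counter */
        u32 value;      /* seqno to wait for (or to write, for updates) */
};

static void fill_wait_ufo_sketch(struct pvr_queue_fence *jfence,
                                 struct pvr_ufo_sketch *ufo)
{
        /* Point the wait at the dependency queue's timeline UFO... */
        pvr_fw_object_get_fw_addr(jfence->queue->timeline_ufo.fw_obj,
                                  &ufo->addr);
        /* ...and block until the firmware has bumped it to the fence seqno. */
        ufo->value = lower_32_bits(jfence->base.seqno);
}
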
745 struct pvr_queue *queue = container_of(job->base.sched, in pvr_queue_run_job() local
748 pvr_cccb_send_kccb_kick(pvr_dev, &queue->cccb, in pvr_queue_run_job()
749 pvr_context_get_fw_addr(job->ctx) + queue->ctx_offset, in pvr_queue_run_job()
756 static void pvr_queue_stop(struct pvr_queue *queue, struct pvr_job *bad_job) in pvr_queue_stop() argument
758 drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL); in pvr_queue_stop()
761 static void pvr_queue_start(struct pvr_queue *queue) in pvr_queue_start() argument
768 *queue->timeline_ufo.value = atomic_read(&queue->job_fence_ctx.seqno); in pvr_queue_start()
770 list_for_each_entry(job, &queue->scheduler.pending_list, base.list) { in pvr_queue_start()
781 atomic_set(&queue->ctx->faulty, 1); in pvr_queue_start()
785 drm_sched_start(&queue->scheduler); in pvr_queue_start()
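
pvr_queue_start() (761-785) resynchronizes after a stop or reset: the CPU-visible timeline UFO is rewritten from the job fence context's seqno (768) so driver and firmware agree on how far the timeline got, any job that did not finish marks the context faulty (781), and the DRM scheduler is restarted (785). A sketch, simplifying the per-job policy:

static void pvr_queue_start_sketch(struct pvr_queue *queue)
{
        struct pvr_job *job;

        /* Make the timeline UFO match the last signaled fence. */
        *queue->timeline_ufo.value = atomic_read(&queue->job_fence_ctx.seqno);

        list_for_each_entry(job, &queue->scheduler.pending_list, base.list) {
                if (!dma_fence_is_signaled(job->done_fence)) {
                        /* Unfinished job after a reset: flag the context.
                         * (Simplification of the real policy.) */
                        atomic_set(&queue->ctx->faulty, 1);
                }
        }

        drm_sched_start(&queue->scheduler);
}
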
802 struct pvr_queue *queue = container_of(sched, struct pvr_queue, scheduler); in pvr_queue_timedout_job() local
803 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_timedout_job()
820 list_del_init(&queue->node); in pvr_queue_timedout_job()
830 WARN_ON(atomic_read(&queue->in_flight_job_count) != job_count); in pvr_queue_timedout_job()
837 list_move_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_timedout_job()
839 atomic_set(&queue->in_flight_job_count, job_count); in pvr_queue_timedout_job()
840 list_move_tail(&queue->node, &pvr_dev->queues.active); in pvr_queue_timedout_job()
841 pvr_queue_process(queue); in pvr_queue_timedout_job()
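
The timeout handler (802-841) first unlinks the queue (820) so device-level queue processing stops touching it, stops the scheduler, recounts the jobs still in flight (the WARN at 830 cross-checks the counter), then re-inserts the queue on the appropriate list. A condensed sketch of that re-insertion step, with the recount passed in:

static void pvr_queue_timeout_reinsert_sketch(struct pvr_queue *queue,
                                              u32 job_count)
{
        struct pvr_device *pvr_dev = queue->ctx->pvr_dev;

        mutex_lock(&pvr_dev->queues.lock);
        if (!job_count) {
                list_move_tail(&queue->node, &pvr_dev->queues.idle);
        } else {
                atomic_set(&queue->in_flight_job_count, job_count);
                list_move_tail(&queue->node, &pvr_dev->queues.active);
                pvr_queue_process(queue);
        }
        mutex_unlock(&pvr_dev->queues.lock);
}
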
902 pvr_queue_signal_done_fences(struct pvr_queue *queue) in pvr_queue_signal_done_fences() argument
907 spin_lock(&queue->scheduler.job_list_lock); in pvr_queue_signal_done_fences()
908 cur_seqno = *queue->timeline_ufo.value; in pvr_queue_signal_done_fences()
909 list_for_each_entry_safe(job, tmp_job, &queue->scheduler.pending_list, base.list) { in pvr_queue_signal_done_fences()
916 atomic_dec(&queue->in_flight_job_count); in pvr_queue_signal_done_fences()
919 spin_unlock(&queue->scheduler.job_list_lock); in pvr_queue_signal_done_fences()
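
pvr_queue_signal_done_fences() (902-919) is the retire path: the firmware bumps the timeline UFO as jobs complete, and the driver signals every pending done fence whose seqno the UFO has reached. A sketch; pvr_job_release_pm_ref() is assumed from the driver's power-management handling, and the comparison is the usual wraparound-safe signed-difference idiom:

static void pvr_queue_signal_done_fences_sketch(struct pvr_queue *queue)
{
        struct pvr_job *job, *tmp_job;
        u32 cur_seqno;

        spin_lock(&queue->scheduler.job_list_lock);
        cur_seqno = *queue->timeline_ufo.value;
        list_for_each_entry_safe(job, tmp_job,
                                 &queue->scheduler.pending_list, base.list) {
                /* Stop at the first job the UFO hasn't reached yet. */
                if ((int)(cur_seqno - lower_32_bits(job->done_fence->seqno)) < 0)
                        break;

                if (!dma_fence_is_signaled(job->done_fence)) {
                        dma_fence_signal(job->done_fence);
                        pvr_job_release_pm_ref(job);    /* assumed helper */
                        atomic_dec(&queue->in_flight_job_count);
                }
        }
        spin_unlock(&queue->scheduler.job_list_lock);
}
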
932 pvr_queue_check_job_waiting_for_cccb_space(struct pvr_queue *queue) in pvr_queue_check_job_waiting_for_cccb_space() argument
938 mutex_lock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_check_job_waiting_for_cccb_space()
939 job = queue->cccb_fence_ctx.job; in pvr_queue_check_job_waiting_for_cccb_space()
953 if (WARN_ON(!cccb_fence->queue)) { in pvr_queue_check_job_waiting_for_cccb_space()
963 if (!pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) { in pvr_queue_check_job_waiting_for_cccb_space()
971 queue->cccb_fence_ctx.job = NULL; in pvr_queue_check_job_waiting_for_cccb_space()
974 mutex_unlock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_check_job_waiting_for_cccb_space()
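
pvr_queue_check_job_waiting_for_cccb_space() (932-974) is the unpark path matching pvr_queue_get_job_cccb_fence() above: once enough ring space has been reclaimed, the parked job's CCCB fence is signaled so the scheduler entity unblocks. A condensed sketch; pvr_job_put() is assumed NULL-safe, as the unconditional call at 974 suggests:

static void
pvr_queue_check_job_waiting_for_cccb_space_sketch(struct pvr_queue *queue)
{
        struct pvr_job *job;

        mutex_lock(&queue->cccb_fence_ctx.job_lock);
        job = queue->cccb_fence_ctx.job;
        if (!job)
                goto out_unlock;

        /* Still not enough room: keep the job parked.
         * (0 stands in for the remaining native dep count.) */
        if (!pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, 0))) {
                job = NULL;
                goto out_unlock;
        }

        /* Enough space now: wake the scheduler and drop the parked ref. */
        dma_fence_signal(job->cccb_fence);
        dma_fence_put(job->cccb_fence);
        job->cccb_fence = NULL;
        queue->cccb_fence_ctx.job = NULL;

out_unlock:
        mutex_unlock(&queue->cccb_fence_ctx.job_lock);
        pvr_job_put(job);       /* assumed NULL-safe */
}
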
985 void pvr_queue_process(struct pvr_queue *queue) in pvr_queue_process() argument
987 lockdep_assert_held(&queue->ctx->pvr_dev->queues.lock); in pvr_queue_process()
989 pvr_queue_check_job_waiting_for_cccb_space(queue); in pvr_queue_process()
990 pvr_queue_signal_done_fences(queue); in pvr_queue_process()
991 pvr_queue_update_active_state_locked(queue); in pvr_queue_process()
994 static u32 get_dm_type(struct pvr_queue *queue) in get_dm_type() argument
996 switch (queue->type) { in get_dm_type()
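
get_dm_type() (994-996) maps the queue's uapi job type onto a firmware data-master ID. A sketch assuming the PVR_FWIF_DM_* constants from the firmware interface headers; transfer jobs run on the fragment data master, hence the shared case:

static u32 get_dm_type_sketch(struct pvr_queue *queue)
{
        switch (queue->type) {
        case DRM_PVR_JOB_TYPE_GEOMETRY:
                return PVR_FWIF_DM_GEOM;
        case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:    /* runs on the fragment DM */
        case DRM_PVR_JOB_TYPE_FRAGMENT:
                return PVR_FWIF_DM_FRAG;
        case DRM_PVR_JOB_TYPE_COMPUTE:
                return PVR_FWIF_DM_CDM;
        }

        return ~0;
}
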
1018 static void init_fw_context(struct pvr_queue *queue, void *fw_ctx_map) in init_fw_context() argument
1020 struct pvr_context *ctx = queue->ctx; in init_fw_context()
1023 struct pvr_cccb *cccb = &queue->cccb; in init_fw_context()
1025 cctx_fw = fw_ctx_map + queue->ctx_offset; in init_fw_context()
1029 cctx_fw->dm = get_dm_type(queue); in init_fw_context()
1038 pvr_fw_object_get_fw_addr(queue->reg_state_obj, &cctx_fw->context_state_addr); in init_fw_context()
1049 static int pvr_queue_cleanup_fw_context(struct pvr_queue *queue) in pvr_queue_cleanup_fw_context() argument
1051 if (!queue->ctx->fw_obj) in pvr_queue_cleanup_fw_context()
1054 return pvr_fw_structure_cleanup(queue->ctx->pvr_dev, in pvr_queue_cleanup_fw_context()
1056 queue->ctx->fw_obj, queue->ctx_offset); in pvr_queue_cleanup_fw_context()
1075 struct pvr_queue *queue; in pvr_queue_job_init() local
1081 queue = pvr_context_get_queue_for_job(job->ctx, job->type); in pvr_queue_job_init()
1082 if (!queue) in pvr_queue_job_init()
1085 if (!pvr_cccb_cmdseq_can_fit(&queue->cccb, job_cmds_size(job, min_native_dep_count))) in pvr_queue_job_init()
1088 err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE); in pvr_queue_job_init()
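
pvr_queue_job_init() (1075-1088) resolves the queue for the job's type, rejects command sequences that could never fit even in a fully drained CCCB (1085, note cmdseq_can_fit here versus the cmdseq_fits check used at submit time), and binds the job to the queue's scheduler entity. A sketch; the error codes and the dep-count placeholder are assumptions:

static int pvr_queue_job_init_sketch(struct pvr_job *job)
{
        /* Placeholder: the driver derives this from the job's native deps. */
        u32 min_native_dep_count = 0;
        struct pvr_queue *queue;

        queue = pvr_context_get_queue_for_job(job->ctx, job->type);
        if (!queue)
                return -EINVAL;

        /* An empty CCCB is the best case; if it can't fit even then, fail. */
        if (!pvr_cccb_cmdseq_can_fit(&queue->cccb,
                                     job_cmds_size(job, min_native_dep_count)))
                return -E2BIG;

        return drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE);
}
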
1151 struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler); in pvr_queue_job_push() local
1154 dma_fence_put(queue->last_queued_job_scheduled_fence); in pvr_queue_job_push()
1155 queue->last_queued_job_scheduled_fence = dma_fence_get(&job->base.s_fence->scheduled); in pvr_queue_job_push()
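
pvr_queue_job_push() (1151-1155) caches the scheduled fence of the most recently queued job before handing the job to the entity; the driver presumably uses it to order combined geometry/fragment submissions. Sketch; drm_sched_entity_push_job() is the standard DRM scheduler API:

void pvr_queue_job_push_sketch(struct pvr_job *job)
{
        struct pvr_queue *queue = container_of(job->base.sched,
                                               struct pvr_queue, scheduler);

        /* Remember the last queued job's scheduled fence. */
        dma_fence_put(queue->last_queued_job_scheduled_fence);
        queue->last_queued_job_scheduled_fence =
                dma_fence_get(&job->base.s_fence->scheduled);

        drm_sched_entity_push_job(&job->base);
}
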
1163 struct pvr_queue *queue = priv; in reg_state_init() local
1165 if (queue->type == DRM_PVR_JOB_TYPE_GEOMETRY) { in reg_state_init()
1169 queue->callstack_addr; in reg_state_init()
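
reg_state_init() (1163-1169) is the creation-time callback (passed to the FW object allocator at 1263) that seeds the register state object; for geometry queues it writes the VDM call-stack pointer from the user-supplied callstack_addr. A sketch, with the firmware struct and field names assumed from the rogue FW interface:

static void reg_state_init_sketch(void *cpu_ptr, void *priv)
{
        struct pvr_queue *queue = priv;

        if (queue->type == DRM_PVR_JOB_TYPE_GEOMETRY) {
                struct rogue_fwif_geom_ctx_state *geom_ctx_state_fw = cpu_ptr;

                /* Seed the VDM call stack pointer for the geometry core. */
                geom_ctx_state_fw->geom_core[0].geom_reg_vdm_call_stack_pointer_init =
                        queue->callstack_addr;
        }
}
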
1214 struct pvr_queue *queue; in pvr_queue_create() local
1243 queue = kzalloc(sizeof(*queue), GFP_KERNEL); in pvr_queue_create()
1244 if (!queue) in pvr_queue_create()
1247 queue->type = type; in pvr_queue_create()
1248 queue->ctx_offset = get_ctx_offset(type); in pvr_queue_create()
1249 queue->ctx = ctx; in pvr_queue_create()
1250 queue->callstack_addr = args->callstack_addr; in pvr_queue_create()
1251 sched = &queue->scheduler; in pvr_queue_create()
1252 INIT_LIST_HEAD(&queue->node); in pvr_queue_create()
1253 mutex_init(&queue->cccb_fence_ctx.job_lock); in pvr_queue_create()
1254 pvr_queue_fence_ctx_init(&queue->cccb_fence_ctx.base); in pvr_queue_create()
1255 pvr_queue_fence_ctx_init(&queue->job_fence_ctx); in pvr_queue_create()
1257 err = pvr_cccb_init(pvr_dev, &queue->cccb, props[type].cccb_size, props[type].name); in pvr_queue_create()
1263 reg_state_init, queue, &queue->reg_state_obj); in pvr_queue_create()
1267 init_fw_context(queue, fw_ctx_map); in pvr_queue_create()
1275 cpu_map = pvr_fw_object_create_and_map(pvr_dev, sizeof(*queue->timeline_ufo.value), in pvr_queue_create()
1277 NULL, NULL, &queue->timeline_ufo.fw_obj); in pvr_queue_create()
1283 queue->timeline_ufo.value = cpu_map; in pvr_queue_create()
1285 err = drm_sched_init(&queue->scheduler, in pvr_queue_create()
1294 err = drm_sched_entity_init(&queue->entity, in pvr_queue_create()
1301 list_add_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_create()
1304 return queue; in pvr_queue_create()
1307 drm_sched_fini(&queue->scheduler); in pvr_queue_create()
1310 pvr_fw_object_unmap_and_destroy(queue->timeline_ufo.fw_obj); in pvr_queue_create()
1313 pvr_fw_object_destroy(queue->reg_state_obj); in pvr_queue_create()
1316 pvr_cccb_fini(&queue->cccb); in pvr_queue_create()
1319 mutex_destroy(&queue->cccb_fence_ctx.job_lock); in pvr_queue_create()
1320 kfree(queue); in pvr_queue_create()
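
pvr_queue_create() (1214-1320) builds the queue in a fixed order (CCCB, register state object, firmware context, timeline UFO, DRM scheduler, entity) and the error ladder at 1307-1319 unwinds those steps in exactly the reverse order. A heavily condensed sketch of that shape; init_queue_resources_sketch() is a hypothetical stand-in for the individual steps listed above:

/* Hypothetical helper folding together the CCCB/reg-state/FW-ctx/UFO/
 * scheduler/entity init steps, each of which has its own error label
 * in the real function. */
static int init_queue_resources_sketch(struct pvr_queue *queue);

struct pvr_queue *
pvr_queue_create_sketch(struct pvr_device *pvr_dev, struct pvr_context *ctx,
                        enum drm_pvr_job_type type)
{
        struct pvr_queue *queue;
        int err;

        queue = kzalloc(sizeof(*queue), GFP_KERNEL);
        if (!queue)
                return ERR_PTR(-ENOMEM);

        queue->type = type;
        queue->ctx = ctx;
        INIT_LIST_HEAD(&queue->node);
        mutex_init(&queue->cccb_fence_ctx.job_lock);

        err = init_queue_resources_sketch(queue);
        if (err)
                goto err_free;

        mutex_lock(&pvr_dev->queues.lock);
        list_add_tail(&queue->node, &pvr_dev->queues.idle);
        mutex_unlock(&pvr_dev->queues.lock);

        return queue;

err_free:
        mutex_destroy(&queue->cccb_fence_ctx.job_lock);
        kfree(queue);
        return ERR_PTR(err);
}
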
1327 struct pvr_queue *queue; in pvr_queue_device_pre_reset() local
1330 list_for_each_entry(queue, &pvr_dev->queues.idle, node) in pvr_queue_device_pre_reset()
1331 pvr_queue_stop(queue, NULL); in pvr_queue_device_pre_reset()
1332 list_for_each_entry(queue, &pvr_dev->queues.active, node) in pvr_queue_device_pre_reset()
1333 pvr_queue_stop(queue, NULL); in pvr_queue_device_pre_reset()
1339 struct pvr_queue *queue; in pvr_queue_device_post_reset() local
1342 list_for_each_entry(queue, &pvr_dev->queues.active, node) in pvr_queue_device_post_reset()
1343 pvr_queue_start(queue); in pvr_queue_device_post_reset()
1344 list_for_each_entry(queue, &pvr_dev->queues.idle, node) in pvr_queue_device_post_reset()
1345 pvr_queue_start(queue); in pvr_queue_device_post_reset()
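
Around a device reset (1327-1345), every queue's scheduler is stopped beforehand and restarted afterwards, walking both the idle and active lists; taking pvr_dev->queues.lock around the walks is implied by the list usage elsewhere in this listing. Sketch of the pre-reset half:

void pvr_queue_device_pre_reset_sketch(struct pvr_device *pvr_dev)
{
        struct pvr_queue *queue;

        mutex_lock(&pvr_dev->queues.lock);
        list_for_each_entry(queue, &pvr_dev->queues.idle, node)
                pvr_queue_stop(queue, NULL);
        list_for_each_entry(queue, &pvr_dev->queues.active, node)
                pvr_queue_stop(queue, NULL);
        mutex_unlock(&pvr_dev->queues.lock);
}
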
1358 void pvr_queue_kill(struct pvr_queue *queue) in pvr_queue_kill() argument
1360 drm_sched_entity_destroy(&queue->entity); in pvr_queue_kill()
1361 dma_fence_put(queue->last_queued_job_scheduled_fence); in pvr_queue_kill()
1362 queue->last_queued_job_scheduled_fence = NULL; in pvr_queue_kill()
1372 void pvr_queue_destroy(struct pvr_queue *queue) in pvr_queue_destroy() argument
1374 if (!queue) in pvr_queue_destroy()
1377 mutex_lock(&queue->ctx->pvr_dev->queues.lock); in pvr_queue_destroy()
1378 list_del_init(&queue->node); in pvr_queue_destroy()
1379 mutex_unlock(&queue->ctx->pvr_dev->queues.lock); in pvr_queue_destroy()
1381 drm_sched_fini(&queue->scheduler); in pvr_queue_destroy()
1382 drm_sched_entity_fini(&queue->entity); in pvr_queue_destroy()
1384 if (WARN_ON(queue->last_queued_job_scheduled_fence)) in pvr_queue_destroy()
1385 dma_fence_put(queue->last_queued_job_scheduled_fence); in pvr_queue_destroy()
1387 pvr_queue_cleanup_fw_context(queue); in pvr_queue_destroy()
1389 pvr_fw_object_unmap_and_destroy(queue->timeline_ufo.fw_obj); in pvr_queue_destroy()
1390 pvr_fw_object_destroy(queue->reg_state_obj); in pvr_queue_destroy()
1391 pvr_cccb_fini(&queue->cccb); in pvr_queue_destroy()
1392 mutex_destroy(&queue->cccb_fence_ctx.job_lock); in pvr_queue_destroy()
1393 kfree(queue); in pvr_queue_destroy()