Lines matching refs: q
37 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
40 static void __xe_exec_queue_free(struct xe_exec_queue *q) in __xe_exec_queue_free() argument
42 if (xe_exec_queue_uses_pxp(q)) in __xe_exec_queue_free()
43 xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q); in __xe_exec_queue_free()
44 if (q->vm) in __xe_exec_queue_free()
45 xe_vm_put(q->vm); in __xe_exec_queue_free()
47 if (q->xef) in __xe_exec_queue_free()
48 xe_file_put(q->xef); in __xe_exec_queue_free()
50 kfree(q); in __xe_exec_queue_free()
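
The free path above is the terminal step of a kref lifecycle: PXP state is detached first, then the VM and file references are dropped, and the struct itself is freed last. A minimal sketch of the get/put pair that feeds this path, reconstructed from the kref_init() and container_of() lines elsewhere in this listing (the real helpers live in the driver header):

        static inline struct xe_exec_queue *xe_exec_queue_get(struct xe_exec_queue *q)
        {
                kref_get(&q->refcount); /* paired with kref_init() in __xe_exec_queue_alloc() */
                return q;
        }

        static inline void xe_exec_queue_put(struct xe_exec_queue *q)
        {
                /* the final put runs xe_exec_queue_destroy(), which ends in __xe_exec_queue_free() */
                kref_put(&q->refcount, xe_exec_queue_destroy);
        }
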
59 struct xe_exec_queue *q; in __xe_exec_queue_alloc() local
66 q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL); in __xe_exec_queue_alloc()
67 if (!q) in __xe_exec_queue_alloc()
70 kref_init(&q->refcount); in __xe_exec_queue_alloc()
71 q->flags = flags; in __xe_exec_queue_alloc()
72 q->hwe = hwe; in __xe_exec_queue_alloc()
73 q->gt = gt; in __xe_exec_queue_alloc()
74 q->class = hwe->class; in __xe_exec_queue_alloc()
75 q->width = width; in __xe_exec_queue_alloc()
76 q->msix_vec = XE_IRQ_DEFAULT_MSIX; in __xe_exec_queue_alloc()
77 q->logical_mask = logical_mask; in __xe_exec_queue_alloc()
78 q->fence_irq = &gt->fence_irq[hwe->class]; in __xe_exec_queue_alloc()
79 q->ring_ops = gt->ring_ops[hwe->class]; in __xe_exec_queue_alloc()
80 q->ops = gt->exec_queue_ops; in __xe_exec_queue_alloc()
81 INIT_LIST_HEAD(&q->lr.link); in __xe_exec_queue_alloc()
82 INIT_LIST_HEAD(&q->multi_gt_link); in __xe_exec_queue_alloc()
83 INIT_LIST_HEAD(&q->hw_engine_group_link); in __xe_exec_queue_alloc()
84 INIT_LIST_HEAD(&q->pxp.link); in __xe_exec_queue_alloc()
86 q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us; in __xe_exec_queue_alloc()
87 q->sched_props.preempt_timeout_us = in __xe_exec_queue_alloc()
89 q->sched_props.job_timeout_ms = in __xe_exec_queue_alloc()
91 if (q->flags & EXEC_QUEUE_FLAG_KERNEL && in __xe_exec_queue_alloc()
92 q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY) in __xe_exec_queue_alloc()
93 q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL; in __xe_exec_queue_alloc()
95 q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL; in __xe_exec_queue_alloc()
98 q->vm = xe_vm_get(vm); in __xe_exec_queue_alloc()
105 err = exec_queue_user_extensions(xe, q, extensions, 0); in __xe_exec_queue_alloc()
107 __xe_exec_queue_free(q); in __xe_exec_queue_alloc()
112 return q; in __xe_exec_queue_alloc()
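
The allocation at line 66 uses struct_size() because the queue struct ends in a flexible array of LRC pointers, one per hardware slot of a parallel queue. A sketch of what that call computes (the surrounding field layout is an assumption; only the lrc[] array and width are implied by this listing):

        struct example_queue {
                struct kref refcount;
                u16 width;              /* number of hardware slots */
                struct xe_lrc *lrc[];   /* flexible array: one LRC per slot */
        };

        /* struct_size(q, lrc, width) == sizeof(*q) + width * sizeof(q->lrc[0]),
         * with overflow checking folded in, so kzalloc() gets a single
         * allocation covering the struct and all width LRC pointers. */
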
115 static int __xe_exec_queue_init(struct xe_exec_queue *q) in __xe_exec_queue_init() argument
126 if (xe_exec_queue_uses_pxp(q) && in __xe_exec_queue_init()
127 (q->class == XE_ENGINE_CLASS_RENDER || q->class == XE_ENGINE_CLASS_COMPUTE)) { in __xe_exec_queue_init()
128 if (GRAPHICS_VER(gt_to_xe(q->gt)) >= 20) in __xe_exec_queue_init()
134 for (i = 0; i < q->width; ++i) { in __xe_exec_queue_init()
135 q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags); in __xe_exec_queue_init()
136 if (IS_ERR(q->lrc[i])) { in __xe_exec_queue_init()
137 err = PTR_ERR(q->lrc[i]); in __xe_exec_queue_init()
142 err = q->ops->init(q); in __xe_exec_queue_init()
150 xe_lrc_put(q->lrc[i]); in __xe_exec_queue_init()
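
The init fragment follows the usual create-or-unwind shape: one LRC per slot, and on failure only the LRCs created so far are released. A sketch of the elided control flow, consistent with the xe_lrc_put() at line 150 (the label name and the unwind index arithmetic are assumptions; i must be a signed int for the unwind loop to terminate):

        for (i = 0; i < q->width; ++i) {
                q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags);
                if (IS_ERR(q->lrc[i])) {
                        err = PTR_ERR(q->lrc[i]);
                        goto err_lrc;
                }
        }
        ...
        err_lrc:
                for (i = i - 1; i >= 0; --i)    /* unwind only what was created */
                        xe_lrc_put(q->lrc[i]);
                return err;
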
159 struct xe_exec_queue *q; in xe_exec_queue_create() local
165 q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags, in xe_exec_queue_create()
167 if (IS_ERR(q)) in xe_exec_queue_create()
168 return q; in xe_exec_queue_create()
170 err = __xe_exec_queue_init(q); in xe_exec_queue_create()
181 if (xe_exec_queue_uses_pxp(q)) { in xe_exec_queue_create()
182 err = xe_pxp_exec_queue_add(xe->pxp, q); in xe_exec_queue_create()
187 return q; in xe_exec_queue_create()
190 __xe_exec_queue_free(q); in xe_exec_queue_create()
242 struct xe_exec_queue *q; in xe_exec_queue_create_bind() local
257 q = xe_exec_queue_create(xe, migrate_vm, in xe_exec_queue_create_bind()
261 q = xe_exec_queue_create_class(xe, gt, migrate_vm, in xe_exec_queue_create_bind()
267 return q; in xe_exec_queue_create_bind()
273 struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount); in xe_exec_queue_destroy() local
276 if (xe_exec_queue_uses_pxp(q)) in xe_exec_queue_destroy()
277 xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q); in xe_exec_queue_destroy()
279 xe_exec_queue_last_fence_put_unlocked(q); in xe_exec_queue_destroy()
280 if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) { in xe_exec_queue_destroy()
281 list_for_each_entry_safe(eq, next, &q->multi_gt_list, in xe_exec_queue_destroy()
286 q->ops->fini(q); in xe_exec_queue_destroy()
289 void xe_exec_queue_fini(struct xe_exec_queue *q) in xe_exec_queue_fini() argument
297 xe_exec_queue_update_run_ticks(q); in xe_exec_queue_fini()
298 if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal)) in xe_exec_queue_fini()
299 wake_up_var(&q->xef->exec_queue.pending_removal); in xe_exec_queue_fini()
301 for (i = 0; i < q->width; ++i) in xe_exec_queue_fini()
302 xe_lrc_put(q->lrc[i]); in xe_exec_queue_fini()
304 __xe_exec_queue_free(q); in xe_exec_queue_fini()
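
The atomic_dec_and_test()/wake_up_var() pair at lines 298-299 implies a waiter that blocks until all of a file's queues have finished tearing down. A plausible wait side, sketched with the matching wait_var_event() primitive (the call site is an assumption):

        /* block until every pending exec-queue removal for this file completes */
        wait_var_event(&xef->exec_queue.pending_removal,
                       !atomic_read(&xef->exec_queue.pending_removal));
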
307 void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance) in xe_exec_queue_assign_name() argument
309 switch (q->class) { in xe_exec_queue_assign_name()
311 snprintf(q->name, sizeof(q->name), "rcs%d", instance); in xe_exec_queue_assign_name()
314 snprintf(q->name, sizeof(q->name), "vcs%d", instance); in xe_exec_queue_assign_name()
317 snprintf(q->name, sizeof(q->name), "vecs%d", instance); in xe_exec_queue_assign_name()
320 snprintf(q->name, sizeof(q->name), "bcs%d", instance); in xe_exec_queue_assign_name()
323 snprintf(q->name, sizeof(q->name), "ccs%d", instance); in xe_exec_queue_assign_name()
326 snprintf(q->name, sizeof(q->name), "gsccs%d", instance); in xe_exec_queue_assign_name()
329 XE_WARN_ON(q->class); in xe_exec_queue_assign_name()
335 struct xe_exec_queue *q; in xe_exec_queue_lookup() local
338 q = xa_load(&xef->exec_queue.xa, id); in xe_exec_queue_lookup()
339 if (q) in xe_exec_queue_lookup()
340 xe_exec_queue_get(q); in xe_exec_queue_lookup()
343 return q; in xe_exec_queue_lookup()
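
Note that the lookup takes a reference before returning, so every successful caller owns a reference it must drop; the get_property ioctl further down follows exactly this pattern. A minimal caller sketch (the error code is an assumption):

        q = xe_exec_queue_lookup(xef, id);
        if (!q)
                return -ENOENT;         /* assumed error code */
        /* ... use q ... */
        xe_exec_queue_put(q);
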
353 static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_set_priority() argument
362 q->sched_props.priority = value; in exec_queue_set_priority()
418 static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_set_timeslice() argument
423 xe_exec_queue_get_prop_minmax(q->hwe->eclass, in exec_queue_set_timeslice()
430 q->sched_props.timeslice_us = value; in exec_queue_set_timeslice()
435 exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value) in exec_queue_set_pxp_type() argument
447 return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM); in exec_queue_set_pxp_type()
451 struct xe_exec_queue *q,
461 struct xe_exec_queue *q, in exec_queue_user_ext_set_property() argument
485 return exec_queue_set_property_funcs[idx](xe, q, ext.value); in exec_queue_user_ext_set_property()
489 struct xe_exec_queue *q,
497 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_user_extensions() argument
519 err = exec_queue_user_extension_funcs[idx](xe, q, extensions); in exec_queue_user_extensions()
524 return exec_queue_user_extensions(xe, q, ext.next_extension, in exec_queue_user_extensions()
593 struct xe_exec_queue *q = NULL; in xe_exec_queue_create_ioctl() local
636 if (q) in xe_exec_queue_create_ioctl()
641 q = new; in xe_exec_queue_create_ioctl()
644 &q->multi_gt_link); in xe_exec_queue_create_ioctl()
673 q = xe_exec_queue_create(xe, vm, logical_mask, in xe_exec_queue_create_ioctl()
678 if (IS_ERR(q)) in xe_exec_queue_create_ioctl()
679 return PTR_ERR(q); in xe_exec_queue_create_ioctl()
682 q->lr.context = dma_fence_context_alloc(1); in xe_exec_queue_create_ioctl()
684 err = xe_vm_add_compute_exec_queue(vm, q); in xe_exec_queue_create_ioctl()
689 if (q->vm && q->hwe->hw_engine_group) { in xe_exec_queue_create_ioctl()
690 err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q); in xe_exec_queue_create_ioctl()
696 q->xef = xe_file_get(xef); in xe_exec_queue_create_ioctl()
699 err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL); in xe_exec_queue_create_ioctl()
708 xe_exec_queue_kill(q); in xe_exec_queue_create_ioctl()
710 xe_exec_queue_put(q); in xe_exec_queue_create_ioctl()
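
The unwind order at the end of the create ioctl matters: a queue that may already be known to the backend is killed first, and only then is its reference dropped, mirroring the destroy ioctl below. A sketch of that error tail (label names are assumptions):

        err_kill:
                xe_exec_queue_kill(q);  /* stop backend/VM usage first */
        err_put:
                xe_exec_queue_put(q);   /* then drop the ioctl's reference */
                return err;
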
720 struct xe_exec_queue *q; in xe_exec_queue_get_property_ioctl() local
726 q = xe_exec_queue_lookup(xef, args->exec_queue_id); in xe_exec_queue_get_property_ioctl()
727 if (XE_IOCTL_DBG(xe, !q)) in xe_exec_queue_get_property_ioctl()
732 args->value = q->ops->reset_status(q); in xe_exec_queue_get_property_ioctl()
739 xe_exec_queue_put(q); in xe_exec_queue_get_property_ioctl()
750 bool xe_exec_queue_is_lr(struct xe_exec_queue *q) in xe_exec_queue_is_lr() argument
752 return q->vm && xe_vm_in_lr_mode(q->vm) && in xe_exec_queue_is_lr()
753 !(q->flags & EXEC_QUEUE_FLAG_VM); in xe_exec_queue_is_lr()
756 static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q) in xe_exec_queue_num_job_inflight() argument
758 return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1; in xe_exec_queue_num_job_inflight()
767 bool xe_exec_queue_ring_full(struct xe_exec_queue *q) in xe_exec_queue_ring_full() argument
769 struct xe_lrc *lrc = q->lrc[0]; in xe_exec_queue_ring_full()
772 return xe_exec_queue_num_job_inflight(q) >= max_job; in xe_exec_queue_ring_full()
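
The inflight count is pure seqno arithmetic: the next seqno the queue will emit, minus the last seqno the hardware has signalled, minus one. A worked example with hypothetical values:

        next_seqno = 10;        /* q->lrc[0]->fence_ctx.next_seqno */
        signalled  = 6;         /* xe_lrc_seqno(q->lrc[0]) */
        inflight   = 10 - 6 - 1;        /* == 3 jobs still on the ring */
        /* xe_exec_queue_ring_full() reports true once inflight >= max_job */
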
789 bool xe_exec_queue_is_idle(struct xe_exec_queue *q) in xe_exec_queue_is_idle() argument
791 if (xe_exec_queue_is_parallel(q)) { in xe_exec_queue_is_idle()
794 for (i = 0; i < q->width; ++i) { in xe_exec_queue_is_idle()
795 if (xe_lrc_seqno(q->lrc[i]) != in xe_exec_queue_is_idle()
796 q->lrc[i]->fence_ctx.next_seqno - 1) in xe_exec_queue_is_idle()
803 return xe_lrc_seqno(q->lrc[0]) == in xe_exec_queue_is_idle()
804 q->lrc[0]->fence_ctx.next_seqno - 1; in xe_exec_queue_is_idle()
815 void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q) in xe_exec_queue_update_run_ticks() argument
817 struct xe_device *xe = gt_to_xe(q->gt); in xe_exec_queue_update_run_ticks()
826 if (!q->xef) in xe_exec_queue_update_run_ticks()
840 lrc = q->lrc[0]; in xe_exec_queue_update_run_ticks()
842 q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width; in xe_exec_queue_update_run_ticks()
856 void xe_exec_queue_kill(struct xe_exec_queue *q) in xe_exec_queue_kill() argument
858 struct xe_exec_queue *eq = q, *next; in xe_exec_queue_kill()
862 q->ops->kill(eq); in xe_exec_queue_kill()
863 xe_vm_remove_compute_exec_queue(q->vm, eq); in xe_exec_queue_kill()
866 q->ops->kill(q); in xe_exec_queue_kill()
867 xe_vm_remove_compute_exec_queue(q->vm, q); in xe_exec_queue_kill()
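
Kill fans out across a multi-GT queue before touching the parent: the loop body at lines 862-863 runs once per child, then lines 866-867 handle q itself. A sketch of the elided loop head, consistent with the multi_gt_list/multi_gt_link fields seen in the destroy path above:

        list_for_each_entry_safe(eq, next, &q->multi_gt_list, multi_gt_link) {
                q->ops->kill(eq);
                xe_vm_remove_compute_exec_queue(q->vm, eq);
        }
        q->ops->kill(q);
        xe_vm_remove_compute_exec_queue(q->vm, q);
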
876 struct xe_exec_queue *q; in xe_exec_queue_destroy_ioctl() local
883 q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id); in xe_exec_queue_destroy_ioctl()
884 if (q) in xe_exec_queue_destroy_ioctl()
888 if (XE_IOCTL_DBG(xe, !q)) in xe_exec_queue_destroy_ioctl()
891 if (q->vm && q->hwe->hw_engine_group) in xe_exec_queue_destroy_ioctl()
892 xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q); in xe_exec_queue_destroy_ioctl()
894 xe_exec_queue_kill(q); in xe_exec_queue_destroy_ioctl()
896 trace_xe_exec_queue_close(q); in xe_exec_queue_destroy_ioctl()
897 xe_exec_queue_put(q); in xe_exec_queue_destroy_ioctl()
902 static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q, in xe_exec_queue_last_fence_lockdep_assert() argument
905 if (q->flags & EXEC_QUEUE_FLAG_VM) { in xe_exec_queue_last_fence_lockdep_assert()
909 lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem); in xe_exec_queue_last_fence_lockdep_assert()
918 void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm) in xe_exec_queue_last_fence_put() argument
920 xe_exec_queue_last_fence_lockdep_assert(q, vm); in xe_exec_queue_last_fence_put()
922 xe_exec_queue_last_fence_put_unlocked(q); in xe_exec_queue_last_fence_put()
931 void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q) in xe_exec_queue_last_fence_put_unlocked() argument
933 if (q->last_fence) { in xe_exec_queue_last_fence_put_unlocked()
934 dma_fence_put(q->last_fence); in xe_exec_queue_last_fence_put_unlocked()
935 q->last_fence = NULL; in xe_exec_queue_last_fence_put_unlocked()
948 struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q, in xe_exec_queue_last_fence_get() argument
953 xe_exec_queue_last_fence_lockdep_assert(q, vm); in xe_exec_queue_last_fence_get()
955 if (q->last_fence && in xe_exec_queue_last_fence_get()
956 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags)) in xe_exec_queue_last_fence_get()
957 xe_exec_queue_last_fence_put(q, vm); in xe_exec_queue_last_fence_get()
959 fence = q->last_fence ? q->last_fence : dma_fence_get_stub(); in xe_exec_queue_last_fence_get()
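
Getters on the last fence self-clean: an already-signalled fence is dropped and replaced by the stub fence, which is permanently signalled, so callers never need an empty-queue special case. A hedged consumer sketch (the getter is assumed to return a reference, per the dma_fence_get_stub() fallback, so it pairs with dma_fence_put()):

        struct dma_fence *fence = xe_exec_queue_last_fence_get(q, vm);

        dma_fence_wait(fence, false);   /* wait for prior work on this queue, uninterruptible */
        dma_fence_put(fence);
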
975 struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q, in xe_exec_queue_last_fence_get_for_resume() argument
980 lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem); in xe_exec_queue_last_fence_get_for_resume()
982 if (q->last_fence && in xe_exec_queue_last_fence_get_for_resume()
983 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags)) in xe_exec_queue_last_fence_get_for_resume()
984 xe_exec_queue_last_fence_put_unlocked(q); in xe_exec_queue_last_fence_get_for_resume()
986 fence = q->last_fence ? q->last_fence : dma_fence_get_stub(); in xe_exec_queue_last_fence_get_for_resume()
1000 void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm, in xe_exec_queue_last_fence_set() argument
1003 xe_exec_queue_last_fence_lockdep_assert(q, vm); in xe_exec_queue_last_fence_set()
1005 xe_exec_queue_last_fence_put(q, vm); in xe_exec_queue_last_fence_set()
1006 q->last_fence = dma_fence_get(fence); in xe_exec_queue_last_fence_set()
1017 int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm) in xe_exec_queue_last_fence_test_dep() argument
1022 fence = xe_exec_queue_last_fence_get(q, vm); in xe_exec_queue_last_fence_test_dep()