Lines matching refs: vops
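
Every match below is from the Xe GPU driver's VM bind code (the function names correspond to drivers/gpu/drm/xe/xe_vm.c in recent kernels). Read top to bottom, the clusters trace the life of a struct xe_vma_ops: zeroed by xe_vma_ops_init(), filled while ops are parsed or rebinds are queued (with per-tile counters bumped by xe_vma_ops_incr_pt_update_ops()), sized by xe_vma_ops_alloc(), submitted via ops_execute(), and torn down by xe_vma_ops_fini().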

783 static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)  in xe_vma_ops_alloc()  argument
788 if (!vops->pt_update_ops[i].num_ops) in xe_vma_ops_alloc()
791 vops->pt_update_ops[i].ops = in xe_vma_ops_alloc()
792 kmalloc_array(vops->pt_update_ops[i].num_ops, in xe_vma_ops_alloc()
793 sizeof(*vops->pt_update_ops[i].ops), in xe_vma_ops_alloc()
795 if (!vops->pt_update_ops[i].ops) in xe_vma_ops_alloc()
813 static void xe_vma_svm_prefetch_ops_fini(struct xe_vma_ops *vops) in xe_vma_svm_prefetch_ops_fini() argument
817 if (!(vops->flags & XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH)) in xe_vma_svm_prefetch_ops_fini()
820 list_for_each_entry(op, &vops->list, link) in xe_vma_svm_prefetch_ops_fini()
824 static void xe_vma_ops_fini(struct xe_vma_ops *vops) in xe_vma_ops_fini() argument
828 xe_vma_svm_prefetch_ops_fini(vops); in xe_vma_ops_fini()
831 kfree(vops->pt_update_ops[i].ops); in xe_vma_ops_fini()
834 static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask, int inc_val) in xe_vma_ops_incr_pt_update_ops() argument
843 vops->pt_update_ops[i].num_ops += inc_val; in xe_vma_ops_incr_pt_update_ops()
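
Lines 783-843 are the bookkeeping core: op counts are accumulated per tile first (xe_vma_ops_incr_pt_update_ops() bumps num_ops for every tile set in a mask), and only then does xe_vma_ops_alloc() size the per-tile ops arrays, skipping tiles with nothing queued; xe_vma_ops_fini() frees them unconditionally. A minimal userspace model of that count-then-allocate shape follows; MAX_TILES, the 64-byte element size, and all type names are made-up stand-ins for the driver's real definitions, not its code.

#include <stdlib.h>

#define MAX_TILES 4

struct pt_update_ops {
	void *ops;	/* array sized by num_ops, allocated late */
	int num_ops;	/* counted first, while ops are parsed */
};

struct vma_ops {
	struct pt_update_ops pt_update_ops[MAX_TILES];
};

/* Counting pass: bump the counter of every tile present in the mask. */
static void vma_ops_incr(struct vma_ops *vops, unsigned tile_mask, int inc_val)
{
	for (int i = 0; i < MAX_TILES; i++)
		if (tile_mask & (1u << i))
			vops->pt_update_ops[i].num_ops += inc_val;
}

/* Allocation pass: one array per tile that actually has work queued. */
static int vma_ops_alloc(struct vma_ops *vops)
{
	for (int i = 0; i < MAX_TILES; i++) {
		if (!vops->pt_update_ops[i].num_ops)
			continue;
		vops->pt_update_ops[i].ops =
			calloc(vops->pt_update_ops[i].num_ops, 64);
		if (!vops->pt_update_ops[i].ops)
			return -1;	/* caller unwinds via vma_ops_fini() */
	}
	return 0;
}

/* Teardown is unconditional: free(NULL) is a no-op for untouched tiles. */
static void vma_ops_fini(struct vma_ops *vops)
{
	for (int i = 0; i < MAX_TILES; i++)
		free(vops->pt_update_ops[i].ops);
}
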
862 static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma, in xe_vm_ops_add_rebind() argument
872 list_add_tail(&op->link, &vops->list); in xe_vm_ops_add_rebind()
873 xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1); in xe_vm_ops_add_rebind()
879 struct xe_vma_ops *vops);
880 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
888 struct xe_vma_ops vops; in xe_vm_rebind() local
897 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vm_rebind()
899 vops.pt_update_ops[i].wait_vm_bookkeep = true; in xe_vm_rebind()
910 err = xe_vm_ops_add_rebind(&vops, vma, in xe_vm_rebind()
916 err = xe_vma_ops_alloc(&vops, false); in xe_vm_rebind()
920 fence = ops_execute(vm, &vops); in xe_vm_rebind()
930 list_for_each_entry_safe(op, next_op, &vops.list, link) { in xe_vm_rebind()
934 xe_vma_ops_fini(&vops); in xe_vm_rebind()
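
xe_vm_rebind() (lines 888-934) is the simplest complete consumer of the API: initialize the container, queue one rebind op per VMA, allocate, execute, then free the op list whether execution produced a fence or an error. A stubbed userspace sketch of that call order follows, with every driver step replaced by a placeholder so only the sequencing and the unwinding remain; none of these function bodies are the driver's.

struct vma_ops { int dummy; };

static void ops_init(struct vma_ops *v)        { (void)v; }
static int  ops_add_rebind(struct vma_ops *v)  { (void)v; return 0; }
static int  ops_alloc(struct vma_ops *v)       { (void)v; return 0; }
static int  ops_execute_all(struct vma_ops *v) { (void)v; return 0; }
static void ops_fini(struct vma_ops *v)        { (void)v; }

static int vm_rebind(void)
{
	struct vma_ops vops;
	int err;

	ops_init(&vops);		/* 1. zero state, record vm/q/syncs */
	err = ops_add_rebind(&vops);	/* 2. queue ops, bump tile counters */
	if (err)
		goto free_ops;
	err = ops_alloc(&vops);		/* 3. size the per-tile ops arrays */
	if (err)
		goto free_ops;
	err = ops_execute_all(&vops);	/* 4. prepare + run on each tile */
free_ops:
	ops_fini(&vops);		/* 5. always free, success or not */
	return err;
}
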
942 struct xe_vma_ops vops; in xe_vma_rebind() local
952 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vma_rebind()
954 vops.pt_update_ops[id].wait_vm_bookkeep = true; in xe_vma_rebind()
955 vops.pt_update_ops[tile->id].q = in xe_vma_rebind()
959 err = xe_vm_ops_add_rebind(&vops, vma, tile_mask); in xe_vma_rebind()
963 err = xe_vma_ops_alloc(&vops, false); in xe_vma_rebind()
969 fence = ops_execute(vm, &vops); in xe_vma_rebind()
972 list_for_each_entry_safe(op, next_op, &vops.list, link) { in xe_vma_rebind()
976 xe_vma_ops_fini(&vops); in xe_vma_rebind()
995 xe_vm_ops_add_range_rebind(struct xe_vma_ops *vops, in xe_vm_ops_add_range_rebind() argument
1007 list_add_tail(&op->link, &vops->list); in xe_vm_ops_add_range_rebind()
1008 xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1); in xe_vm_ops_add_range_rebind()
1031 struct xe_vma_ops vops; in xe_vm_range_rebind() local
1042 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vm_range_rebind()
1044 vops.pt_update_ops[id].wait_vm_bookkeep = true; in xe_vm_range_rebind()
1045 vops.pt_update_ops[tile->id].q = in xe_vm_range_rebind()
1049 err = xe_vm_ops_add_range_rebind(&vops, vma, range, tile_mask); in xe_vm_range_rebind()
1053 err = xe_vma_ops_alloc(&vops, false); in xe_vm_range_rebind()
1059 fence = ops_execute(vm, &vops); in xe_vm_range_rebind()
1062 list_for_each_entry_safe(op, next_op, &vops.list, link) { in xe_vm_range_rebind()
1066 xe_vma_ops_fini(&vops); in xe_vm_range_rebind()
1082 xe_vm_ops_add_range_unbind(struct xe_vma_ops *vops, in xe_vm_ops_add_range_unbind() argument
1092 list_add_tail(&op->link, &vops->list); in xe_vm_ops_add_range_unbind()
1093 xe_vma_ops_incr_pt_update_ops(vops, range->tile_present, 1); in xe_vm_ops_add_range_unbind()
1112 struct xe_vma_ops vops; in xe_vm_range_unbind() local
1125 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vm_range_unbind()
1127 vops.pt_update_ops[id].wait_vm_bookkeep = true; in xe_vm_range_unbind()
1128 vops.pt_update_ops[tile->id].q = in xe_vm_range_unbind()
1132 err = xe_vm_ops_add_range_unbind(&vops, range); in xe_vm_range_unbind()
1136 err = xe_vma_ops_alloc(&vops, false); in xe_vm_range_unbind()
1142 fence = ops_execute(vm, &vops); in xe_vm_range_unbind()
1145 list_for_each_entry_safe(op, next_op, &vops.list, link) { in xe_vm_range_unbind()
1149 xe_vma_ops_fini(&vops); in xe_vm_range_unbind()
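
xe_vma_rebind() (942-976), xe_vm_range_rebind() (1031-1066), and xe_vm_range_unbind() (1082-1149) repeat the same init/add/alloc/execute/fini sequence for a single VMA or SVM range. Two details differ: each pins pt_update_ops[tile->id].q up front to a queue chosen for that tile (the right-hand side of the assignment is truncated in the listing), and the unbind variant derives its tile mask from range->tile_present rather than from the VMA.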
2297 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops, in vm_bind_ioctl_ops_create() argument
2422 vops->flags |= XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH; in vm_bind_ioctl_ops_create()
2593 struct xe_vma_ops *vops) in vm_bind_ioctl_ops_parse() argument
2612 list_add_tail(&op->link, &vops->list); in vm_bind_ioctl_ops_parse()
2636 xe_vma_ops_incr_pt_update_ops(vops, in vm_bind_ioctl_ops_parse()
2734 xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, num_remap_ops); in vm_bind_ioctl_ops_parse()
2746 xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1); in vm_bind_ioctl_ops_parse()
2758 xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, in vm_bind_ioctl_ops_parse()
2761 xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1); in vm_bind_ioctl_ops_parse()
3000 static int vm_bind_ioctl_ops_prefetch_ranges(struct xe_vm *vm, struct xe_vma_ops *vops) in vm_bind_ioctl_ops_prefetch_ranges() argument
3005 if (!(vops->flags & XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH)) in vm_bind_ioctl_ops_prefetch_ranges()
3008 list_for_each_entry(op, &vops->list, link) { in vm_bind_ioctl_ops_prefetch_ranges()
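
Lines 2422, 817, and 3005 show a cheap fast-path flag: vm_bind_ioctl_ops_create() sets XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH as soon as an SVM prefetch op is created, so both the prefetch pass and the fini path can bail out before walking the op list when the flag is clear. The same pattern in miniature; the flag value and types here are simplified stand-ins.

#include <stdbool.h>

#define FLAG_HAS_SVM_PREFETCH (1u << 0)

struct vma_ops { unsigned flags; /* + the op list, elided */ };

/* Set once, while ops are being created/parsed. */
static void note_op(struct vma_ops *vops, bool is_svm_prefetch)
{
	if (is_svm_prefetch)
		vops->flags |= FLAG_HAS_SVM_PREFETCH;
}

/* Checked later: skip the whole list walk when no prefetch was queued. */
static void prefetch_ranges(struct vma_ops *vops)
{
	if (!(vops->flags & FLAG_HAS_SVM_PREFETCH))
		return;
	/* ...walk the op list and prefetch each SVM range... */
}
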
3021 struct xe_vma_ops *vops) in vm_bind_ioctl_ops_lock_and_prep() argument
3030 list_for_each_entry(op, &vops->list, link) { in vm_bind_ioctl_ops_lock_and_prep()
3037 if (vops->inject_error && in vm_bind_ioctl_ops_lock_and_prep()
3071 static void trace_xe_vm_ops_execute(struct xe_vma_ops *vops) in trace_xe_vm_ops_execute() argument
3075 list_for_each_entry(op, &vops->list, link) in trace_xe_vm_ops_execute()
3079 static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops) in vm_ops_setup_tile_args() argument
3081 struct xe_exec_queue *q = vops->q; in vm_ops_setup_tile_args()
3087 if (vops->pt_update_ops[id].num_ops) in vm_ops_setup_tile_args()
3090 if (vops->pt_update_ops[id].q) in vm_ops_setup_tile_args()
3094 vops->pt_update_ops[id].q = q; in vm_ops_setup_tile_args()
3098 vops->pt_update_ops[id].q = vm->q[id]; in vm_ops_setup_tile_args()
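
vm_ops_setup_tile_args() (3079-3098) resolves which exec queue serves each tile and counts how many tiles have work, which becomes ops_execute()'s tile count. The fallback order visible in the fragments: a queue already pinned in pt_update_ops[id].q is kept, otherwise the queue passed in vops->q is used, otherwise the VM's default queue for that tile. A simplified model of that chain; whatever the real function does between lines 3090 and 3098 beyond this is not visible in the listing and is omitted here.

#define MAX_TILES 4

struct queue;	/* opaque stand-in for struct xe_exec_queue */

struct pt_update_ops { struct queue *q; int num_ops; };
struct vma_ops {
	struct queue *q;
	struct pt_update_ops pt_update_ops[MAX_TILES];
};
struct vm { struct queue *q[MAX_TILES]; };

static int setup_tile_args(struct vm *vm, struct vma_ops *vops)
{
	struct queue *q = vops->q;
	int number_tiles = 0;

	for (int id = 0; id < MAX_TILES; id++) {
		if (vops->pt_update_ops[id].num_ops)
			++number_tiles;

		if (vops->pt_update_ops[id].q)
			continue;	/* a caller already pinned this tile */

		/* bind-time queue if one was given, else the VM default */
		vops->pt_update_ops[id].q = q ? q : vm->q[id];
	}
	return number_tiles;
}
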
3106 struct xe_vma_ops *vops) in ops_execute() argument
3115 number_tiles = vm_ops_setup_tile_args(vm, vops); in ops_execute()
3129 if (!vops->pt_update_ops[id].num_ops) in ops_execute()
3132 err = xe_pt_update_ops_prepare(tile, vops); in ops_execute()
3139 trace_xe_vm_ops_execute(vops); in ops_execute()
3142 if (!vops->pt_update_ops[id].num_ops) in ops_execute()
3145 fence = xe_pt_update_ops_run(tile, vops); in ops_execute()
3167 if (!vops->pt_update_ops[id].num_ops) in ops_execute()
3170 xe_pt_update_ops_fini(tile, vops); in ops_execute()
3177 if (!vops->pt_update_ops[id].num_ops) in ops_execute()
3180 xe_pt_update_ops_abort(tile, vops); in ops_execute()
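
ops_execute() (3106-3180) is phased: every tile with pending ops is prepared first, then each is run to produce a fence, then each is finalized; an error anywhere after the prepare phase falls through to an abort loop over the same set of tiles, which is why the !num_ops skip recurs four times in the listing. A stub model of that phase structure, with the xe_pt_update_ops_* calls replaced by placeholders that always succeed.

#include <stddef.h>

#define MAX_TILES 4

struct vma_ops { int num_ops[MAX_TILES]; };

static int   pt_prepare(int id) { (void)id; return 0; }
static void *pt_run(int id)     { static int fence; (void)id; return &fence; }
static void  pt_fini(int id)    { (void)id; }
static void  pt_abort(int id)   { (void)id; }

static void *execute(struct vma_ops *vops)
{
	void *fence = NULL;
	int id;

	for (id = 0; id < MAX_TILES; id++) {	/* phase 1: prepare all */
		if (!vops->num_ops[id])
			continue;
		if (pt_prepare(id))
			goto abort;
	}

	for (id = 0; id < MAX_TILES; id++) {	/* phase 2: run all */
		if (!vops->num_ops[id])
			continue;
		fence = pt_run(id);
		if (!fence)
			goto abort;
	}

	for (id = 0; id < MAX_TILES; id++) {	/* phase 3: finish all */
		if (!vops->num_ops[id])
			continue;
		pt_fini(id);
	}
	return fence;

abort:	/* undo whatever prepare already committed on every active tile */
	for (id = 0; id < MAX_TILES; id++)
		if (vops->num_ops[id])
			pt_abort(id);
	return NULL;
}
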
3222 static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops, in vm_bind_ioctl_ops_fini() argument
3225 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q); in vm_bind_ioctl_ops_fini()
3230 ufence = find_ufence_get(vops->syncs, vops->num_syncs); in vm_bind_ioctl_ops_fini()
3231 list_for_each_entry(op, &vops->list, link) { in vm_bind_ioctl_ops_fini()
3244 for (i = 0; i < vops->num_syncs; i++) in vm_bind_ioctl_ops_fini()
3245 xe_sync_entry_signal(vops->syncs + i, fence); in vm_bind_ioctl_ops_fini()
3251 struct xe_vma_ops *vops) in vm_bind_ioctl_ops_execute() argument
3262 err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops); in vm_bind_ioctl_ops_execute()
3269 fence = ops_execute(vm, vops); in vm_bind_ioctl_ops_execute()
3272 vm_bind_ioctl_ops_fini(vm, vops, NULL); in vm_bind_ioctl_ops_execute()
3276 vm_bind_ioctl_ops_fini(vm, vops, fence); in vm_bind_ioctl_ops_execute()
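
vm_bind_ioctl_ops_fini() (3222-3245) is the single completion point for the ioctl path: it resolves the exec queue to wait on, extracts any user fence from the syncs via find_ufence_get(), walks the op list, and finally signals every sync entry with the fence that ops_execute() returned. vm_bind_ioctl_ops_execute() (3251-3276) calls it on both paths, passing NULL for the fence when lock-and-prep or execution failed.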
3450 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm, in xe_vma_ops_init() argument
3454 memset(vops, 0, sizeof(*vops)); in xe_vma_ops_init()
3455 INIT_LIST_HEAD(&vops->list); in xe_vma_ops_init()
3456 vops->vm = vm; in xe_vma_ops_init()
3457 vops->q = q; in xe_vma_ops_init()
3458 vops->syncs = syncs; in xe_vma_ops_init()
3459 vops->num_syncs = num_syncs; in xe_vma_ops_init()
3460 vops->flags = 0; in xe_vma_ops_init()
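
The init helper (3450-3460) is visible nearly in full. Reassembled for readability, with the truncated parameter list inferred from the fields being assigned, so the exact signature is a best guess rather than a quote:

static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
			    struct xe_exec_queue *q,
			    struct xe_sync_entry *syncs, u32 num_syncs)
{
	memset(vops, 0, sizeof(*vops));
	INIT_LIST_HEAD(&vops->list);
	vops->vm = vm;
	vops->q = q;
	vops->syncs = syncs;
	vops->num_syncs = num_syncs;
	vops->flags = 0;	/* redundant after the memset, but present */
}
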
3531 struct xe_vma_ops vops; in xe_vm_bind_ioctl() local
3658 xe_vma_ops_init(&vops, vm, q, syncs, num_syncs); in xe_vm_bind_ioctl()
3668 ops[i] = vm_bind_ioctl_ops_create(vm, &vops, bos[i], obj_offset, in xe_vm_bind_ioctl()
3677 err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops); in xe_vm_bind_ioctl()
3683 vops.inject_error = true; in xe_vm_bind_ioctl()
3692 if (list_empty(&vops.list)) { in xe_vm_bind_ioctl()
3697 err = xe_vma_ops_alloc(&vops, args->num_binds > 1); in xe_vm_bind_ioctl()
3701 err = vm_bind_ioctl_ops_prefetch_ranges(vm, &vops); in xe_vm_bind_ioctl()
3705 fence = vm_bind_ioctl_ops_execute(vm, &vops); in xe_vm_bind_ioctl()
3714 xe_vma_ops_fini(&vops); in xe_vm_bind_ioctl()
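
xe_vm_bind_ioctl() (3531-3714) shows the full ordering for userspace binds: xe_vma_ops_init() first, then one vm_bind_ioctl_ops_create()/vm_bind_ioctl_ops_parse() pair per bind in the array, the debug-only inject_error knob, an early exit when parsing produced an empty op list, xe_vma_ops_alloc() with array_of_binds set whenever args->num_binds > 1, the SVM prefetch pass, and finally vm_bind_ioctl_ops_execute(); xe_vma_ops_fini() runs on the way out regardless of the outcome.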
3760 struct xe_vma_ops vops; in xe_vm_bind_kernel_bo() local
3772 xe_vma_ops_init(&vops, vm, q, NULL, 0); in xe_vm_bind_kernel_bo()
3774 ops = vm_bind_ioctl_ops_create(vm, &vops, bo, 0, addr, xe_bo_size(bo), in xe_vm_bind_kernel_bo()
3782 err = vm_bind_ioctl_ops_parse(vm, ops, &vops); in xe_vm_bind_kernel_bo()
3786 xe_assert(vm->xe, !list_empty(&vops.list)); in xe_vm_bind_kernel_bo()
3788 err = xe_vma_ops_alloc(&vops, false); in xe_vm_bind_kernel_bo()
3792 fence = vm_bind_ioctl_ops_execute(vm, &vops); in xe_vm_bind_kernel_bo()
3800 xe_vma_ops_fini(&vops); in xe_vm_bind_kernel_bo()
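
xe_vm_bind_kernel_bo() (3760-3800) drives the same pipeline for kernel-internal BO binds, just with no user syncs (NULL, 0 passed to xe_vma_ops_init()) and with an assert that parsing produced a non-empty op list, presumably because a kernel bind is never a no-op.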