Lines matching refs:vops, i.e. every reference to struct xe_vma_ops *vops, grouped below by enclosing function. The leading numbers are source line numbers; each group header notes whether vops enters that function as an argument or as a local variable.

In xe_vma_ops_alloc() (vops is an argument):

    725  static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
    730          if (!vops->pt_update_ops[i].num_ops)
    733          vops->pt_update_ops[i].ops =
    734                  kmalloc_array(vops->pt_update_ops[i].num_ops,
    735                                sizeof(*vops->pt_update_ops[i].ops),
    737          if (!vops->pt_update_ops[i].ops)

In xe_vma_ops_fini() (vops is an argument):

    744  static void xe_vma_ops_fini(struct xe_vma_ops *vops)
    749          kfree(vops->pt_update_ops[i].ops);
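The alloc/fini pair above implements a count-then-allocate pattern: by the time xe_vma_ops_alloc() runs, each tile's num_ops has already been counted, so every tile with work gets exactly one kmalloc_array() and idle tiles are skipped. Below is a minimal sketch of that pattern; NUM_TILES_SKETCH, struct pt_op_sketch, and the other *_sketch names are stand-ins for illustration, not the driver's definitions.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

#define NUM_TILES_SKETCH 2      /* stand-in for the per-device tile count */

struct pt_op_sketch { int kind; };      /* placeholder for one PT update op */

struct pt_update_ops_sketch {
        struct pt_op_sketch *ops;       /* array sized from num_ops */
        u32 num_ops;                    /* counted before allocation */
        bool wait_vm_bookkeep;          /* used by the rebind sketch below */
};

struct vma_ops_sketch {
        struct list_head list;          /* ops queued for execution */
        struct pt_update_ops_sketch pt_update_ops[NUM_TILES_SKETCH];
};

static int vma_ops_alloc_sketch(struct vma_ops_sketch *vops)
{
        int i;

        for (i = 0; i < NUM_TILES_SKETCH; ++i) {
                if (!vops->pt_update_ops[i].num_ops)
                        continue;       /* this tile has no work, skip it */

                vops->pt_update_ops[i].ops =
                        kmalloc_array(vops->pt_update_ops[i].num_ops,
                                      sizeof(*vops->pt_update_ops[i].ops),
                                      GFP_KERNEL);
                if (!vops->pt_update_ops[i].ops)
                        return -ENOMEM; /* caller still runs the fini path */
        }

        return 0;
}

static void vma_ops_fini_sketch(struct vma_ops_sketch *vops)
{
        int i;

        for (i = 0; i < NUM_TILES_SKETCH; ++i)
                kfree(vops->pt_update_ops[i].ops);      /* kfree(NULL) is a no-op */
}

Returning -ENOMEM without unwinding mid-loop works because kfree(NULL) is a no-op, so a single fini call cleans up however far the allocation loop got.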
In xe_vma_ops_incr_pt_update_ops() (vops is an argument):

    752  static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask)
    758          ++vops->pt_update_ops[i].num_ops;

In xe_vm_ops_add_rebind() (vops is an argument):

    777  static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
    787          list_add_tail(&op->link, &vops->list);
    788          xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
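The counting half of the pattern is visible above: xe_vma_ops_incr_pt_update_ops() grows num_ops only for tiles whose bit is set in tile_mask, and xe_vm_ops_add_rebind() pairs that count with queueing the op on vops->list. A short sketch reusing the stand-in types from the previous block; struct op_sketch and add_op_sketch() are likewise illustrative.

#include <linux/bits.h>

struct op_sketch {
        struct list_head link;  /* node on vops->list */
};

static void incr_pt_update_ops_sketch(struct vma_ops_sketch *vops, u8 tile_mask)
{
        int i;

        for (i = 0; i < NUM_TILES_SKETCH; ++i)
                if (tile_mask & BIT(i))         /* only tiles this op touches */
                        ++vops->pt_update_ops[i].num_ops;
}

static int add_op_sketch(struct vma_ops_sketch *vops, u8 tile_mask)
{
        struct op_sketch *op = kzalloc(sizeof(*op), GFP_KERNEL);

        if (!op)
                return -ENOMEM;

        list_add_tail(&op->link, &vops->list);  /* keep submission order */
        incr_pt_update_ops_sketch(vops, tile_mask);

        return 0;
}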
File-scope forward declarations (line 794 is the tail of the ops_execute() prototype; line 795 declares xe_vma_ops_init()):

    794                                          struct xe_vma_ops *vops);
    795  static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
In xe_vm_rebind() (vops is a local variable):

    803          struct xe_vma_ops vops;
    812          xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
    814                  vops.pt_update_ops[i].wait_vm_bookkeep = true;
    825                  err = xe_vm_ops_add_rebind(&vops, vma,
    831          err = xe_vma_ops_alloc(&vops, false);
    835          fence = ops_execute(vm, &vops);
    845          list_for_each_entry_safe(op, next_op, &vops.list, link) {
    849          xe_vma_ops_fini(&vops);
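xe_vm_rebind() exercises the whole vops lifecycle inside one function: init with no user queue or syncs, mark each tile's page-table job to wait on the VM's bookkeeping fences, queue one rebind op per VMA, allocate the per-tile arrays, execute, then tear down the ops list and the arrays. A condensed ordering sketch built on the stand-ins above; execute_sketch() is a stub where the listing calls ops_execute(), and the unwinding is simplified.

#include <linux/string.h>

static void vma_ops_init_sketch(struct vma_ops_sketch *vops)
{
        memset(vops, 0, sizeof(*vops));         /* zero counters and pointers */
        INIT_LIST_HEAD(&vops->list);
}

static int execute_sketch(struct vma_ops_sketch *vops)
{
        return 0;       /* stub; the listing calls ops_execute() here */
}

static int rebind_flow_sketch(int nr_vmas, u8 tile_mask)
{
        struct vma_ops_sketch vops;
        struct op_sketch *op, *next;
        int i, err = 0;

        vma_ops_init_sketch(&vops);             /* no user queue, no syncs */
        for (i = 0; i < NUM_TILES_SKETCH; ++i)
                vops.pt_update_ops[i].wait_vm_bookkeep = true;

        for (i = 0; i < nr_vmas; ++i) {         /* one rebind op per VMA */
                err = add_op_sketch(&vops, tile_mask);
                if (err)
                        goto free;
        }

        err = vma_ops_alloc_sketch(&vops);      /* size the per-tile arrays */
        if (err)
                goto free;

        err = execute_sketch(&vops);
free:
        list_for_each_entry_safe(op, next, &vops.list, link) {
                list_del(&op->link);
                kfree(op);                      /* drop the queued ops */
        }
        vma_ops_fini_sketch(&vops);             /* free the per-tile arrays */
        return err;
}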
In xe_vma_rebind() (vops is a local variable; same flow as xe_vm_rebind(), for a single VMA):

    857          struct xe_vma_ops vops;
    867          xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
    869                  vops.pt_update_ops[id].wait_vm_bookkeep = true;
    870          vops.pt_update_ops[tile->id].q =
    874          err = xe_vm_ops_add_rebind(&vops, vma, tile_mask);
    878          err = xe_vma_ops_alloc(&vops, false);
    884          fence = ops_execute(vm, &vops);
    887          list_for_each_entry_safe(op, next_op, &vops.list, link) {
    891          xe_vma_ops_fini(&vops);
In vm_bind_ioctl_ops_parse() (vops is an argument):

    2130                                   struct xe_vma_ops *vops)
    2149         list_add_tail(&op->link, &vops->list);
    2169         xe_vma_ops_incr_pt_update_ops(vops,
    2216         xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
    2254         xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
    2257         xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
    2263         xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
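vm_bind_ioctl_ops_parse() is where user binds become vops entries: each op is linked onto vops->list, and the cluster of xe_vma_ops_incr_pt_update_ops() calls above shows that every op kind, on every branch, has to feed the per-tile counters or the later allocation comes up short. A sketch of that dispatch shape; the op kinds and the double increment on remap are illustrative, not the GPUVA enum or the driver's exact accounting.

enum op_kind_sketch { OP_MAP_SKETCH, OP_UNMAP_SKETCH, OP_REMAP_SKETCH };

static int parse_op_sketch(struct vma_ops_sketch *vops,
                           enum op_kind_sketch kind, u8 tile_mask)
{
        struct op_sketch *op = kzalloc(sizeof(*op), GFP_KERNEL);

        if (!op)
                return -ENOMEM;

        list_add_tail(&op->link, &vops->list);

        switch (kind) {
        case OP_MAP_SKETCH:
        case OP_UNMAP_SKETCH:
                incr_pt_update_ops_sketch(vops, tile_mask);
                break;
        case OP_REMAP_SKETCH:
                /* illustrative: a remap may touch old and new ranges */
                incr_pt_update_ops_sketch(vops, tile_mask);
                incr_pt_update_ops_sketch(vops, tile_mask);
                break;
        }

        return 0;
}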
In vm_bind_ioctl_ops_lock_and_prep() (vops is an argument):

    2444                                            struct xe_vma_ops *vops)
    2453         list_for_each_entry(op, &vops->list, link) {
    2460         if (vops->inject_error &&
In trace_xe_vm_ops_execute() (vops is an argument):

    2492  static void trace_xe_vm_ops_execute(struct xe_vma_ops *vops)
    2496         list_for_each_entry(op, &vops->list, link)
In vm_ops_setup_tile_args() (vops is an argument):

    2500  static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops)
    2502         struct xe_exec_queue *q = vops->q;
    2508         if (vops->pt_update_ops[id].num_ops)
    2511         if (vops->pt_update_ops[id].q)
    2515         vops->pt_update_ops[id].q = q;
    2519         vops->pt_update_ops[id].q = vm->q[id];
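vm_ops_setup_tile_args() resolves which exec queue services each tile's page-table job, with a visible precedence: a queue already pinned in that tile's pt_update_ops wins, then the user's vops->q, and finally the VM's per-tile default vm->q[id]; the return value counts tiles that have work. A sketch of that fallback chain; struct queue_sketch and the parameter layout are stand-ins for struct xe_exec_queue and the real fields.

struct queue_sketch { int id; };

struct tile_args_sketch {
        struct queue_sketch *q;         /* queue chosen for this tile */
        u32 num_ops;                    /* pending PT update ops */
};

static int setup_tile_args_sketch(struct tile_args_sketch *tiles, int nr_tiles,
                                  struct queue_sketch *user_q,
                                  struct queue_sketch **vm_default_q)
{
        int id, number_tiles = 0;

        for (id = 0; id < nr_tiles; ++id) {
                if (!tiles[id].num_ops)
                        continue;                       /* no work here */

                number_tiles++;

                if (tiles[id].q)
                        continue;                       /* already pinned */
                else if (user_q)
                        tiles[id].q = user_q;           /* user's bind queue */
                else
                        tiles[id].q = vm_default_q[id]; /* VM default */
        }

        return number_tiles;
}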
In ops_execute() (vops is an argument):

    2527                                       struct xe_vma_ops *vops)
    2536         number_tiles = vm_ops_setup_tile_args(vm, vops);
    2550         if (!vops->pt_update_ops[id].num_ops)
    2553         err = xe_pt_update_ops_prepare(tile, vops);
    2560         trace_xe_vm_ops_execute(vops);
    2563         if (!vops->pt_update_ops[id].num_ops)
    2566         fence = xe_pt_update_ops_run(tile, vops);
    2588         if (!vops->pt_update_ops[id].num_ops)
    2591         xe_pt_update_ops_fini(tile, vops);
    2598         if (!vops->pt_update_ops[id].num_ops)
    2601         xe_pt_update_ops_abort(tile, vops);
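ops_execute() stages the update: a prepare pass over every tile with pending ops, a run pass that produces the fences, then a fini pass on success or an abort pass that rolls back prepared work on failure. A skeleton of that staging; the pt_*_sketch() hooks are stubs standing in for the xe_pt_update_ops_prepare/run/fini/abort entry points, and the fence plumbing is omitted.

static int pt_prepare_sketch(int tile) { return 0; }   /* stub tile hooks */
static int pt_run_sketch(int tile) { return 0; }
static void pt_fini_sketch(int tile) { }
static void pt_abort_sketch(int tile) { }

static int ops_execute_sketch(struct vma_ops_sketch *vops)
{
        int id, err;

        /* Pass 1: prepare every tile that has work queued. */
        for (id = 0; id < NUM_TILES_SKETCH; ++id) {
                if (!vops->pt_update_ops[id].num_ops)
                        continue;
                err = pt_prepare_sketch(id);
                if (err)
                        goto abort;
        }

        /* Pass 2: submit; in the driver each run returns a fence. */
        for (id = 0; id < NUM_TILES_SKETCH; ++id) {
                if (!vops->pt_update_ops[id].num_ops)
                        continue;
                err = pt_run_sketch(id);
                if (err)
                        goto abort;
        }

        /* Pass 3: success, release prepare-time state. */
        for (id = 0; id < NUM_TILES_SKETCH; ++id) {
                if (!vops->pt_update_ops[id].num_ops)
                        continue;
                pt_fini_sketch(id);
        }

        return 0;

abort:
        /* Roll back whatever was prepared (or partially run). */
        for (id = 0; id < NUM_TILES_SKETCH; ++id) {
                if (!vops->pt_update_ops[id].num_ops)
                        continue;
                pt_abort_sketch(id);
        }

        return err;
}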
In vm_bind_ioctl_ops_fini() (vops is an argument):

    2643  static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
    2646         struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q);
    2651         ufence = find_ufence_get(vops->syncs, vops->num_syncs);
    2652         list_for_each_entry(op, &vops->list, link) {
    2664         for (i = 0; i < vops->num_syncs; i++)
    2665                 xe_sync_entry_signal(vops->syncs + i, fence);
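After execution, vm_bind_ioctl_ops_fini() fans the resulting fence out: a user fence, if any, is looked up from the sync array once, then every sync entry the ioctl received is signalled with the same bind fence. A loose sketch of the fan-out; struct sync_entry_sketch and signal_sync_sketch() merely stand in for the driver's xe_sync machinery.

#include <linux/dma-fence.h>

struct sync_entry_sketch {
        struct dma_fence *signalled;    /* stand-in for the real sync object */
};

static void signal_sync_sketch(struct sync_entry_sketch *sync,
                               struct dma_fence *fence)
{
        sync->signalled = dma_fence_get(fence); /* one ref per sync entry */
}

static void ops_fini_sketch(struct sync_entry_sketch *syncs, u32 num_syncs,
                            struct dma_fence *fence)
{
        u32 i;

        for (i = 0; i < num_syncs; i++)         /* same fence to every sync */
                signal_sync_sketch(syncs + i, fence);
}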
In vm_bind_ioctl_ops_execute() (vops is an argument):

    2671                                          struct xe_vma_ops *vops)
    2682         err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
    2687         fence = ops_execute(vm, vops);
    2693         vm_bind_ioctl_ops_fini(vm, vops, fence);
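vm_bind_ioctl_ops_execute() is the locking wrapper: vm_bind_ioctl_ops_lock_and_prep() runs inside a drm_exec loop so contended reservations can be retried, and only then do ops_execute() and the fence fan-out run. The sketch below uses the real drm_exec API, but lock_and_prep_sketch() is a stand-in and the init flags are an assumption, not necessarily what the driver passes.

#include <drm/drm_exec.h>

static int lock_and_prep_sketch(struct drm_exec *exec)
{
        /* Stand-in: lock GEM objects and prep each op on the list. */
        return 0;
}

static int ops_execute_locked_sketch(void)
{
        struct drm_exec exec;
        int err = 0;

        /* Flags are an assumption; the driver chooses them per VM mode. */
        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
        drm_exec_until_all_locked(&exec) {
                err = lock_and_prep_sketch(&exec);
                drm_exec_retry_on_contention(&exec);    /* redo on races */
                if (err)
                        break;
        }

        /* ...ops_execute() and the fence fan-out happen here, locked... */

        drm_exec_fini(&exec);
        return err;
}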
In xe_vma_ops_init() (vops is an argument):

    2853  static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
    2857         memset(vops, 0, sizeof(*vops));
    2858         INIT_LIST_HEAD(&vops->list);
    2859         vops->vm = vm;
    2860         vops->q = q;
    2861         vops->syncs = syncs;
    2862         vops->num_syncs = num_syncs;
In xe_vm_bind_ioctl() (vops is a local variable):

    2927         struct xe_vma_ops vops;
    3047         xe_vma_ops_init(&vops, vm, q, syncs, num_syncs);
    3066                 err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops);
    3072                 vops.inject_error = true;
    3081         if (list_empty(&vops.list)) {
    3086         err = xe_vma_ops_alloc(&vops, args->num_binds > 1);
    3090         err = vm_bind_ioctl_ops_execute(vm, &vops);
    3095         xe_vma_ops_fini(&vops);
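Read in order, xe_vm_bind_ioctl() strings the whole lifecycle together: init vops with the user's queue and syncs, parse each bind into the shared list, optionally arm inject_error as a fault-injection hook, return early if parsing queued nothing, size the per-tile arrays (passing num_binds > 1 as a sizing hint), execute, and fini. A compact flow sketch under the same stand-ins; error handling and the inject_error hook are simplified away.

static int bind_ioctl_flow_sketch(int num_binds, u8 tile_mask)
{
        struct vma_ops_sketch vops;
        struct op_sketch *op, *next;
        int i, err = 0;

        vma_ops_init_sketch(&vops);             /* user q/syncs omitted here */

        for (i = 0; i < num_binds; ++i) {       /* one parse per bind entry */
                err = parse_op_sketch(&vops, OP_MAP_SKETCH, tile_mask);
                if (err)
                        goto free;
        }

        if (list_empty(&vops.list))             /* nothing queued, done */
                goto free;

        err = vma_ops_alloc_sketch(&vops);      /* size per-tile arrays */
        if (err)
                goto free;

        err = execute_sketch(&vops);            /* lock, run, signal syncs */
free:
        list_for_each_entry_safe(op, next, &vops.list, link) {
                list_del(&op->link);
                kfree(op);
        }
        vma_ops_fini_sketch(&vops);
        return err;
}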