Lines matching refs: start (identifier cross-reference; the leading number on each entry is the line number in the source file, which from the function names appears to be the KVM core, virt/kvm/kvm_main.c)

519 	u64 start;  member
556 #define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \ argument
557 for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
559 node = interval_tree_iter_next(node, start, last)) \
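The macro above walks the memslots' HVA interval tree and visits every slot whose host-virtual range overlaps [start, last]; note the caller at source line 592 passes range->end - 1, because the kernel's interval trees store closed intervals. Below is a minimal user-space model of that iteration pattern. The kernel backs it with an augmented rbtree (lib/interval_tree.c); the linked-list scan here is only a stand-in, and every name is illustrative rather than the kernel's.

/*
 * Minimal model of kvm_for_each_memslot_in_hva_range(): a linear list
 * stands in for the rbtree-backed interval tree so the overlap
 * semantics are easy to see.  Intervals are closed, [start, last].
 */
#include <stdint.h>
#include <stdio.h>

struct itree_node {
	uint64_t start, last;          /* closed interval [start, last] */
	struct itree_node *next;
};

static struct itree_node *iter_first(struct itree_node *head,
				     uint64_t start, uint64_t last)
{
	for (; head; head = head->next)
		if (head->start <= last && start <= head->last)
			return head;   /* first node overlapping the query */
	return NULL;
}

static struct itree_node *iter_next(struct itree_node *node,
				    uint64_t start, uint64_t last)
{
	return iter_first(node->next, start, last);
}

#define for_each_node_in_range(node, head, start, last)           \
	for (node = iter_first(head, start, last); node;          \
	     node = iter_next(node, start, last))

int main(void)
{
	struct itree_node c = { 0x9000, 0x9fff, NULL };
	struct itree_node b = { 0x4000, 0x7fff, &c };
	struct itree_node a = { 0x1000, 0x2fff, &b };
	struct itree_node *n;

	/* Query [0x2000, 0x5000] overlaps a and b, but not c. */
	for_each_node_in_range(n, &a, 0x2000, 0x5000)
		printf("overlap: [%#lx, %#lx]\n",
		       (unsigned long)n->start, (unsigned long)n->last);
	return 0;
}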
573 if (WARN_ON_ONCE(range->end <= range->start)) in kvm_handle_hva_range()
592 range->start, range->end - 1) { in kvm_handle_hva_range()
596 hva_start = max_t(unsigned long, range->start, slot->userspace_addr); in kvm_handle_hva_range()
618 gfn_range.start = hva_to_gfn_memslot(hva_start, slot); in kvm_handle_hva_range()
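kvm_handle_hva_range() clamps the notifier's HVA range to each overlapping slot and converts the clamped bounds to guest frame numbers; hva_to_gfn_memslot() is, as far as I can tell, just base_gfn plus the page offset into the slot. A hedged user-space sketch of that arithmetic (PAGE_SHIFT, the slot layout, and the inputs are made up):

/* Sketch of the HVA->GFN clamping done per slot in
 * kvm_handle_hva_range(); all concrete values are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct memslot {
	uint64_t base_gfn;            /* first guest frame of the slot */
	unsigned long userspace_addr; /* HVA where the slot is mapped */
	unsigned long npages;
};

static uint64_t hva_to_gfn(unsigned long hva, const struct memslot *slot)
{
	/* Mirrors hva_to_gfn_memslot(): slot offset, in pages. */
	return slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
}

int main(void)
{
	struct memslot slot = { .base_gfn = 0x100,
				.userspace_addr = 0x7f0000000000UL,
				.npages = 512 };
	unsigned long range_start = 0x7f0000001000UL; /* notifier range */
	unsigned long range_end   = 0x7f0000005000UL; /* exclusive */

	/* Clamp the notifier range to the slot before translating. */
	unsigned long hva_start = range_start > slot.userspace_addr ?
				  range_start : slot.userspace_addr;
	unsigned long slot_end = slot.userspace_addr +
				 (slot.npages << PAGE_SHIFT);
	unsigned long hva_end = range_end < slot_end ? range_end : slot_end;

	/* Round the exclusive end up so a partial page is covered. */
	uint64_t gfn_start = hva_to_gfn(hva_start, &slot);
	uint64_t gfn_end   = hva_to_gfn(hva_end + PAGE_SIZE - 1, &slot);

	printf("gfn range: [%#llx, %#llx)\n",
	       (unsigned long long)gfn_start, (unsigned long long)gfn_end);
	return 0;
}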
651 unsigned long start, in kvm_age_hva_range() argument
658 .start = start, in kvm_age_hva_range()
671 unsigned long start, in kvm_age_hva_range_no_flush() argument
675 return kvm_age_hva_range(mn, start, end, handler, false); in kvm_age_hva_range_no_flush()
694 void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end) in kvm_mmu_invalidate_range_add() argument
701 kvm->mmu_invalidate_range_start = start; in kvm_mmu_invalidate_range_add()
714 min(kvm->mmu_invalidate_range_start, start); in kvm_mmu_invalidate_range_add()
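The first kvm_mmu_invalidate_range_add() call in an invalidation window records the range; later calls widen it with min()/max() so a single [start, end) covers every range touched while the window is open. A simplified stand-alone model (field names abbreviated; locking and the in-progress bookkeeping omitted):

/* Sketch of the range accumulation in kvm_mmu_invalidate_range_add(). */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
#define INVALID_GFN (~(gfn_t)0)

struct invalidate_state {
	gfn_t range_start;
	gfn_t range_end;
};

static void invalidate_range_add(struct invalidate_state *s,
				 gfn_t start, gfn_t end)
{
	if (s->range_start == INVALID_GFN) {
		/* First range in this invalidation window. */
		s->range_start = start;
		s->range_end = end;
	} else {
		/* Widen the window to the union of all ranges seen. */
		if (start < s->range_start)
			s->range_start = start;
		if (end > s->range_end)
			s->range_end = end;
	}
}

int main(void)
{
	struct invalidate_state s = { INVALID_GFN, INVALID_GFN };

	invalidate_range_add(&s, 0x200, 0x300);
	invalidate_range_add(&s, 0x100, 0x180);  /* widens start */
	invalidate_range_add(&s, 0x280, 0x400);  /* widens end */

	printf("merged: [%#llx, %#llx)\n",
	       (unsigned long long)s.range_start,
	       (unsigned long long)s.range_end);  /* [0x100, 0x400) */
	return 0;
}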
722 kvm_mmu_invalidate_range_add(kvm, range->start, range->end); in kvm_mmu_unmap_gfn_range()
731 .start = range->start, in kvm_mmu_notifier_invalidate_range_start()
739 trace_kvm_unmap_hva_range(range->start, range->end); in kvm_mmu_notifier_invalidate_range_start()
763 gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end); in kvm_mmu_notifier_invalidate_range_start()
807 .start = range->start, in kvm_mmu_notifier_invalidate_range_end()
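The invalidate_range_start/end notifier pair brackets an invalidation window: start bumps an in-progress count before unmapping, end bumps a sequence number and drops the count. Page-fault code snapshots the sequence and rechecks it before installing a mapping, retrying if an invalidation ran concurrently. A sketch of that protocol, with memory barriers and locking deliberately omitted:

/* Simplified model of the begin/end window around invalidations. */
#include <stdbool.h>
#include <stdio.h>

struct mmu_state {
	unsigned long seq;   /* bumped when an invalidation completes */
	int in_progress;     /* nesting count of active windows */
};

static void invalidate_begin(struct mmu_state *s)
{
	s->in_progress++;    /* faults must retry while this is nonzero */
}

static void invalidate_end(struct mmu_state *s)
{
	s->seq++;            /* tells faults that PFNs may have changed */
	s->in_progress--;
}

/* A fault handler snapshots seq early and rechecks both fields late;
 * any change means an invalidation raced with the fault. */
static bool fault_may_install(struct mmu_state *s, unsigned long snap)
{
	return !s->in_progress && s->seq == snap;
}

int main(void)
{
	struct mmu_state s = { 0, 0 };
	unsigned long snap = s.seq;      /* fault starts here */

	invalidate_begin(&s);            /* racing invalidation */
	invalidate_end(&s);

	printf("install ok: %d\n", fault_may_install(&s, snap)); /* 0 */
	return 0;
}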
835 unsigned long start, in kvm_mmu_notifier_clear_flush_young() argument
838 trace_kvm_age_hva(start, end); in kvm_mmu_notifier_clear_flush_young()
840 return kvm_age_hva_range(mn, start, end, kvm_age_gfn, in kvm_mmu_notifier_clear_flush_young()
846 unsigned long start, in kvm_mmu_notifier_clear_young() argument
849 trace_kvm_age_hva(start, end); in kvm_mmu_notifier_clear_young()
864 return kvm_age_hva_range_no_flush(mn, start, end, kvm_age_gfn); in kvm_mmu_notifier_clear_young()
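Both aging notifiers reuse the same range walk; the difference is the flush flag threaded through kvm_age_hva_range() at source lines 651-675 above: clear_flush_young flushes remote TLBs if any page was found young, while clear_young uses the no-flush wrapper. A toy model of that flush_on_ret pattern (the aging handler and the flush are simulated):

/* Sketch of the flush_on_ret split between the two aging notifiers. */
#include <stdbool.h>
#include <stdio.h>

static bool age_gfn_range(unsigned long start, unsigned long end)
{
	/* Pretend one page in the range had its accessed bit set. */
	(void)start; (void)end;
	return true;
}

static void flush_remote_tlbs(void)
{
	puts("TLB flush");
}

static bool age_hva_range(unsigned long start, unsigned long end,
			  bool flush_on_ret)
{
	bool young = age_gfn_range(start, end);

	/* clear_flush_young: flush only if pages were found young;
	 * clear_young: never flush (flush_on_ret == false). */
	if (flush_on_ret && young)
		flush_remote_tlbs();
	return young;
}

int main(void)
{
	age_hva_range(0x1000, 0x2000, true);   /* flushes */
	age_hva_range(0x1000, 0x2000, false);  /* does not */
	return 0;
}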
1550 new->hva_node[idx].start = new->userspace_addr; in kvm_replace_memslot()
1980 gfn_t start, gfn_t end) in kvm_check_memslot_overlap() argument
1984 kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) { in kvm_check_memslot_overlap()
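kvm_check_memslot_overlap() scans the gfn-ordered memslots in [start, end) and reports a conflict on any hit that is not the slot being replaced; the skip-own-id detail is my reading of the caller, so treat it as an assumption. A flat-array model of the same half-open-interval test:

/* Sketch of the overlap check run before installing a new memslot. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

struct slot { int id; gfn_t base_gfn; unsigned long npages; };

static bool check_overlap(const struct slot *slots, int nslots, int id,
			  gfn_t start, gfn_t end)
{
	for (int i = 0; i < nslots; i++) {
		gfn_t s = slots[i].base_gfn;
		gfn_t e = s + slots[i].npages;

		/* Half-open overlap test, skipping the slot's own id. */
		if (slots[i].id != id && s < end && start < e)
			return true;
	}
	return false;
}

int main(void)
{
	struct slot slots[] = { { 0, 0x000, 0x100 }, { 1, 0x200, 0x100 } };

	/* 0: only intersects slot id 0 itself. */
	printf("%d\n", check_overlap(slots, 2, 0, 0x0c0, 0x140));
	/* 1: intersects slot id 1. */
	printf("%d\n", check_overlap(slots, 2, 0, 0x1c0, 0x240));
	return 0;
}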
2432 bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end, in kvm_range_has_memory_attributes() argument
2435 XA_STATE(xas, &kvm->mem_attr_array, start); in kvm_range_has_memory_attributes()
2443 if (end == start + 1) in kvm_range_has_memory_attributes()
2444 return (kvm_get_memory_attributes(kvm, start) & mask) == attrs; in kvm_range_has_memory_attributes()
2450 for (index = start; index < end; index++) { in kvm_range_has_memory_attributes()
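kvm_range_has_memory_attributes() answers whether every gfn in [start, end) already carries the requested attribute bits, with a single-page fast path at source line 2443. The kernel walks an xarray under RCU; the sketch below substitutes a plain array, and the PRIVATE flag value is only a stand-in:

/* Sketch of the per-gfn attribute check over a half-open range. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

static bool range_has_attrs(const unsigned long *attr, gfn_t start,
			    gfn_t end, unsigned long mask,
			    unsigned long attrs)
{
	/* Single-page fast path, as in the kernel. */
	if (end == start + 1)
		return (attr[start] & mask) == attrs;

	for (gfn_t i = start; i < end; i++)
		if ((attr[i] & mask) != attrs)
			return false;
	return true;
}

int main(void)
{
	enum { PRIVATE = 1 << 3 };  /* stand-in for an attribute flag */
	unsigned long attr[8] = { 0, PRIVATE, PRIVATE, PRIVATE, 0, 0, 0, 0 };

	printf("%d\n", range_has_attrs(attr, 1, 4, PRIVATE, PRIVATE)); /* 1 */
	printf("%d\n", range_has_attrs(attr, 1, 5, PRIVATE, PRIVATE)); /* 0 */
	return 0;
}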
2488 kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) { in kvm_handle_gfn_range()
2492 gfn_range.start = max(range->start, slot->base_gfn); in kvm_handle_gfn_range()
2494 if (gfn_range.start >= gfn_range.end) in kvm_handle_gfn_range()
2529 kvm_mmu_invalidate_range_add(kvm, range->start, range->end); in kvm_pre_set_memory_attributes()
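As with the HVA path earlier, kvm_handle_gfn_range() intersects the requested range with each slot and skips empty intersections. The clamp in isolation, with illustrative numbers:

/* Sketch of the per-slot gfn clamp; values are made up. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

int main(void)
{
	gfn_t range_start = 0x80, range_end = 0x300;   /* requested range */
	gfn_t base_gfn = 0x100;                        /* one slot */
	unsigned long npages = 0x100;

	gfn_t s = range_start > base_gfn ? range_start : base_gfn;
	gfn_t slot_end = base_gfn + npages;
	gfn_t e = range_end < slot_end ? range_end : slot_end;

	if (s >= e)
		puts("no overlap with this slot, skip it");
	else
		printf("handle [%#llx, %#llx)\n",
		       (unsigned long long)s, (unsigned long long)e);
	return 0;
}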
2535 static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end, in kvm_vm_set_mem_attributes() argument
2539 .start = start, in kvm_vm_set_mem_attributes()
2548 .start = start, in kvm_vm_set_mem_attributes()
2561 trace_kvm_vm_set_mem_attributes(start, end, attributes); in kvm_vm_set_mem_attributes()
2566 if (kvm_range_has_memory_attributes(kvm, start, end, ~0, attributes)) in kvm_vm_set_mem_attributes()
2573 for (i = start; i < end; i++) { in kvm_vm_set_mem_attributes()
2583 for (i = start; i < end; i++) { in kvm_vm_set_mem_attributes()
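kvm_vm_set_mem_attributes() is deliberately two-pass: the first loop reserves xarray storage for every gfn so that, once the pre-set handler has zapped mappings, the second loop's stores cannot fail. A user-space model with malloc standing in for the reservation step (error handling abbreviated, entries intentionally never freed for brevity):

/* Sketch of the reserve-then-store pattern over a gfn range. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t gfn_t;

static int set_mem_attributes(unsigned long **attr, gfn_t start, gfn_t end,
			      unsigned long attributes)
{
	/* Pass 1: reserve; bail out cleanly on allocation failure. */
	for (gfn_t i = start; i < end; i++) {
		if (!attr[i]) {
			attr[i] = malloc(sizeof(**attr));
			if (!attr[i])
				return -1;  /* nothing visible changed yet */
		}
	}

	/* pre-set handler: zap/invalidate mappings for [start, end). */

	/* Pass 2: cannot fail, every entry is already reserved. */
	for (gfn_t i = start; i < end; i++)
		*attr[i] = attributes;

	/* post-set handler: rebuild state for the new attributes. */
	return 0;
}

int main(void)
{
	unsigned long *attr[8] = { 0 };

	if (set_mem_attributes(attr, 2, 6, 1UL << 3) == 0)
		printf("gfn 3 attrs: %#lx\n", *attr[3]);
	return 0;
}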
2600 gfn_t start, end; in kvm_vm_ioctl_set_mem_attributes() local
2612 start = attrs->address >> PAGE_SHIFT; in kvm_vm_ioctl_set_mem_attributes()
2622 return kvm_vm_set_mem_attributes(kvm, start, end, attrs->attributes); in kvm_vm_ioctl_set_mem_attributes()
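The ioctl derives the half-open gfn range by shifting the page-aligned address and size; a small worked example (values illustrative):

/* Sketch of the gfn computation in kvm_vm_ioctl_set_mem_attributes(). */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	uint64_t address = 0x200000, size = 0x10000;

	/* Both must be page-aligned and size nonzero (else -EINVAL). */
	if (((address | size) & (PAGE_SIZE - 1)) || !size)
		return 1;

	uint64_t start = address >> PAGE_SHIFT;
	uint64_t end   = (address + size) >> PAGE_SHIFT;

	printf("gfn range: [%#llx, %#llx)\n",   /* [0x200, 0x210) */
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}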
3671 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, in update_halt_poll_stats() argument
3675 u64 poll_ns = ktime_to_ns(ktime_sub(end, start)); in update_halt_poll_stats()
3721 ktime_t start, cur, poll_end; in kvm_vcpu_halt() local
3731 start = cur = poll_end = ktime_get(); in kvm_vcpu_halt()
3733 ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns); in kvm_vcpu_halt()
3754 halt_ns = ktime_to_ns(cur) - ktime_to_ns(start); in kvm_vcpu_halt()
3762 update_halt_poll_stats(vcpu, start, poll_end, !waited); in kvm_vcpu_halt()
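kvm_vcpu_halt() polls for up to halt_poll_ns before actually blocking, then reports the total halt time and the slice spent polling to update_halt_poll_stats(). A stand-alone approximation using a monotonic clock in place of ktime_get(), with the wake event simulated:

/* Sketch of the halt-polling timing; everything here is a stand-in. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

static bool wake_event_pending(void) { return false; } /* simulated */

int main(void)
{
	const int64_t halt_poll_ns = 200000;  /* 200us poll budget */
	int64_t start, cur, poll_end, stop;
	bool waited = false;

	start = cur = poll_end = now_ns();
	stop = start + halt_poll_ns;

	/* Poll: spin until the budget expires or a wake event arrives. */
	do {
		if (wake_event_pending())
			goto out;
		poll_end = cur = now_ns();
	} while (cur < stop);

	waited = true;   /* polling failed; the real code blocks here */
	cur = now_ns();
out:
	printf("halt %lld ns, polled %lld ns, poll %s\n",
	       (long long)(cur - start), (long long)(poll_end - start),
	       waited ? "failed" : "succeeded");
	return 0;
}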
3958 int nr_vcpus, start, i, idx, yielded; in kvm_vcpu_on_spin() local
3991 start = READ_ONCE(kvm->last_boosted_vcpu) + 1; in kvm_vcpu_on_spin()
3993 idx = (start + i) % nr_vcpus; in kvm_vcpu_on_spin()
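The directed-yield scan starts one past last_boosted_vcpu and wraps modulo nr_vcpus, so boost candidates rotate fairly instead of always favouring low vCPU indices:

/* Sketch of the round-robin candidate order in kvm_vcpu_on_spin(). */
#include <stdio.h>

int main(void)
{
	int nr_vcpus = 4;
	int last_boosted_vcpu = 2;   /* illustrative */
	int start = last_boosted_vcpu + 1;

	for (int i = 0; i < nr_vcpus; i++) {
		int idx = (start + i) % nr_vcpus;
		printf("consider vcpu %d\n", idx);  /* 3, 0, 1, 2 */
	}
	return 0;
}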