Lines matching refs: end

520 	u64 end;  member
573 if (WARN_ON_ONCE(range->end <= range->start)) in kvm_handle_hva_range()
592 range->start, range->end - 1) { in kvm_handle_hva_range()
597 hva_end = min_t(unsigned long, range->end, in kvm_handle_hva_range()
619 gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot); in kvm_handle_hva_range()
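
The "+ PAGE_SIZE - 1" at line 619 is the detail worth pausing on: hva_end is an exclusive bound, so rounding it up before the page shift guarantees that a partially covered final page still lands inside the resulting GFN range. A minimal standalone sketch of the arithmetic (plain C; base_gfn and userspace_addr stand in for the memslot fields, and this is not the kernel's hva_to_gfn_memslot() itself):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Map a host virtual address to a guest frame number within one slot. */
    static uint64_t hva_to_gfn(unsigned long hva, unsigned long userspace_addr,
                               uint64_t base_gfn)
    {
        return base_gfn + ((hva - userspace_addr) >> PAGE_SHIFT);
    }

    int main(void)
    {
        unsigned long slot_hva = 0x7f0000000000UL;
        uint64_t base_gfn = 0x100;

        /* Exclusive HVA end covering half of the slot's second page:
         * rounding up by PAGE_SIZE - 1 yields exclusive GFN end 0x102,
         * so the partial page is included. */
        unsigned long hva_end = slot_hva + 0x1800;
        uint64_t gfn_end = hva_to_gfn(hva_end + PAGE_SIZE - 1, slot_hva, base_gfn);

        printf("gfn_end = 0x%llx\n", (unsigned long long)gfn_end); /* 0x102 */
        return 0;
    }
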
652 unsigned long end, in kvm_age_hva_range() argument
659 .end = end, in kvm_age_hva_range()
672 unsigned long end, in kvm_age_hva_range_no_flush() argument
675 return kvm_age_hva_range(mn, start, end, handler, false); in kvm_age_hva_range_no_flush()
694 void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end) in kvm_mmu_invalidate_range_add() argument
702 kvm->mmu_invalidate_range_end = end; in kvm_mmu_invalidate_range_add()
716 max(kvm->mmu_invalidate_range_end, end); in kvm_mmu_invalidate_range_add()
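
Lines 694-716 show the accumulation pattern: each kvm_mmu_invalidate_range_add() call widens a single tracked window to the union of every range added while an invalidation is in progress. A self-contained sketch of that idea, assuming a sentinel value for "no range recorded yet" (the kernel uses its own sentinel and field names):

    #include <stdint.h>

    #define RANGE_UNSET UINT64_MAX

    struct inval_window { uint64_t start, end; };

    /* Grow the tracked window to cover the union of all added ranges. */
    static void range_add(struct inval_window *w, uint64_t start, uint64_t end)
    {
        if (w->start == RANGE_UNSET) {
            w->start = start;
            w->end = end;
        } else {
            if (start < w->start)
                w->start = start;
            if (end > w->end)
                w->end = end;
        }
    }
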
722 kvm_mmu_invalidate_range_add(kvm, range->start, range->end); in kvm_mmu_unmap_gfn_range()
732 .end = range->end, in kvm_mmu_notifier_invalidate_range_start()
739 trace_kvm_unmap_hva_range(range->start, range->end); in kvm_mmu_notifier_invalidate_range_start()
763 gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end); in kvm_mmu_notifier_invalidate_range_start()
808 .end = range->end, in kvm_mmu_notifier_invalidate_range_end()
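
The notifier pair at lines 732-808 brackets the unmap: invalidate_range_start records the range and marks an invalidation in progress, invalidate_range_end retires it. A sketch of the usual sequencing, with counter names assumed by analogy with KVM's convention rather than copied from the file:

    #include <stdint.h>

    struct mmu_state {
        int      in_progress; /* nonzero while an invalidation is live */
        uint64_t seq;         /* bumped when an invalidation completes */
    };

    /* Page-fault handlers check in_progress (and the recorded range)
     * before installing a mapping, then recheck seq afterwards to
     * detect an invalidation that raced with the fault. */
    static void invalidate_start(struct mmu_state *s) { s->in_progress++; }
    static void invalidate_end(struct mmu_state *s)   { s->seq++; s->in_progress--; }
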
836 unsigned long end) in kvm_mmu_notifier_clear_flush_young() argument
838 trace_kvm_age_hva(start, end); in kvm_mmu_notifier_clear_flush_young()
840 return kvm_age_hva_range(mn, start, end, kvm_age_gfn, in kvm_mmu_notifier_clear_flush_young()
847 unsigned long end) in kvm_mmu_notifier_clear_young() argument
849 trace_kvm_age_hva(start, end); in kvm_mmu_notifier_clear_young()
864 return kvm_age_hva_range_no_flush(mn, start, end, kvm_age_gfn); in kvm_mmu_notifier_clear_young()
1980 gfn_t start, gfn_t end) in kvm_check_memslot_overlap() argument
1984 kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) { in kvm_check_memslot_overlap()
2432 bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end, in kvm_range_has_memory_attributes() argument
2443 if (end == start + 1) in kvm_range_has_memory_attributes()
2448 return !xas_find(&xas, end - 1); in kvm_range_has_memory_attributes()
2450 for (index = start; index < end; index++) { in kvm_range_has_memory_attributes()
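
Lines 2432-2450 outline three paths through kvm_range_has_memory_attributes(): a single-page fast path (end == start + 1), an emptiness check via xas_find() when no attributes are requested, and a per-index walk otherwise. The semantics reduce to "every gfn in the half-open range [start, end) carries exactly the requested attributes"; a sketch under that reading, ignoring the mask argument and using a hypothetical lookup helper:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical per-gfn lookup; stands in for something like KVM's
     * kvm_get_memory_attributes(). */
    extern unsigned long get_attrs(uint64_t gfn);

    /* True iff every gfn in [start, end) has exactly these attributes. */
    static bool range_has_attrs(uint64_t start, uint64_t end,
                                unsigned long attrs)
    {
        for (uint64_t gfn = start; gfn < end; gfn++)
            if (get_attrs(gfn) != attrs)
                return false;
        return true;
    }
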
2488 kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) { in kvm_handle_gfn_range()
2493 gfn_range.end = min(range->end, slot->base_gfn + slot->npages); in kvm_handle_gfn_range()
2494 if (gfn_range.start >= gfn_range.end) in kvm_handle_gfn_range()
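
Lines 2488-2494 clamp the caller's range to each memslot it overlaps and skip empty results: the intersection of two half-open intervals. As a generic, self-contained sketch:

    #include <stdbool.h>
    #include <stdint.h>

    /* Intersect half-open [a0, a1) with [b0, b1); returns false when
     * the result is empty, mirroring the "start >= end" skip above. */
    static bool intersect(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
                          uint64_t *start, uint64_t *end)
    {
        *start = a0 > b0 ? a0 : b0;
        *end   = a1 < b1 ? a1 : b1;
        return *start < *end;
    }
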
2529 kvm_mmu_invalidate_range_add(kvm, range->start, range->end); in kvm_pre_set_memory_attributes()
2535 static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end, in kvm_vm_set_mem_attributes() argument
2540 .end = end, in kvm_vm_set_mem_attributes()
2549 .end = end, in kvm_vm_set_mem_attributes()
2561 trace_kvm_vm_set_mem_attributes(start, end, attributes); in kvm_vm_set_mem_attributes()
2566 if (kvm_range_has_memory_attributes(kvm, start, end, ~0, attributes)) in kvm_vm_set_mem_attributes()
2573 for (i = start; i < end; i++) { in kvm_vm_set_mem_attributes()
2583 for (i = start; i < end; i++) { in kvm_vm_set_mem_attributes()
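
The two loops over [start, end) at lines 2573 and 2583 are a reserve-then-commit pattern: everything that can fail (allocation) happens before any state changes, the affected range is invalidated in between, and the second pass performs stores that can no longer fail. A sketch of the shape, assuming an XArray-backed attribute store as the surrounding identifiers suggest:

    /* Sketch only. Pass 1 may fail, and nothing has been committed yet
     * if it does; pass 2 stores into entries reserved by pass 1. */
    for (i = start; i < end; i++) {
        r = xa_reserve(&kvm->mem_attr_array, i, GFP_KERNEL_ACCOUNT);
        if (r)
            goto out_unlock;
    }
    /* ... zap/invalidate the affected gfn range (the pre-set pass) ... */
    for (i = start; i < end; i++)
        xa_store(&kvm->mem_attr_array, i, entry, GFP_KERNEL_ACCOUNT);
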
2600 gfn_t start, end; in kvm_vm_ioctl_set_mem_attributes() local
2613 end = (attrs->address + attrs->size) >> PAGE_SHIFT; in kvm_vm_ioctl_set_mem_attributes()
2622 return kvm_vm_set_mem_attributes(kvm, start, end, attrs->attributes); in kvm_vm_ioctl_set_mem_attributes()
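
Lines 2600-2622 convert the ioctl's byte-granular address and size into a half-open gfn range; since KVM validates page alignment before this point, plain shifts suffice. For example, address 0x10000 with size 0x3000 and PAGE_SHIFT 12 gives start 0x10 and end 0x13, i.e. gfns 0x10 through 0x12. A self-contained sketch of the arithmetic:

    #include <stdint.h>

    #define PAGE_SHIFT 12

    /* Byte range -> half-open gfn range; assumes address and size are
     * page-aligned, as the ioctl path checks before converting. */
    static void bytes_to_gfn_range(uint64_t address, uint64_t size,
                                   uint64_t *start, uint64_t *end)
    {
        *start = address >> PAGE_SHIFT;
        *end   = (address + size) >> PAGE_SHIFT;
    }
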
3672 ktime_t end, bool success) in update_halt_poll_stats() argument
3675 u64 poll_ns = ktime_to_ns(ktime_sub(end, start)); in update_halt_poll_stats()
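
Finally, update_halt_poll_stats() at lines 3672-3675 is elapsed-time accounting on ktime_t values. A sketch of the measurement pattern using the standard kernel time helpers:

    /* Sketch: measure a poll interval in nanoseconds. */
    ktime_t start = ktime_get();
    /* ... halt-polling loop runs here ... */
    u64 poll_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
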