Lines Matching refs:addr

2684 unsigned long addr, size; in kvm_host_page_size() local
2688 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL); in kvm_host_page_size()
2689 if (kvm_is_error_hva(addr)) in kvm_host_page_size()
2693 vma = find_vma(current->mm, addr); in kvm_host_page_size()
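The matches at 2684-2693 are the gfn-to-hva translation in kvm_host_page_size(): the guest frame is translated to a host virtual address, the error hva is rejected, and the backing VMA is looked up to learn the host page size. A simplified sketch of how those lines fit together; the mmap locking and the vma_kernel_pagesize() call are filled in from general kernel knowledge, not from the matched lines:

    unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
    {
            struct vm_area_struct *vma;
            unsigned long addr, size = PAGE_SIZE;

            /* Translate the guest frame number to a host virtual address. */
            addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
            if (kvm_is_error_hva(addr))
                    return PAGE_SIZE;       /* no memslot backs this gfn */

            /* The VMA backing that address reveals the host page size. */
            mmap_read_lock(current->mm);
            vma = find_vma(current->mm, addr);
            if (vma)
                    size = vma_kernel_pagesize(vma);
            mmap_read_unlock(current->mm);

            return size;
    }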
3071 unsigned long addr; in kvm_prefetch_pages() local
3074 addr = gfn_to_hva_many(slot, gfn, &entry); in kvm_prefetch_pages()
3075 if (kvm_is_error_hva(addr)) in kvm_prefetch_pages()
3081 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages); in kvm_prefetch_pages()
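Lines 3071-3081 show the same error-hva check feeding a batched pin in kvm_prefetch_pages(): gfn_to_hva_many() also reports how many pages remain in the memslot, and only if the whole run fits is get_user_pages_fast_only() attempted. A paraphrased sketch, not a verbatim quote of the function:

    int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
                           struct page **pages, int nr_pages)
    {
            unsigned long addr;
            gfn_t entry = 0;

            /* entry receives the number of pages left in the memslot. */
            addr = gfn_to_hva_many(slot, gfn, &entry);
            if (kvm_is_error_hva(addr))
                    return -1;

            /* Don't prefetch across the end of the slot. */
            if (entry < nr_pages)
                    return 0;

            /* Opportunistic, non-sleeping pin of the whole run. */
            return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
    }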
3182 unsigned long addr; in __kvm_read_guest_page() local
3187 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); in __kvm_read_guest_page()
3188 if (kvm_is_error_hva(addr)) in __kvm_read_guest_page()
3190 r = __copy_from_user(data, (void __user *)addr + offset, len); in __kvm_read_guest_page()
3258 unsigned long addr; in __kvm_read_guest_atomic() local
3263 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); in __kvm_read_guest_atomic()
3264 if (kvm_is_error_hva(addr)) in __kvm_read_guest_atomic()
3267 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); in __kvm_read_guest_atomic()
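Lines 3182-3190 and 3258-3267 are the two single-page read helpers. Both resolve the gfn through gfn_to_hva_memslot_prot(), reject the error hva, then copy from the host mapping; the only difference is __copy_from_user() versus the non-faulting __copy_from_user_inatomic() in the atomic variant. A sketch of the non-atomic path, with the error codes assumed rather than taken from the matches:

    static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
                                     void *data, int offset, int len)
    {
            unsigned long addr;
            int r;

            addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
            if (kvm_is_error_hva(addr))
                    return -EFAULT;

            /* addr is the start of the guest page; offset/len stay within it. */
            r = __copy_from_user(data, (void __user *)addr + offset, len);
            if (r)
                    return -EFAULT;
            return 0;
    }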
3291 unsigned long addr; in __kvm_write_guest_page() local
3296 addr = gfn_to_hva_memslot(memslot, gfn); in __kvm_write_guest_page()
3297 if (kvm_is_error_hva(addr)) in __kvm_write_guest_page()
3299 r = __copy_to_user((void __user *)addr + offset, data, len); in __kvm_write_guest_page()
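The write path at 3291-3299 mirrors the read path, except the hva comes from gfn_to_hva_memslot() and the copy direction flips to __copy_to_user(). A sketch under the assumption (from surrounding kvm_main.c context, not from the matched lines) that the page is also logged for dirty tracking afterwards:

    static int __kvm_write_guest_page(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot, gfn_t gfn,
                                      const void *data, int offset, int len)
    {
            unsigned long addr;
            int r;

            addr = gfn_to_hva_memslot(memslot, gfn);
            if (kvm_is_error_hva(addr))
                    return -EFAULT;

            r = __copy_to_user((void __user *)addr + offset, data, len);
            if (r)
                    return -EFAULT;

            /* Assumed from context: record the write for dirty logging. */
            mark_page_dirty_in_slot(kvm, memslot, gfn);
            return 0;
    }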
5775 gpa_t addr1 = r1->addr; in kvm_io_bus_cmp()
5776 gpa_t addr2 = r2->addr; in kvm_io_bus_cmp()
5803 gpa_t addr, int len) in kvm_io_bus_get_first_dev() argument
5809 .addr = addr, in kvm_io_bus_get_first_dev()
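From 5775 on, the matches move to the MMIO/PIO bus: kvm_io_bus_cmp() orders registered ranges by address, and kvm_io_bus_get_first_dev() builds a search key from addr/len and binary-searches bus->range for the first device whose range overlaps it. A sketch of that lookup; the bsearch comparator name is an assumption (taken to be a thin wrapper around kvm_io_bus_cmp()):

    static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
                                        gpa_t addr, int len)
    {
            struct kvm_io_range *range, key;
            int off;

            key = (struct kvm_io_range) {
                    .addr = addr,
                    .len  = len,
            };

            range = bsearch(&key, bus->range, bus->dev_count,
                            sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
            if (!range)
                    return -ENOENT;

            /* bsearch() may land anywhere in a run of overlapping ranges;
             * walk back to the first device that still compares equal. */
            off = range - bus->range;
            while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off - 1]) == 0)
                    off--;

            return off;
    }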
5831 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); in __kvm_io_bus_write()
5837 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, in __kvm_io_bus_write()
5846 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, in kvm_io_bus_write() argument
5854 .addr = addr, in kvm_io_bus_write()
5867 gpa_t addr, int len, const void *val, long cookie) in kvm_io_bus_write_cookie() argument
5873 .addr = addr, in kvm_io_bus_write_cookie()
5884 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, in kvm_io_bus_write_cookie()
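Lines 5831-5884 are the write side: __kvm_io_bus_write() starts at the index returned by kvm_io_bus_get_first_dev() and offers the access to every device whose range still compares equal, stopping at the first one that accepts it; the _cookie variant first retries a cached index before falling back to that scan. A paraphrased sketch of the scan (the -EOPNOTSUPP return is assumed, not in the matches):

    static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
                                  struct kvm_io_range *range, const void *val)
    {
            int idx;

            idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
            if (idx < 0)
                    return -EOPNOTSUPP;

            /* Offer the write to each overlapping device in turn. */
            while (idx < bus->dev_count &&
                   kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
                    if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
                                            range->len, val))
                            return idx;
                    idx++;
            }

            return -EOPNOTSUPP;
    }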
5900 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); in __kvm_io_bus_read()
5906 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, in __kvm_io_bus_read()
5915 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, in kvm_io_bus_read() argument
5923 .addr = addr, in kvm_io_bus_read()
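The exported entry points at 5846-5923 simply package addr/len into a struct kvm_io_range and hand it to the scan above. A sketch of the write entry point (the read side is symmetric); the SRCU read side is assumed to be held already by the vcpu run loop, which is why only srcu_dereference() appears here:

    int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                         int len, const void *val)
    {
            struct kvm_io_bus *bus;
            struct kvm_io_range range = {
                    .addr = addr,
                    .len  = len,
            };
            int r;

            /* Buses can be replaced at runtime; fetch the current one. */
            bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
            if (!bus)
                    return -ENOMEM;

            r = __kvm_io_bus_write(vcpu, bus, &range, val);
            return r < 0 ? r : 0;
    }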
5935 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, in kvm_io_bus_register_dev() argument
5958 .addr = addr, in kvm_io_bus_register_dev()
6028 gpa_t addr) in kvm_io_bus_get_dev() argument
6040 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); in kvm_io_bus_get_dev()
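Finally, 5935-6040: kvm_io_bus_register_dev() records the same addr/len pair when a device is added, keeping bus->range sorted so the lookup above stays valid, and kvm_io_bus_get_dev() reuses kvm_io_bus_get_first_dev() with len = 1 to map a bare address back to a device. A minimal sketch of that lookup, with the SRCU locking filled in by assumption:

    struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                                             gpa_t addr)
    {
            struct kvm_io_device *iodev = NULL;
            struct kvm_io_bus *bus;
            int dev_idx, srcu_idx;

            srcu_idx = srcu_read_lock(&kvm->srcu);

            bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
            if (!bus)
                    goto out_unlock;

            /* len = 1: match whichever registered range contains this address. */
            dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
            if (dev_idx < 0)
                    goto out_unlock;

            iodev = bus->range[dev_idx].dev;

    out_unlock:
            srcu_read_unlock(&kvm->srcu, srcu_idx);
            return iodev;
    }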