Lines Matching refs:size

2607 if (attrs->size == 0 || attrs->address + attrs->size < attrs->address) in kvm_vm_ioctl_set_mem_attributes()
2609 if (!PAGE_ALIGNED(attrs->address) || !PAGE_ALIGNED(attrs->size)) in kvm_vm_ioctl_set_mem_attributes()
2613 end = (attrs->address + attrs->size) >> PAGE_SHIFT; in kvm_vm_ioctl_set_mem_attributes()
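The three hits above are the input validation for the memory-attributes ioctl: reject a zero size, reject an address plus size that wraps around, require both address and size to be page aligned, then convert the byte range to an exclusive GFN range. Below is a minimal user-space sketch of that pattern; the struct name, PAGE_SHIFT value, and PAGE_ALIGNED macro are stand-ins, not the kernel definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                      /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGNED(x) (((x) & (PAGE_SIZE - 1)) == 0)

/* Hypothetical stand-in for the ioctl argument struct. */
struct mem_attrs {
	uint64_t address;
	uint64_t size;
};

/* True if the byte range passes the same checks the hits show. */
static bool attrs_range_valid(const struct mem_attrs *attrs)
{
	if (attrs->size == 0 || attrs->address + attrs->size < attrs->address)
		return false;              /* empty range or u64 wraparound */
	if (!PAGE_ALIGNED(attrs->address) || !PAGE_ALIGNED(attrs->size))
		return false;              /* both ends must be page aligned */
	return true;
}

int main(void)
{
	struct mem_attrs a = { .address = 0x100000, .size = 0x2000 };
	uint64_t start = a.address >> PAGE_SHIFT;
	uint64_t end   = (a.address + a.size) >> PAGE_SHIFT;   /* exclusive */

	printf("valid=%d gfn range [%llu, %llu)\n", attrs_range_valid(&a),
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}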
2684 unsigned long addr, size; in kvm_host_page_size() local
2686 size = PAGE_SIZE; in kvm_host_page_size()
2697 size = vma_kernel_pagesize(vma); in kvm_host_page_size()
2702 return size; in kvm_host_page_size()
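These hits outline the shape of the host-page-size lookup: start from a PAGE_SIZE default and only override it with the mapping's backing page size when a mapping for the address is actually found. A small user-space sketch of that fallback pattern follows, with a hypothetical lookup_mapping() standing in for the find_vma()/vma_kernel_pagesize() path and an assumed 4 KiB base page size.

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL                     /* assumed base page size */

/* Hypothetical mapping record standing in for a VMA. */
struct mapping {
	unsigned long start, end;
	unsigned long backing_page_size;     /* e.g. 2 MiB for a huge page */
};

/* Stand-in for find_vma(): returns NULL when addr is unmapped. */
static const struct mapping *lookup_mapping(const struct mapping *maps,
					    size_t n, unsigned long addr)
{
	for (size_t i = 0; i < n; i++)
		if (addr >= maps[i].start && addr < maps[i].end)
			return &maps[i];
	return NULL;
}

static unsigned long host_page_size(const struct mapping *maps, size_t n,
				    unsigned long addr)
{
	unsigned long size = PAGE_SIZE;      /* default, as in the hits */
	const struct mapping *m = lookup_mapping(maps, n, addr);

	if (m)
		size = m->backing_page_size; /* vma_kernel_pagesize() analogue */
	return size;
}

int main(void)
{
	struct mapping maps[] = {
		{ 0x200000, 0x400000, 2UL << 20 },   /* hugepage-backed */
	};
	printf("%lu\n", host_page_size(maps, 1, 0x250000));   /* 2097152 */
	printf("%lu\n", host_page_size(maps, 1, 0x800000));   /* 4096 */
	return 0;
}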
4280 size_t size, loff_t *offset) in kvm_vcpu_stats_read() argument
4286 sizeof(vcpu->stat), user_buffer, size, offset); in kvm_vcpu_stats_read()
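The two hits above are the vCPU binary-stats read handler: the file read() callback forwards the caller's buffer, size and offset together with the fixed-size vcpu->stat blob to a common helper (the kvm_vm_stats_read hits further down do the same with kvm->stat). A user-space sketch of that bounded read-at-offset pattern follows; memcpy stands in for the user copy, and the return convention (bytes copied) is assumed for illustration rather than taken from the hits.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Copy at most `size` bytes of a fixed-size stats blob starting at
 * *offset, and advance *offset by however much was copied.
 */
static long stats_read(const void *stats, size_t stats_size,
		       void *user_buffer, size_t size, long *offset)
{
	size_t remaining, n;

	if (*offset < 0 || (size_t)*offset >= stats_size)
		return 0;                        /* nothing left to read */
	remaining = stats_size - (size_t)*offset;
	n = size < remaining ? size : remaining;
	memcpy(user_buffer, (const char *)stats + *offset, n);
	*offset += (long)n;
	return (long)n;
}

int main(void)
{
	uint64_t stat[4] = { 1, 2, 3, 4 };
	uint64_t out[4] = { 0 };
	long off = 0;

	long r = stats_read(stat, sizeof(stat), out, sizeof(out), &off);
	printf("read %ld bytes, first counter %llu\n", r,
	       (unsigned long long)out[0]);
	return 0;
}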
4341 !PAGE_ALIGNED(range->size) || in kvm_vcpu_pre_fault_memory()
4342 range->gpa + range->size <= range->gpa) in kvm_vcpu_pre_fault_memory()
4348 full_size = range->size; in kvm_vcpu_pre_fault_memory()
4362 range->size -= r; in kvm_vcpu_pre_fault_memory()
4365 } while (range->size); in kvm_vcpu_pre_fault_memory()
4371 return full_size == range->size ? r : 0; in kvm_vcpu_pre_fault_memory()
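These hits sketch the pre-fault loop: validate that the GPA range is page aligned and does not wrap, remember the original size, then repeatedly invoke the per-chunk fault-in routine, shrinking range->size by the progress each pass reports until the range is exhausted or an error stops the loop. The last hit shows the return convention: the error is surfaced only if no progress at all was made, otherwise partial progress counts as success. A compact user-space sketch of that loop structure, with a hypothetical prefault_chunk() standing in for the per-architecture callback:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_ALIGNED(x) (((x) & (PAGE_SIZE - 1)) == 0)

struct prefault_range {
	uint64_t gpa;
	uint64_t size;
};

/* Hypothetical per-chunk worker: returns bytes handled, or a negative error. */
static long prefault_chunk(struct prefault_range *range)
{
	uint64_t chunk = range->size < PAGE_SIZE ? range->size : PAGE_SIZE;
	return (long)chunk;
}

static long prefault_memory(struct prefault_range *range)
{
	uint64_t full_size;
	long r = -22;                            /* -EINVAL */

	if (!PAGE_ALIGNED(range->gpa) ||
	    !PAGE_ALIGNED(range->size) ||
	    range->gpa + range->size <= range->gpa)
		return r;                        /* bad alignment or wraparound */

	full_size = range->size;
	do {
		r = prefault_chunk(range);
		if (r < 0)
			break;                   /* stop on error */
		range->size -= (uint64_t)r;      /* record progress */
		range->gpa  += (uint64_t)r;
	} while (range->size);

	/* Partial progress still counts as success. */
	return full_size == range->size ? r : 0;
}

int main(void)
{
	struct prefault_range range = { .gpa = 0x100000, .size = 3 * PAGE_SIZE };
	printf("result=%ld remaining=%llu\n", prefault_memory(&range),
	       (unsigned long long)range.size);
	return 0;
}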
4928 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size) in kvm_vm_ioctl_enable_dirty_log_ring() argument
4936 if (!size || (size & (size - 1))) in kvm_vm_ioctl_enable_dirty_log_ring()
4940 if (size < kvm_dirty_ring_get_rsvd_entries(kvm) * in kvm_vm_ioctl_enable_dirty_log_ring()
4941 sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE) in kvm_vm_ioctl_enable_dirty_log_ring()
4944 if (size > KVM_DIRTY_RING_MAX_ENTRIES * in kvm_vm_ioctl_enable_dirty_log_ring()
4958 kvm->dirty_ring_size = size; in kvm_vm_ioctl_enable_dirty_log_ring()
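The dirty-ring hits show the constraints on the ring size in bytes before it is stored in the VM: it must be a non-zero power of two, large enough to hold the reserved entries and at least one page, and no larger than the maximum entry count times the per-entry size. A user-space sketch of those bounds checks follows; the reserved-entry count, maximum entry count and entry size are assumed placeholder values, not the values behind kvm_dirty_ring_get_rsvd_entries(), KVM_DIRTY_RING_MAX_ENTRIES or sizeof(struct kvm_dirty_gfn).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE            4096u
#define ENTRY_SIZE           16u     /* placeholder per-entry size */
#define RSVD_ENTRIES         64u     /* placeholder reserved-entry count */
#define RING_MAX_ENTRIES     65536u  /* placeholder maximum entry count */

static bool dirty_ring_size_valid(uint32_t size)
{
	if (!size || (size & (size - 1)))
		return false;                         /* must be a power of two */
	if (size < RSVD_ENTRIES * ENTRY_SIZE || size < PAGE_SIZE)
		return false;                         /* too small to be useful */
	if (size > RING_MAX_ENTRIES * ENTRY_SIZE)
		return false;                         /* larger than the ring may be */
	return true;
}

int main(void)
{
	printf("%d %d %d\n",
	       dirty_ring_size_valid(4096),           /* 1: one page, power of two */
	       dirty_ring_size_valid(6144),           /* 0: not a power of two */
	       dirty_ring_size_valid(1u << 31));      /* 0: beyond the maximum */
	return 0;
}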
5081 size_t size, loff_t *offset) in kvm_vm_stats_read() argument
5087 sizeof(kvm->stat), user_buffer, size, offset); in kvm_vm_stats_read()
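The two kvm_vm_stats_read hits above follow the same bounded read-at-offset pattern sketched after the vCPU stats hits earlier, with the fixed-size kvm->stat blob in place of vcpu->stat.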
5160 unsigned long size; in kvm_vm_ioctl() local
5168 size = sizeof(struct kvm_userspace_memory_region); in kvm_vm_ioctl()
5170 size = sizeof(struct kvm_userspace_memory_region2); in kvm_vm_ioctl()
5181 if (copy_from_user(&mem, argp, size)) in kvm_vm_ioctl()
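The final hits are from the VM ioctl dispatcher: one handler serves both the legacy and the extended set-memory-region commands, so it picks the copy size from whichever struct the caller's command implies and then copies exactly that many bytes from user space into the larger struct. A small user-space sketch of the pattern follows, with simplified, illustrative struct layouts and memcpy in place of copy_from_user(); the field and command names are assumptions for the sketch, not the uAPI definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative layouts: the v2 struct extends the legacy one. */
struct memory_region {
	uint32_t slot;
	uint32_t flags;
	uint64_t guest_phys_addr;
	uint64_t memory_size;
	uint64_t userspace_addr;
};

struct memory_region2 {
	struct memory_region v1;      /* legacy fields first */
	uint64_t extra_offset;        /* hypothetical extended fields */
	uint32_t extra_fd;
	uint32_t pad;
};

enum cmd { SET_REGION_V1, SET_REGION_V2 };

static void handle_set_region(enum cmd cmd, const void *argp)
{
	struct memory_region2 mem;
	size_t size;

	/* Zero first so the extended fields are defined for a legacy call. */
	memset(&mem, 0, sizeof(mem));
	if (cmd == SET_REGION_V1)
		size = sizeof(struct memory_region);
	else
		size = sizeof(struct memory_region2);

	memcpy(&mem, argp, size);     /* copy_from_user() analogue */
	printf("copied %zu bytes, slot=%u size=%llu\n", size, mem.v1.slot,
	       (unsigned long long)mem.v1.memory_size);
}

int main(void)
{
	struct memory_region legacy = { .slot = 3, .memory_size = 1 << 20 };
	handle_set_region(SET_REGION_V1, &legacy);
	return 0;
}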