Searched refs:offset (Results 1 – 6 of 6) sorted by relevance
| /virt/kvm/ |
| A D | dirty_ring.c |
     53  static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)  in kvm_reset_dirty_gfn() argument
     66  if (!memslot || (offset + __fls(mask)) >= memslot->npages)  in kvm_reset_dirty_gfn()
     70  kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask);  in kvm_reset_dirty_gfn()
    143  next_offset = READ_ONCE(entry->offset);  in kvm_dirty_ring_reset()
    218  void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset)  in kvm_dirty_ring_push() argument
    229  entry->offset = offset;  in kvm_dirty_ring_push()
    237  trace_kvm_dirty_ring_push(ring, slot, offset);  in kvm_dirty_ring_push()
    262  struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset)  in kvm_dirty_ring_get_page() argument
    264  return vmalloc_to_page((void *)ring->dirty_gfns + offset * PAGE_SIZE);  in kvm_dirty_ring_get_page()
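The dirty-ring hits above pair a base GFN offset with a 64-bit mask, and kvm_reset_dirty_gfn() bails out when the highest set bit would land past the memslot. Below is a minimal userspace sketch of that offset+mask arithmetic, assuming invented helpers (msb_index, reset_one_gfn, reset_dirty_gfns); it illustrates the bounds check and bit walk only and is not the kernel code.

/* Userspace sketch of the offset+mask handling seen in kvm_reset_dirty_gfn():
 * "offset" is the first GFN and each set bit in "mask" marks one of the
 * following 64 GFNs as dirty.  All helper names are made up for the example. */
#include <stdint.h>
#include <stdio.h>

static int msb_index(uint64_t mask)        /* like the kernel's __fls(); mask != 0 */
{
    return 63 - __builtin_clzll(mask);
}

static void reset_one_gfn(uint64_t gfn)    /* stand-in for the real per-GFN work */
{
    printf("reset gfn %llu\n", (unsigned long long)gfn);
}

static void reset_dirty_gfns(uint64_t npages, uint64_t offset, uint64_t mask)
{
    if (!mask || offset + msb_index(mask) >= npages)
        return;                            /* highest dirty GFN is past the slot */

    while (mask) {
        int bit = __builtin_ctzll(mask);   /* lowest set bit */
        reset_one_gfn(offset + bit);
        mask &= mask - 1;                  /* clear that bit and continue */
    }
}

int main(void)
{
    reset_dirty_gfns(1024, 100, 0x5ULL);   /* resets GFNs 100 and 102 */
    return 0;
}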
|
| A D | guest_memfd.c |
    157  pgoff_t start = offset >> PAGE_SHIFT;  in kvm_gmem_punch_hole()
    158  pgoff_t end = (offset + len) >> PAGE_SHIFT;  in kvm_gmem_punch_hole()
    170  truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);  in kvm_gmem_punch_hole()
    187  if (offset + len > i_size_read(inode))  in kvm_gmem_allocate()
    192  start = offset >> PAGE_SHIFT;  in kvm_gmem_allocate()
    193  end = (offset + len) >> PAGE_SHIFT;  in kvm_gmem_allocate()
    238  if (!PAGE_ALIGNED(offset) || !PAGE_ALIGNED(len))  in kvm_gmem_fallocate()
    465  unsigned int fd, loff_t offset)  in kvm_gmem_bind() argument
    489  if (offset < 0 || !PAGE_ALIGNED(offset) ||  in kvm_gmem_bind()
    490  offset + size > i_size_read(inode))  in kvm_gmem_bind()
    [all …]
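The guest_memfd hole-punch and allocate paths convert a byte range into a page-index range and require both offset and len to be page aligned. The sketch below redoes that conversion outside the kernel, assuming local redefinitions of PAGE_SHIFT/PAGE_SIZE/PAGE_ALIGNED and an invented byte_range_to_pages() helper; it shows only the arithmetic, not the real fallocate handling.

/* Byte-offset -> page-index arithmetic as in kvm_gmem_punch_hole()/
 * kvm_gmem_fallocate(): the byte range [offset, offset+len) maps to the
 * page-index range [start, end), and unaligned inputs are rejected. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGNED(x) (((x) & (PAGE_SIZE - 1)) == 0)

static bool byte_range_to_pages(uint64_t offset, uint64_t len,
                                uint64_t *start, uint64_t *end)
{
    if (!PAGE_ALIGNED(offset) || !PAGE_ALIGNED(len))
        return false;                        /* mirrors the alignment check */

    *start = offset >> PAGE_SHIFT;           /* first page index in the range */
    *end   = (offset + len) >> PAGE_SHIFT;   /* one past the last page index */
    return true;
}

int main(void)
{
    uint64_t start, end;

    if (byte_range_to_pages(0x3000, 0x2000, &start, &end))
        printf("pages [%llu, %llu)\n",       /* prints "pages [3, 5)" */
               (unsigned long long)start, (unsigned long long)end);
    return 0;
}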
|
| A D | kvm_main.c |
    2266  gfn_t offset;  in kvm_get_dirty_log_protect() local
    2277  offset, mask);  in kvm_get_dirty_log_protect()
    2335  gfn_t offset;  in kvm_clear_dirty_log_protect() local
    2375  for (offset = log->first_page, i = offset / BITS_PER_LONG,  in kvm_clear_dirty_log_protect()
    2394  offset, mask);  in kvm_clear_dirty_log_protect()
    3225  offset = 0;  in kvm_read_guest()
    3245  offset = 0;  in kvm_vcpu_read_guest()
    3336  offset = 0;  in kvm_write_guest()
    3357  offset = 0;  in kvm_vcpu_write_guest()
    3398  ghc->hva += offset;  in __kvm_gfn_to_hva_cache_init()
    [all …]
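In the kvm_clear_dirty_log_protect() hit, offset counts GFNs while i indexes the unsigned-long words of the dirty bitmap, the two advancing in lockstep. The following is a rough userspace analogue of that word-by-word walk; walk_dirty_bitmap() and handle_dirty_word() are made up for the example, and first_page is assumed to be word aligned here.

/* Word-by-word walk of a dirty bitmap: "offset" advances in GFNs while "i"
 * indexes the unsigned-long words, as in the loop shown above. */
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static void handle_dirty_word(uint64_t offset, unsigned long mask)
{
    if (mask)                               /* stand-in for clearing/protecting GFNs */
        printf("gfn offset %llu: mask %#lx\n",
               (unsigned long long)offset, mask);
}

static void walk_dirty_bitmap(const unsigned long *bitmap,
                              uint64_t first_page, uint64_t num_pages)
{
    uint64_t offset = first_page;           /* GFN offset into the slot */
    size_t i = offset / BITS_PER_LONG;      /* matching bitmap word index */

    for (; offset < first_page + num_pages;
         offset += BITS_PER_LONG, i++)
        handle_dirty_word(offset, bitmap[i]);
}

int main(void)
{
    unsigned long bitmap[2] = { 0x5UL, 0x80UL };

    walk_dirty_bitmap(bitmap, 0, 2 * BITS_PER_LONG);
    return 0;
}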
|
| A D | binary_stats.c |
     55  char __user *user_buffer, size_t size, loff_t *offset)  in kvm_stats_read() argument
     63  loff_t pos = *offset;  in kvm_stats_read()
    142  *offset = pos;  in kvm_stats_read()
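kvm_stats_read() follows the usual read-handler cursor pattern: load *offset into a local pos, copy from that position, then store the advanced pos back through the pointer. The snippet below reproduces that pattern as a plain userspace function; stats_read() and my_loff_t are stand-ins and no kernel API is used.

/* Read-cursor pattern: the caller's offset is loaded, data is copied from
 * that position, and the updated position is written back. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

typedef long long my_loff_t;               /* stand-in for the kernel's loff_t */

static size_t stats_read(const char *src, size_t src_len,
                         char *dst, size_t size, my_loff_t *offset)
{
    my_loff_t pos = *offset;                /* load the caller's cursor */
    size_t remaining, copy;

    if (pos < 0 || (size_t)pos >= src_len)
        return 0;                           /* nothing left to read */

    remaining = src_len - (size_t)pos;
    copy = size < remaining ? size : remaining;
    memcpy(dst, src + pos, copy);

    *offset = pos + copy;                   /* write the advanced cursor back */
    return copy;
}

int main(void)
{
    const char data[] = "header+descriptors+values";
    char buf[8];
    my_loff_t off = 0;
    size_t n;

    while ((n = stats_read(data, sizeof(data) - 1, buf, sizeof(buf), &off)) > 0)
        printf("read %zu bytes, offset now %lld\n", n, (long long)off);
    return 0;
}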
|
| A D | kvm_mm.h |
     74  unsigned int fd, loff_t offset);
     84  unsigned int fd, loff_t offset)  in kvm_gmem_bind() argument
|
| A D | pfncache.c |
     63  unsigned long offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :  in kvm_gpc_is_valid_len() local
     70  return offset + len <= PAGE_SIZE;  in kvm_gpc_is_valid_len()
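kvm_gpc_is_valid_len() only accepts a cached range that stays inside a single page: the offset within the page plus the requested length must not exceed PAGE_SIZE. A tiny sketch of that check, with offset_in_page() and PAGE_SIZE re-created locally for illustration and fits_in_one_page() as an invented wrapper:

/* Single-page check: the in-page offset plus the length must not spill
 * into the next page. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define offset_in_page(addr) ((unsigned long)(addr) & (PAGE_SIZE - 1))

static bool fits_in_one_page(unsigned long addr, unsigned long len)
{
    unsigned long offset = offset_in_page(addr);

    return offset + len <= PAGE_SIZE;       /* range must stay inside the page */
}

int main(void)
{
    printf("%d\n", fits_in_one_page(0x1ff8, 8));   /* 1: ends exactly at the boundary */
    printf("%d\n", fits_in_one_page(0x1ff8, 16));  /* 0: crosses into the next page */
    return 0;
}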
|
Completed in 26 milliseconds