Lines Matching refs:gfn (identifier cross-reference over drivers/gpu/drm/i915/gvt/kvmgt.c, the i915 GVT-g KVMGT backend)

92  gfn_t gfn; member
100 gfn_t gfn; member
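
The two gfn members above belong to two different structures: the DMA-cache entry indexed by the rbtree helpers further down, and the write-protect table entry used by the kvmgt_protect_table_*() helpers. A minimal sketch of plausible definitions, reconstructed from how this listing uses the fields; the surrounding fields and their order are assumptions:

    /* Sketch only: reconstructed from usage in this listing, not copied
     * from the source. gfn_t comes from linux/kvm_types.h. */
    #include <linux/kref.h>
    #include <linux/kvm_types.h>
    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct gvt_dma {
        struct intel_vgpu *vgpu;
        struct rb_node gfn_node;      /* keyed by gfn, see __gvt_cache_find_gfn() */
        struct rb_node dma_addr_node; /* assumed second index, keyed by dma_addr */
        gfn_t gfn;                    /* line 92: guest frame number key */
        dma_addr_t dma_addr;
        unsigned long size;
        struct kref ref;              /* dropped via __gvt_dma_release(), line 1726 */
    };

    struct kvmgt_pgfn {
        gfn_t gfn;                    /* line 100: write-protected gfn */
        struct hlist_node hnode;      /* lives in info->ptable, see line 355 */
    };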
130 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_unpin_guest_page() argument
133 vfio_unpin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT, in gvt_unpin_guest_page()
138 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_pin_guest_page() argument
151 dma_addr_t cur_iova = (gfn + npage) << PAGE_SHIFT; in gvt_pin_guest_page()
175 gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE); in gvt_pin_guest_page()
179 static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_dma_map_page() argument
186 ret = gvt_pin_guest_page(vgpu, gfn, size, &page); in gvt_dma_map_page()
195 gvt_unpin_guest_page(vgpu, gfn, size); in gvt_dma_map_page()
202 static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_dma_unmap_page() argument
208 gvt_unpin_guest_page(vgpu, gfn, size); in gvt_dma_unmap_page()
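
Lines 130 through 208 form the pin/map layer: gvt_pin_guest_page() pins the guest range one page at a time through vfio_pin_pages() and requires the pinned pages to be physically contiguous, gvt_dma_map_page() then maps the result for DMA, and the two unpin/unmap helpers undo both steps. A sketch of the pin loop, assuming the error unwinding shown here; diagnostics present in the real function are omitted:

    /* Sketch of the pin loop around lines 138-175. Relies on kvmgt.c
     * context for struct intel_vgpu and gvt_unpin_guest_page(). */
    #include <linux/iommu.h>
    #include <linux/mm.h>
    #include <linux/vfio.h>

    static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
                                  unsigned long size, struct page **page)
    {
        int total_pages = DIV_ROUND_UP(size, PAGE_SIZE);
        struct page *base_page = NULL;
        int npage;
        int ret;

        for (npage = 0; npage < total_pages; npage++) {
            dma_addr_t cur_iova = (gfn + npage) << PAGE_SHIFT; /* line 151 */
            struct page *cur_page;

            ret = vfio_pin_pages(&vgpu->vfio_device, cur_iova, 1,
                                 IOMMU_READ | IOMMU_WRITE, &cur_page);
            if (ret != 1) {
                ret = ret < 0 ? ret : -EFAULT;
                goto err;
            }

            if (npage == 0) {
                base_page = cur_page;
            } else if (page_to_pfn(cur_page) !=
                       page_to_pfn(base_page) + npage) {
                /* a large mapping was requested but the backing pages
                 * are not physically contiguous */
                ret = -EINVAL;
                npage++;   /* this page is pinned too, unpin it below */
                goto err;
            }
        }

        *page = base_page;
        return 0;

    err:
        /* line 175: unpin everything pinned so far */
        gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
        return ret;
    }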
230 static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn) in __gvt_cache_find_gfn() argument
238 if (gfn < itr->gfn) in __gvt_cache_find_gfn()
240 else if (gfn > itr->gfn) in __gvt_cache_find_gfn()
248 static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, in __gvt_cache_add() argument
259 new->gfn = gfn; in __gvt_cache_add()
270 if (gfn < itr->gfn) in __gvt_cache_add()
319 gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size); in gvt_cache_destroy()
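
Lines 230 through 270 implement the per-vGPU mapping cache as an rbtree keyed by gfn; gvt_cache_destroy() at line 319 walks it and unmaps every entry. A sketch of the lookup at lines 230-240, assuming the tree root is an rb_root named gfn_cache in struct intel_vgpu:

    #include <linux/rbtree.h>

    /* Sketch of __gvt_cache_find_gfn(), lines 230-240: a plain rbtree
     * walk comparing gfn keys. */
    static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
    {
        struct rb_node *node = vgpu->gfn_cache.rb_node;  /* root name assumed */
        struct gvt_dma *itr;

        while (node) {
            itr = rb_entry(node, struct gvt_dma, gfn_node);

            if (gfn < itr->gfn)                          /* line 238 */
                node = node->rb_left;
            else if (gfn > itr->gfn)                     /* line 240 */
                node = node->rb_right;
            else
                return itr;
        }
        return NULL;
    }

The insert path at lines 248-270 repeats the same comparison to find the parent slot before linking the new node, which is why line 270 mirrors the ordering test of the lookup.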
351 __kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn) in __kvmgt_protect_table_find() argument
355 hash_for_each_possible(info->ptable, p, hnode, gfn) { in __kvmgt_protect_table_find()
356 if (gfn == p->gfn) { in __kvmgt_protect_table_find()
365 static bool kvmgt_gfn_is_write_protected(struct intel_vgpu *info, gfn_t gfn) in kvmgt_gfn_is_write_protected() argument
369 p = __kvmgt_protect_table_find(info, gfn); in kvmgt_gfn_is_write_protected()
373 static void kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn) in kvmgt_protect_table_add() argument
377 if (kvmgt_gfn_is_write_protected(info, gfn)) in kvmgt_protect_table_add()
381 if (WARN(!p, "gfn: 0x%llx\n", gfn)) in kvmgt_protect_table_add()
384 p->gfn = gfn; in kvmgt_protect_table_add()
385 hash_add(info->ptable, &p->hnode, gfn); in kvmgt_protect_table_add()
388 static void kvmgt_protect_table_del(struct intel_vgpu *info, gfn_t gfn) in kvmgt_protect_table_del() argument
392 p = __kvmgt_protect_table_find(info, gfn); in kvmgt_protect_table_del()
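
Lines 351 through 392 are the write-protect bookkeeping: kvmgt_pgfn entries in a hashtable (info->ptable) hashed directly by the gfn value. A sketch of the find and add paths; the GFP_ATOMIC allocation flag is an assumption:

    #include <linux/hashtable.h>
    #include <linux/slab.h>

    /* Sketch of lines 351-385: the gfn doubles as hash key and payload. */
    static struct kvmgt_pgfn *
    __kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn)
    {
        struct kvmgt_pgfn *p;

        hash_for_each_possible(info->ptable, p, hnode, gfn)  /* line 355 */
            if (gfn == p->gfn)
                return p;
        return NULL;
    }

    static void kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn)
    {
        struct kvmgt_pgfn *p;

        if (kvmgt_gfn_is_write_protected(info, gfn))         /* line 377 */
            return;

        p = kzalloc(sizeof(*p), GFP_ATOMIC);                 /* flag assumed */
        if (WARN(!p, "gfn: 0x%llx\n", gfn))                  /* line 381 */
            return;

        p->gfn = gfn;
        hash_add(info->ptable, &p->hnode, gfn);              /* line 385 */
    }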
626 gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr, in intel_vgpu_dma_unmap()
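
intel_vgpu_dma_unmap() at line 626 is the VFIO .dma_unmap callback: when a guest IOVA range is invalidated, every cached mapping inside it is unmapped and evicted. A sketch, assuming a vfio_dev_to_vgpu() container_of helper, a cache_lock mutex, and a __gvt_cache_remove_entry() helper, none of which appear in this listing:

    #include <linux/vfio.h>

    static void intel_vgpu_dma_unmap(struct vfio_device *vfio_dev, u64 iova,
                                     u64 length)
    {
        struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); /* assumed helper */
        struct gvt_dma *entry;
        u64 iov_pfn = iova >> PAGE_SHIFT;
        u64 end_iov_pfn = iov_pfn + length / PAGE_SIZE;

        mutex_lock(&vgpu->cache_lock);                     /* lock name assumed */
        for (; iov_pfn < end_iov_pfn; iov_pfn++) {
            entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
            if (!entry)
                continue;

            gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
                               entry->size);               /* line 626 */
            __gvt_cache_remove_entry(vgpu, entry);         /* assumed helper */
        }
        mutex_unlock(&vgpu->cache_lock);
    }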
1547 int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) in intel_gvt_page_track_add() argument
1557 slot = gfn_to_memslot(kvm, gfn); in intel_gvt_page_track_add()
1565 if (kvmgt_gfn_is_write_protected(info, gfn)) in intel_gvt_page_track_add()
1568 kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE); in intel_gvt_page_track_add()
1569 kvmgt_protect_table_add(info, gfn); in intel_gvt_page_track_add()
1577 int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn) in intel_gvt_page_track_remove() argument
1587 slot = gfn_to_memslot(kvm, gfn); in intel_gvt_page_track_remove()
1595 if (!kvmgt_gfn_is_write_protected(info, gfn)) in intel_gvt_page_track_remove()
1598 kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE); in intel_gvt_page_track_remove()
1599 kvmgt_protect_table_del(info, gfn); in intel_gvt_page_track_remove()
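
Lines 1547 through 1599 connect the protect table to KVM page tracking: intel_gvt_page_track_add() resolves the memslot for the gfn, skips gfns that are already protected, then calls kvm_slot_page_track_add_page() and records the gfn; intel_gvt_page_track_remove() mirrors this. A sketch of the add path, assuming the kvm pointer is reachable as info->vfio_device.kvm and that the usual srcu/mmu_lock discipline applies:

    #include <linux/kvm_host.h>
    #include <asm/kvm_page_track.h>

    int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
    {
        struct kvm *kvm = info->vfio_device.kvm;  /* pointer location assumed */
        struct kvm_memory_slot *slot;
        int idx;

        idx = srcu_read_lock(&kvm->srcu);
        slot = gfn_to_memslot(kvm, gfn);                     /* line 1557 */
        if (!slot) {
            srcu_read_unlock(&kvm->srcu, idx);
            return -EINVAL;
        }

        write_lock(&kvm->mmu_lock);
        if (!kvmgt_gfn_is_write_protected(info, gfn)) {      /* line 1565 */
            kvm_slot_page_track_add_page(kvm, slot, gfn,
                                         KVM_PAGE_TRACK_WRITE);
            kvmgt_protect_table_add(info, gfn);
        }
        write_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);
        return 0;
    }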
1624 gfn_t gfn; in kvmgt_page_track_flush_slot() local
1630 gfn = slot->base_gfn + i; in kvmgt_page_track_flush_slot()
1631 if (kvmgt_gfn_is_write_protected(info, gfn)) { in kvmgt_page_track_flush_slot()
1632 kvm_slot_page_track_remove_page(kvm, slot, gfn, in kvmgt_page_track_flush_slot()
1634 kvmgt_protect_table_del(info, gfn); in kvmgt_page_track_flush_slot()
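
kvmgt_page_track_flush_slot() at lines 1624-1634 handles a memslot being removed: every tracked gfn that falls inside the slot is unprotected and deleted from the table. A sketch, assuming the page-track notifier node is embedded in struct intel_vgpu as track_node:

    static void kvmgt_page_track_flush_slot(struct kvm *kvm,
                                            struct kvm_memory_slot *slot,
                                            struct kvm_page_track_notifier_node *node)
    {
        struct intel_vgpu *info =
            container_of(node, struct intel_vgpu, track_node); /* field assumed */
        unsigned long i;
        gfn_t gfn;                                             /* line 1624 */

        write_lock(&kvm->mmu_lock);
        for (i = 0; i < slot->npages; i++) {
            gfn = slot->base_gfn + i;                          /* line 1630 */
            if (kvmgt_gfn_is_write_protected(info, gfn)) {     /* line 1631 */
                kvm_slot_page_track_remove_page(kvm, slot, gfn,
                                                KVM_PAGE_TRACK_WRITE);
                kvmgt_protect_table_del(info, gfn);            /* line 1634 */
            }
        }
        write_unlock(&kvm->mmu_lock);
    }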
1656 int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, in intel_gvt_dma_map_guest_page() argument
1667 entry = __gvt_cache_find_gfn(vgpu, gfn); in intel_gvt_dma_map_guest_page()
1669 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size); in intel_gvt_dma_map_guest_page()
1673 ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size); in intel_gvt_dma_map_guest_page()
1678 gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size); in intel_gvt_dma_map_guest_page()
1681 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size); in intel_gvt_dma_map_guest_page()
1685 ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size); in intel_gvt_dma_map_guest_page()
1697 gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size); in intel_gvt_dma_map_guest_page()
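
intel_gvt_dma_map_guest_page() at lines 1656-1697 is the cache front end: a hit of the same size reuses the mapping, a hit with a different size is torn down and remapped, and a miss pins, maps, and caches; if caching a fresh mapping fails, the mapping is unwound (line 1697). A sketch, where the cache_lock mutex, the kref_get() on a hit, and __gvt_cache_remove_entry() are assumed names:

    int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
                                     dma_addr_t *dma_addr, unsigned long size)
    {
        struct gvt_dma *entry;
        int ret;

        mutex_lock(&vgpu->cache_lock);                      /* lock name assumed */

        entry = __gvt_cache_find_gfn(vgpu, gfn);            /* line 1667 */
        if (!entry) {
            ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);  /* line 1669 */
            if (ret)
                goto err_unlock;

            ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);  /* line 1673 */
            if (ret)
                goto err_unmap;
        } else if (entry->size != size) {
            /* the same gfn was mapped before with a different size */
            gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr,
                               entry->size);                /* line 1678 */
            __gvt_cache_remove_entry(vgpu, entry);          /* assumed helper */

            ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);  /* line 1681 */
            if (ret)
                goto err_unlock;

            ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);  /* line 1685 */
            if (ret)
                goto err_unmap;
        } else {
            kref_get(&entry->ref);                          /* hit: bump refcount */
            *dma_addr = entry->dma_addr;
        }

        mutex_unlock(&vgpu->cache_lock);
        return 0;

    err_unmap:
        gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);     /* line 1697 */
    err_unlock:
        mutex_unlock(&vgpu->cache_lock);
        return ret;
    }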
1726 gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr, in __gvt_dma_release()
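
__gvt_dma_release() at line 1726 is the kref release callback that pairs with the reference taken on a cache hit: it unmaps, unpins, and evicts the entry. A sketch, again treating __gvt_cache_remove_entry() as an assumed helper:

    #include <linux/kref.h>

    static void __gvt_dma_release(struct kref *ref)
    {
        struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);

        gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
                           entry->size);                    /* line 1726 */
        __gvt_cache_remove_entry(entry->vgpu, entry);       /* assumed helper */
    }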