
Searched refs:gpte (Results 1 – 9 of 9) sorted by relevance

/linux/arch/powerpc/kvm/
book3s_64_mmu.c
213 gpte->eaddr = eaddr; in kvmppc_mmu_book3s_64_xlate()
215 gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff); in kvmppc_mmu_book3s_64_xlate()
216 gpte->raddr &= KVM_PAM; in kvmppc_mmu_book3s_64_xlate()
218 gpte->may_read = true; in kvmppc_mmu_book3s_64_xlate()
219 gpte->may_write = true; in kvmppc_mmu_book3s_64_xlate()
221 gpte->wimg = HPTE_R_M; in kvmppc_mmu_book3s_64_xlate()
293 gpte->eaddr = eaddr; in kvmppc_mmu_book3s_64_xlate()
303 gpte->may_read = false; in kvmppc_mmu_book3s_64_xlate()
318 gpte->may_read = true; in kvmppc_mmu_book3s_64_xlate()
324 eaddr, avpn, gpte->vpage, gpte->raddr); in kvmppc_mmu_book3s_64_xlate()
[all …]
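
Taken together, the fields written in these hits outline the guest-PTE descriptor that kvmppc_mmu_book3s_64_xlate() fills in for its caller. A reconstructed sketch, with the field set inferred only from the accesses shown in these results (layout and types are assumptions, not the kernel's struct kvmppc_pte from asm/kvm_host.h):

#include <stdbool.h>

/* Reconstructed sketch only -- field set inferred from the gpte-> accesses
 * in these search results, not copied from the kernel definition. */
struct kvmppc_pte_sketch {
	unsigned long eaddr;	/* guest effective address being translated */
	unsigned long vpage;	/* virtual page number derived from the HPTE AVPN */
	unsigned long raddr;	/* resulting guest real (physical) address */
	bool may_read;		/* translation grants read access */
	bool may_write;		/* translation grants write access */
	bool may_execute;	/* translation grants execute access */
	unsigned int wimg;	/* storage attributes (W/I/M/G), e.g. HPTE_R_M */
	unsigned int page_size;	/* page-size index set by the radix walk */
	unsigned int page_shift;/* log2 of the page size set by the radix walk */
	unsigned long rc;	/* referenced/changed state used by nested HV */
};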
book3s_64_mmu_radix.c
199 gpte->page_size = ps; in kvmppc_mmu_walk_radix_tree()
200 gpte->page_shift = offset; in kvmppc_mmu_walk_radix_tree()
202 gpte->eaddr = eaddr; in kvmppc_mmu_walk_radix_tree()
203 gpte->raddr = gpa; in kvmppc_mmu_walk_radix_tree()
206 gpte->may_read = !!(pte & _PAGE_READ); in kvmppc_mmu_walk_radix_tree()
284 gpte->may_read = 0; in kvmppc_mmu_radix_xlate()
285 gpte->may_write = 0; in kvmppc_mmu_radix_xlate()
286 gpte->may_execute = 0; in kvmppc_mmu_radix_xlate()
292 gpte->may_read = 0; in kvmppc_mmu_radix_xlate()
294 gpte->may_write = 0; in kvmppc_mmu_radix_xlate()
[all …]
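
The hit at line 206 shows how the radix walker turns leaf-PTE permission bits into the descriptor's may_* flags. A minimal sketch of that pattern, reusing the sketch struct from the block above and assuming _PAGE_WRITE and _PAGE_EXEC are handled the same way as the _PAGE_READ case visible here (helper name and fallback macro values are illustrative):

/* Sketch: propagate radix leaf-PTE permission bits into the guest-PTE
 * descriptor sketched above.  Only the _PAGE_READ line is confirmed by the
 * hit at line 206; the other two are assumed to follow the same pattern. */
#ifndef _PAGE_READ	/* fallback values for a standalone build; illustrative */
#define _PAGE_EXEC	0x00001UL
#define _PAGE_WRITE	0x00002UL
#define _PAGE_READ	0x00004UL
#endif

static void sketch_radix_fill_perms(struct kvmppc_pte_sketch *gpte,
				    unsigned long pte)
{
	gpte->may_read    = !!(pte & _PAGE_READ);
	gpte->may_write   = !!(pte & _PAGE_WRITE);
	gpte->may_execute = !!(pte & _PAGE_EXEC);
}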
book3s_hv_nested.c
1418 struct kvmppc_pte gpte, in kvmhv_handle_nested_set_rc() argument
1430 if (pgflags & ~gpte.rc) in kvmhv_handle_nested_set_rc()
1436 gpte.raddr, kvm->arch.lpid); in kvmhv_handle_nested_set_rc()
1486 struct kvmppc_pte gpte; in __kvmhv_nested_page_fault() local
1525 ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr); in __kvmhv_nested_page_fault()
1542 l1_shift = gpte.page_shift; in __kvmhv_nested_page_fault()
1549 gpa = gpte.raddr; in __kvmhv_nested_page_fault()
1607 perm |= gpte.may_read ? 0UL : _PAGE_READ; in __kvmhv_nested_page_fault()
1608 perm |= gpte.may_write ? 0UL : _PAGE_WRITE; in __kvmhv_nested_page_fault()
1609 perm |= gpte.may_execute ? 0UL : _PAGE_EXEC; in __kvmhv_nested_page_fault()
[all …]
book3s_64_mmu_hv.c
333 struct kvmppc_pte *gpte, bool data, bool iswrite) in kvmppc_mmu_book3s_64_hv_xlate() argument
345 return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite); in kvmppc_mmu_book3s_64_hv_xlate()
375 gpte->eaddr = eaddr; in kvmppc_mmu_book3s_64_hv_xlate()
376 gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); in kvmppc_mmu_book3s_64_hv_xlate()
384 gpte->may_read = hpte_read_permission(pp, key); in kvmppc_mmu_book3s_64_hv_xlate()
385 gpte->may_write = hpte_write_permission(pp, key); in kvmppc_mmu_book3s_64_hv_xlate()
386 gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G)); in kvmppc_mmu_book3s_64_hv_xlate()
392 gpte->may_read = 0; in kvmppc_mmu_book3s_64_hv_xlate()
394 gpte->may_write = 0; in kvmppc_mmu_book3s_64_hv_xlate()
398 gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr); in kvmppc_mmu_book3s_64_hv_xlate()
/linux/arch/x86/kvm/mmu/
paging_tmpl.h
105 unsigned gpte) in FNAME()
191 u64 gpte) in FNAME()
193 if (!FNAME(is_present_gpte)(gpte)) in FNAME()
229 access ^= (gpte >> PT64_NX_SHIFT); in FNAME()
301 pte_t pte = {.pte = gpte}; in FNAME()
333 gpte |= level - PG_LEVEL_4K - 1; in FNAME()
335 return gpte & PT_PAGE_SIZE_MASK; in FNAME()
574 gfn = gpte_to_gfn(gpte); in FNAME()
972 pt_element_t gpte; in FNAME() local
1097 pt_element_t gpte; in FNAME() local
[all …]
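
The paging_tmpl.h hits revolve around decoding an x86 guest PTE: presence (line 193), access/NX bits (line 229) and the large-page size bit (line 335). A standalone sketch of that decoding using the architectural bit positions (P = bit 0, R/W = bit 1, U/S = bit 2, XD = bit 63); struct and helper names are illustrative, not KVM's:

#include <stdbool.h>
#include <stdint.h>

struct gpte_access_sketch {
	bool present;	/* bit 0: entry is valid */
	bool writable;	/* bit 1: R/W */
	bool user;	/* bit 2: U/S */
	bool nx;	/* bit 63: execute-disable, cf. the gpte >> PT64_NX_SHIFT hit */
};

static struct gpte_access_sketch sketch_decode_gpte(uint64_t gpte)
{
	return (struct gpte_access_sketch){
		.present  = gpte & (1ULL << 0),
		.writable = gpte & (1ULL << 1),
		.user     = gpte & (1ULL << 2),
		.nx       = gpte & (1ULL << 63),
	};
}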
mmu.c
349 static gfn_t pse36_gfn_delta(u32 gpte) in pse36_gfn_delta() argument
353 return (gpte & PT32_DIR_PSE36_MASK) << shift; in pse36_gfn_delta()
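
pse36_gfn_delta() recovers the high physical-address bits that PSE-36 packs into a 32-bit large-page PDE. A standalone sketch of the same arithmetic, assuming PT32_DIR_PSE36_MASK covers PDE bits 13..16 and PAGE_SHIFT is 12 (constant and helper names are illustrative):

#include <stdint.h>

/* PSE-36: a 32-bit PDE mapping a 4 MiB page carries physical-address bits
 * 32..35 in PDE bits 13..16.  As a 4 KiB frame number (gfn = phys >> 12)
 * those bits belong at gfn bits 20..23, i.e. a left shift by
 * 32 - 13 - 12 = 7 from where they sit in the PDE. */
#define SKETCH_PSE36_SHIFT	13
#define SKETCH_PSE36_MASK	(0xfULL << SKETCH_PSE36_SHIFT)	/* PDE bits 13..16 */
#define SKETCH_PAGE_SHIFT	12

static uint64_t sketch_pse36_gfn_delta(uint32_t gpte)
{
	int shift = 32 - SKETCH_PSE36_SHIFT - SKETCH_PAGE_SHIFT;	/* 7 */

	return ((uint64_t)gpte & SKETCH_PSE36_MASK) << shift;
}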
/linux/arch/powerpc/include/asm/
kvm_book3s.h
186 struct kvmppc_pte *gpte, u64 root,
189 struct kvmppc_pte *gpte, u64 table,
192 struct kvmppc_pte *gpte, bool data, bool iswrite);
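
All three prototypes take a caller-provided struct kvmppc_pte for the translation routine to fill in. A hedged usage sketch for the last, xlate-style prototype; the mmu.xlate indirection, the 0-on-success convention and the wrapper function itself are assumptions for illustration:

/* Illustrative only: assumes kernel context, the book3s vcpu->arch.mmu.xlate
 * hook matching the (vcpu, eaddr, gpte, data, iswrite) prototype above, and
 * a 0-on-success return convention.  kvmppc_read_check_sketch() is made up. */
static int kvmppc_read_check_sketch(struct kvm_vcpu *vcpu, unsigned long eaddr)
{
	struct kvmppc_pte pte = { 0 };
	int ret;

	ret = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, true /* data access */,
				   false /* not a write */);
	if (ret)
		return ret;		/* no valid guest translation for eaddr */
	if (!pte.may_read)
		return -EPERM;		/* guest PTE denies read access */

	/* pte.raddr now holds the guest real address backing eaddr. */
	return 0;
}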
/linux/Documentation/virt/kvm/
mmu.rst
54 gpte guest pte (referring to gfns)
168 first or second 512-gpte block in the guest page table. For second-level
169 page tables, each 32-bit gpte is converted to two 64-bit sptes
275 protected, and synchronize sptes to gptes when a gpte is written.
339 - if successful, we can let the guest continue and modify the gpte
378 we cannot map the permissions for gpte.u=1, gpte.w=0 to any spte (the
396 spte.nx=gpte.nx back. For this to work, KVM forces EFER.NX to 1 when
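
The "512-gpte block" wording at line 168 comes down to simple size arithmetic; a standalone sketch (constant names are illustrative):

#include <stdint.h>

/* Why mmu.rst speaks of a "first or second 512-gpte block": a 4 KiB page of
 * 32-bit guest ptes holds 1024 entries, but a 4 KiB shadow page of 64-bit
 * sptes holds only 512, so one guest table is shadowed by two pages and the
 * shadow-page role records which half a given page covers. */
enum {
	GPTES_PER_32BIT_TABLE = 4096 / sizeof(uint32_t),	/* 1024 */
	SPTES_PER_SHADOW_PAGE = 4096 / sizeof(uint64_t),	/*  512 */
	SHADOW_PAGES_PER_GUEST_TABLE =
		GPTES_PER_32BIT_TABLE / SPTES_PER_SHADOW_PAGE,	/* 2 */
};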
locking.rst
82 | gpte = gfn1 |
84 | spte is the shadow page table entry corresponding with gpte and |
101 | | gpte is changed to point to |
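
The locking.rst excerpt describes the race the fast page fault path must tolerate: between reading an spte and making it writable, the guest may repoint the gpte from gfn1 to another gfn. A minimal sketch of the compare-and-exchange discipline the document prescribes for that update (helper name is illustrative, not the kernel's own):

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the rule described above: only set the writable bit with an
 * atomic compare-and-exchange against the value read earlier, so that a
 * concurrent zap/update (e.g. after the gpte was changed to point at a
 * different gfn) makes the update fail instead of publishing a stale
 * writable mapping. */
static bool sketch_fast_pf_fix_spte(uint64_t *sptep, uint64_t old_spte,
				    uint64_t new_spte)
{
	return __atomic_compare_exchange_n(sptep, &old_spte, new_spte,
					   false /* strong */,
					   __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
}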
