Occurrences of the identifier eaddr ("effective address") across the kernel tree, grouped by directory and file. Each match shows the line number within the file, the matching source line, and the enclosing function where the indexer reports one.

/arch/powerpc/kvm/
book3s_32_mmu.c
    75   static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr)    [in find_sr(), argument]
    77   return kvmppc_get_sr(vcpu, (eaddr >> 28) & 0xf);    [in find_sr()]
    90   return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);    [in kvmppc_mmu_book3s_32_ea_to_vp()]
    94   u32 sre, gva_t eaddr,    [in kvmppc_mmu_book3s_32_get_pteg(), argument]
    101  page = (eaddr & 0x0FFFFFFF) >> 12;    [in kvmppc_mmu_book3s_32_get_pteg()]
    152  data ? 'd' : 'i', i, eaddr, bat->bepi,    [in kvmppc_mmu_book3s_32_xlate_bat()]
    155  if ((eaddr & bat->bepi_mask) == bat->bepi) {    [in kvmppc_mmu_book3s_32_xlate_bat()]
    158  eaddr >> SID_SHIFT, &vsid);    [in kvmppc_mmu_book3s_32_xlate_bat()]
    160  pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid;    [in kvmppc_mmu_book3s_32_xlate_bat()]
    194  sre = find_sr(vcpu, eaddr);    [in kvmppc_mmu_book3s_32_xlate_pte()]
    [all …]
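The shifts and masks above follow the 32-bit Book3S segmented address layout: the top four bits of the effective address select one of the 16 segment registers, the next 16 bits are the page index within the 256 MiB segment, and the low 12 bits are the byte offset. A minimal userspace sketch of that decomposition, assuming 4 KiB pages; the helper names are illustrative, not the kernel's:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative decomposition of a 32-bit Book3S effective address,
 * mirroring the shifts/masks used in book3s_32_mmu.c. */
struct ea32 {
	uint32_t sr_index;   /* eaddr[31:28]: which of the 16 segment registers */
	uint32_t page_index; /* eaddr[27:12]: page number within the 256 MiB segment */
	uint32_t offset;     /* eaddr[11:0]:  byte offset within the 4 KiB page */
};

static struct ea32 decode_ea32(uint32_t eaddr)
{
	struct ea32 d = {
		.sr_index   = (eaddr >> 28) & 0xf,
		.page_index = (eaddr & 0x0FFFFFFF) >> 12,
		.offset     = eaddr & 0xfff,
	};
	return d;
}

/* Virtual page number built the same way as in
 * kvmppc_mmu_book3s_32_ea_to_vp(): low 16 bits of the page index
 * combined with the VSID shifted above them. */
static uint64_t ea32_to_vp(uint32_t eaddr, uint64_t vsid)
{
	return (((uint64_t)eaddr >> 12) & 0xffff) | (vsid << 16);
}

int main(void)
{
	uint32_t eaddr = 0xC0012345;
	struct ea32 d = decode_ea32(eaddr);

	printf("sr=%u page=0x%x off=0x%x vp=0x%llx\n",
	       d.sr_index, d.page_index, d.offset,
	       (unsigned long long)ea32_to_vp(eaddr, 0x1234));
	return 0;
}
```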
|
book3s_64_mmu.c
    29   gva_t eaddr)    [in kvmppc_mmu_book3s_64_find_slbe(), argument]
    32   u64 esid = GET_ESID(eaddr);    [in kvmppc_mmu_book3s_64_find_slbe()]
    33   u64 esid_1t = GET_ESID_1T(eaddr);    [in kvmppc_mmu_book3s_64_find_slbe()]
    49   eaddr, esid, esid_1t);    [in kvmppc_mmu_book3s_64_find_slbe()]
    75   eaddr &= kvmppc_slb_offset_mask(slb);    [in kvmppc_slb_calc_vpn()]
    77   return (eaddr >> VPN_SHIFT) |    [in kvmppc_slb_calc_vpn()]
    90   return kvmppc_slb_calc_vpn(slb, eaddr);    [in kvmppc_mmu_book3s_64_ea_to_vp()]
    117  struct kvmppc_slb *slbe, gva_t eaddr,    [in kvmppc_mmu_book3s_64_get_pteg(), argument]
    128  vpn = kvmppc_slb_calc_vpn(slbe, eaddr);    [in kvmppc_mmu_book3s_64_get_pteg()]
    213  gpte->eaddr = eaddr;    [in kvmppc_mmu_book3s_64_xlate()]
    [all …]
|
trace_pr.h
    39   __field( unsigned long, eaddr )
    49   __entry->eaddr = orig_pte->eaddr;
    57   __entry->flag_w, __entry->flag_x, __entry->eaddr,
    70   __field( ulong, eaddr )
    79   __entry->eaddr = pte->pte.eaddr;
    88   __entry->host_vpn, __entry->pfn, __entry->eaddr,
    99   __field( ulong, eaddr )
    108  __entry->eaddr = pte->pte.eaddr;
    117  __entry->host_vpn, __entry->pfn, __entry->eaddr,
|
book3s_32_mmu_host.c
    59   asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");    [in kvmppc_mmu_invalidate_pte()]
    112  page = (eaddr & ~ESID_MASK) >> 12;    [in kvmppc_mmu_get_pteg()]
    137  u32 eaddr = orig_pte->eaddr;    [in kvmppc_mmu_map_page(), local]
    157  vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);    [in kvmppc_mmu_map_page()]
    160  kvmppc_mmu_map_segment(vcpu, eaddr);    [in kvmppc_mmu_map_page()]
    167  ((eaddr & ~ESID_MASK) >> VPN_SHIFT);    [in kvmppc_mmu_map_page()]
    175  pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);    [in kvmppc_mmu_map_page()]
    193  pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |    [in kvmppc_mmu_map_page()]
    242  orig_pte->eaddr, (ulong)pteg, vpn,    [in kvmppc_mmu_map_page()]
    301  int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)    [in kvmppc_mmu_map_segment(), argument]
    [all …]
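Line 193 assembles the first word of a 32-bit hash PTE: the abbreviated page index (the top six bits of the page index, eaddr[27:22]), the VSID shifted into place, and the valid bit. A minimal sketch of that construction under stated assumptions; the PTE_V value here is a placeholder, not the kernel's definition, and the secondary-hash and protection bits visible in the real code are omitted:

```c
#include <stdint.h>
#include <stdio.h>

/* Placeholder for the kernel's valid bit in word 0 of a hash PTE;
 * assumed value for the sketch only. */
#define PTE_V 0x80000000u

/* Build PTE word 0 the way the expression at line 193 does:
 * API = eaddr[27:22], VSID shifted up by 7, plus the valid bit. */
static uint32_t hash_pte_word0(uint32_t eaddr, uint32_t vsid)
{
	return ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V;
}

int main(void)
{
	printf("pte0=0x%08x\n", (unsigned int)hash_pte_word0(0x10345678, 0x00abcd));
	return 0;
}
```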
|
e500_mmu.c
    92   eaddr > vcpu_e500->tlb1_max_eaddr)    [in kvmppc_e500_tlb_index()]
    104  if (eaddr < get_tlb_eaddr(tlbe))    [in kvmppc_e500_tlb_index()]
    107  if (eaddr > get_tlb_end(tlbe))    [in kvmppc_e500_tlb_index()]
    127  gva_t eaddr, int as)    [in kvmppc_e500_deliver_tlb_miss(), argument]
    155  gva_t eaddr;    [in kvmppc_recalc_tlb1map_range(), local]
    169  eaddr = get_tlb_eaddr(tlbe);    [in kvmppc_recalc_tlb1map_range()]
    173  eaddr = get_tlb_end(tlbe);    [in kvmppc_recalc_tlb1map_range()]
    435  u64 eaddr = get_tlb_eaddr(gtlbe);    [in kvmppc_e500_emul_tlbwe(), local]
    473  gva_t eaddr;    [in kvmppc_core_vcpu_translate(), local]
    477  eaddr = tr->linear_address;    [in kvmppc_core_vcpu_translate()]
    [all …]
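In kvmppc_e500_tlb_index() above, a guest TLB entry matches only when the effective address falls between the entry's start and end addresses. A self-contained sketch of that range test; the struct and helpers are simplified stand-ins for the kernel's get_tlb_eaddr()/get_tlb_end(), which derive the bounds from the MAS register image:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for a guest TLB entry: start of the mapping and
 * the number of bytes it covers. */
struct gtlb_entry {
	uint64_t eaddr; /* first effective address covered */
	uint64_t size;  /* bytes covered by the entry */
};

static uint64_t tlb_end(const struct gtlb_entry *e)
{
	return e->eaddr + e->size - 1; /* last covered address */
}

/* Mirror of the comparisons at lines 104 and 107: the entry is a hit
 * only when eaddr lies inside [start, end]. */
static bool tlb_covers(const struct gtlb_entry *e, uint64_t eaddr)
{
	if (eaddr < e->eaddr)
		return false;
	if (eaddr > tlb_end(e))
		return false;
	return true;
}

int main(void)
{
	struct gtlb_entry e = { .eaddr = 0x10000000, .size = 0x1000000 };

	printf("%d %d\n", tlb_covers(&e, 0x10abc000), tlb_covers(&e, 0x20000000));
	return 0;
}
```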
|
book3s_mmu_hpte.c
    26   static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)    [in kvmppc_mmu_hash_pte(), argument]
    28   return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);    [in kvmppc_mmu_hash_pte()]
    31   static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)    [in kvmppc_mmu_hash_pte_long(), argument]
    33   return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,    [in kvmppc_mmu_hash_pte_long()]
    66   index = kvmppc_mmu_hash_pte(pte->pte.eaddr);    [in kvmppc_mmu_hpte_cache_map()]
    70   index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);    [in kvmppc_mmu_hpte_cache_map()]
    157  if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)    [in kvmppc_mmu_pte_flush_page()]
    177  if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)    [in kvmppc_mmu_pte_flush_long()]
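kvmppc_mmu_hash_pte() buckets cached shadow PTEs by hashing the effective address (with its low bits dropped) through hash_64(). A rough userspace approximation is sketched below; PTE_SIZE_SHIFT and HPTEG_HASH_BITS_PTE are illustrative stand-ins for the kernel's constants, and hash_64_sketch() is a generic multiplicative hash, not the kernel's implementation:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the kernel defines its own values. */
#define PTE_SIZE_SHIFT       12
#define HPTEG_HASH_BITS_PTE  13

/* Rough stand-in for hash_64(): multiply by a fixed odd 64-bit
 * constant and keep the top 'bits' bits.  The kernel uses its own
 * golden-ratio multiplier. */
static uint64_t hash_64_sketch(uint64_t val, unsigned int bits)
{
	return (val * 0x9E3779B97F4A7C15ull) >> (64 - bits);
}

/* Mirrors the shape of kvmppc_mmu_hash_pte(): drop the low
 * PTE_SIZE_SHIFT bits of the effective address, then hash the result
 * into a table index of HPTEG_HASH_BITS_PTE bits. */
static uint64_t hash_pte(uint64_t eaddr)
{
	return hash_64_sketch(eaddr >> PTE_SIZE_SHIFT, HPTEG_HASH_BITS_PTE);
}

int main(void)
{
	printf("bucket=%llu\n", (unsigned long long)hash_pte(0xc000000012345000ull));
	return 0;
}
```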
|
book3s_64_mmu_host.c
    108  vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);    [in kvmppc_mmu_map_page()]
    111  ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);    [in kvmppc_mmu_map_page()]
    117  vsid, orig_pte->eaddr);    [in kvmppc_mmu_map_page()]
    123  vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);    [in kvmppc_mmu_map_page()]
    219  vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);    [in kvmppc_mmu_unmap_page()]
    312  int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)    [in kvmppc_mmu_map_segment(), argument]
    315  u64 esid = eaddr >> SID_SHIFT;    [in kvmppc_mmu_map_segment()]
    316  u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;    [in kvmppc_mmu_map_segment()]
    323  slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);    [in kvmppc_mmu_map_segment()]
|
book3s_64_mmu_radix.c
    36   gva_t eaddr, void *to, void *from,    [in __kvmhv_copy_tofrom_guest_radix(), argument]
    52   if (eaddr & (0xFFFUL << 52))    [in __kvmhv_copy_tofrom_guest_radix()]
    59   from = (void *) (eaddr | (quadrant << 62));    [in __kvmhv_copy_tofrom_guest_radix()]
    61   to = (void *) (eaddr | (quadrant << 62));    [in __kvmhv_copy_tofrom_guest_radix()]
    106  if (eaddr & (0x3FFUL << 52))    [in kvmhv_copy_tofrom_guest_radix()]
    114  if (((eaddr >> 62) & 0x3) == 0x3)    [in kvmhv_copy_tofrom_guest_radix()]
    119  eaddr &= ~(0xFFFUL << 52);    [in kvmhv_copy_tofrom_guest_radix()]
    172  index = (eaddr >> offset) & ((1UL << bits) - 1);    [in kvmppc_mmu_walk_radix_tree()]
    207  gpa |= eaddr & ((1ul << offset) - 1);    [in kvmppc_mmu_walk_radix_tree()]
    214  gpte->eaddr = eaddr;    [in kvmppc_mmu_walk_radix_tree()]
    [all …]
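__kvmhv_copy_tofrom_guest_radix() reaches the guest's address space through a radix "quadrant": the checks above require the effective address to leave bits 63:52 clear, and the quadrant number is then OR'd into bits 63:62. A small sketch of that composition; the error handling is simplified and everything beyond the visible masks is an assumption:

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Compose an address in a radix quadrant, following the checks visible
 * above: the effective address must not use bits 63:52, and the
 * quadrant number occupies bits 63:62.  Returns 0 on a malformed
 * input; the real code reports a proper error instead.
 */
static uint64_t quadrant_addr(uint64_t eaddr, unsigned int quadrant)
{
	if (eaddr & (0xFFFULL << 52))
		return 0;	/* eaddr collides with the quadrant bits */
	if (quadrant > 3)
		return 0;	/* only four quadrants exist */
	return eaddr | ((uint64_t)quadrant << 62);
}

int main(void)
{
	/* Place an example guest address into quadrant 1. */
	printf("0x%016llx\n",
	       (unsigned long long)quadrant_addr(0x0000000012340000ull, 1));
	return 0;
}
```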
|
e500_mmu_host.c
    108  static u32 get_host_mas0(unsigned long eaddr)    [in get_host_mas0(), argument]
    118  asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));    [in get_host_mas0()]
    548  void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,    [in kvmppc_mmu_map(), argument]
    568  &priv->ref, eaddr, &stlbe);    [in kvmppc_mmu_map()]
    575  kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,    [in kvmppc_mmu_map()]
    593  hva_t eaddr;    [in kvmppc_load_last_inst(), local]
    670  eaddr = (unsigned long)kmap_atomic(page);    [in kvmppc_load_last_inst()]
    671  *instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK));    [in kvmppc_load_last_inst()]
    672  kunmap_atomic((u32 *)eaddr);    [in kvmppc_load_last_inst()]
|
booke.c
    1284  unsigned long eaddr = vcpu->arch.fault_dear;    [in kvmppc_handle_exit(), local]
    1301  gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);    [in kvmppc_handle_exit()]
    1332  vcpu->arch.vaddr_accessed = eaddr;    [in kvmppc_handle_exit()]
    1342  unsigned long eaddr = vcpu->arch.regs.nip;    [in kvmppc_handle_exit(), local]
    1350  gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);    [in kvmppc_handle_exit()]
    1989  pte->eaddr = eaddr;    [in kvmppc_xlate()]
    1991  (eaddr & ~PAGE_MASK);    [in kvmppc_xlate()]
    1992  pte->vpage = eaddr >> PAGE_SHIFT;    [in kvmppc_xlate()]
    2004  gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);    [in kvmppc_xlate()]
    2019  pte->eaddr = eaddr;    [in kvmppc_xlate()]
    [all …]
|
e500mc.c
    60   gva_t eaddr;    [in kvmppc_e500_tlbil_one(), local]
    69   eaddr = get_tlb_eaddr(gtlbe);    [in kvmppc_e500_tlbil_one()]
    76   asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));    [in kvmppc_e500_tlbil_one()]
|
e500.c
    238  u32 val, eaddr;    [in kvmppc_e500_tlbil_one(), local]
    270  eaddr = get_tlb_eaddr(gtlbe);    [in kvmppc_e500_tlbil_one()]
    275  asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));    [in kvmppc_e500_tlbil_one()]
|
book3s.c
    451  int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,    [in kvmppc_xlate(), argument]
    460  r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);    [in kvmppc_xlate()]
    462  pte->eaddr = eaddr;    [in kvmppc_xlate()]
    463  pte->raddr = eaddr & KVM_PAM;    [in kvmppc_xlate()]
    464  pte->vpage = VSID_REAL | eaddr >> 12;    [in kvmppc_xlate()]
    473  ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))    [in kvmppc_xlate()]
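Lines 462 to 464 show the path taken when no guest MMU translation is applied: the effective address itself becomes the real address, masked to the guest-addressable range (KVM_PAM), and the virtual page is tagged with the VSID_REAL marker. A hedged sketch of that fallback, with KVM_PAM_SKETCH and VSID_REAL_SKETCH as placeholder values rather than the kernel's definitions:

```c
#include <stdint.h>
#include <stdio.h>

/* Placeholder values for the sketch; the kernel defines its own
 * KVM_PAM mask and VSID_REAL marker. */
#define KVM_PAM_SKETCH   0x0fffffffffffffffull
#define VSID_REAL_SKETCH (1ull << 59)

struct xlate_pte {
	uint64_t eaddr;  /* guest effective address */
	uint64_t raddr;  /* resulting real (guest-physical) address */
	uint64_t vpage;  /* virtual page identifier */
};

/* Mirror of the untranslated path in kvmppc_xlate(): no page tables
 * are consulted, the effective address is clamped into the
 * addressable range and tagged as a "real" vpage. */
static void xlate_real_mode(uint64_t eaddr, struct xlate_pte *pte)
{
	pte->eaddr = eaddr;
	pte->raddr = eaddr & KVM_PAM_SKETCH;
	pte->vpage = VSID_REAL_SKETCH | (eaddr >> 12);
}

int main(void)
{
	struct xlate_pte pte;

	xlate_real_mode(0x0000000000402abcull, &pte);
	printf("raddr=0x%llx vpage=0x%llx\n",
	       (unsigned long long)pte.raddr, (unsigned long long)pte.vpage);
	return 0;
}
```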
|
book3s_64_mmu_hv.c
    312  gva_t eaddr)    [in kvmppc_mmu_book3s_hv_find_slbe(), argument]
    326  if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)    [in kvmppc_mmu_book3s_hv_find_slbe()]
    341  static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,    [in kvmppc_mmu_book3s_64_hv_xlate(), argument]
    354  return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite);    [in kvmppc_mmu_book3s_64_hv_xlate()]
    358  slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);    [in kvmppc_mmu_book3s_64_hv_xlate()]
    369  index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,    [in kvmppc_mmu_book3s_64_hv_xlate()]
    384  gpte->eaddr = eaddr;    [in kvmppc_mmu_book3s_64_hv_xlate()]
    385  gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);    [in kvmppc_mmu_book3s_64_hv_xlate()]
    407  gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);    [in kvmppc_mmu_book3s_64_hv_xlate()]
|
book3s_hv_rm_mmu.c
    1063  unsigned long eaddr, unsigned long slb_v, long mmio_update)    [in mmio_cache_search(), argument]
    1073  if ((entry->eaddr >> pshift) == (eaddr >> pshift) &&    [in mmio_cache_search()]
    1097  long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,    [in kvmppc_hv_find_lock_hpte(), argument]
    1126  hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvmppc_hpt_mask(&kvm->arch.hpt);    [in kvmppc_hv_find_lock_hpte()]
    1128  avpn |= (eaddr & somask) >> 16;    [in kvmppc_hv_find_lock_hpte()]
    1285  cache_entry->eaddr = addr;    [in kvmppc_hpte_hv_fault()]
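Line 1126 is the hashed-page-table primary hash: the VSID is XOR'd with the page index taken from the offset within the segment, then masked to the table size. A standalone sketch of that computation, assuming a 256 MiB (non-1T) segment so the offset mask is 0x0fffffff, and 4 KiB pages so the page shift is 12; hpt_mask stands in for kvmppc_hpt_mask():

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Primary hash for a hashed page table, following the expression at
 * line 1126.  Sketch assumptions: 256 MiB segment (offset mask
 * 0x0fffffff) and 4 KiB base pages (page shift 12); hpt_mask is the
 * number of PTE groups minus one.
 */
static uint64_t hpt_primary_hash(uint64_t vsid, uint64_t eaddr, uint64_t hpt_mask)
{
	const uint64_t somask = 0x0fffffffULL; /* offset within the 256 MiB segment */
	const unsigned int pshift = 12;        /* 4 KiB base page size */

	return (vsid ^ ((eaddr & somask) >> pshift)) & hpt_mask;
}

int main(void)
{
	/* Example: a table with 2^18 PTE groups. */
	uint64_t hpt_mask = (1ULL << 18) - 1;

	printf("pteg=0x%llx\n",
	       (unsigned long long)hpt_primary_hash(0x123456ULL,
						     0xc000000001234000ULL,
						     hpt_mask));
	return 0;
}
```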
|
powerpc.c
    348  int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,    [in kvmppc_st(), argument]
    358  r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,    [in kvmppc_st()]
    364  r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,    [in kvmppc_st()]
    369  *eaddr = pte.raddr;    [in kvmppc_st()]
    379  magic += pte.eaddr & 0xfff;    [in kvmppc_st()]
    391  int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,    [in kvmppc_ld(), argument]
    401  rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,    [in kvmppc_ld()]
    407  rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,    [in kvmppc_ld()]
    412  *eaddr = pte.raddr;    [in kvmppc_ld()]
    425  magic += pte.eaddr & 0xfff;    [in kvmppc_ld()]
|
book3s_pr.c
    681  ulong eaddr, int vec)    [in kvmppc_handle_pagefault(), argument]
    699  page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);    [in kvmppc_handle_pagefault()]
    704  pte.raddr = eaddr & KVM_PAM;    [in kvmppc_handle_pagefault()]
    705  pte.eaddr = eaddr;    [in kvmppc_handle_pagefault()]
    706  pte.vpage = eaddr >> 12;    [in kvmppc_handle_pagefault()]
    722  vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);    [in kvmppc_handle_pagefault()]
    755  kvmppc_core_queue_data_storage(vcpu, 0, eaddr, flags);    [in kvmppc_handle_pagefault()]
    787  vcpu->arch.vaddr_accessed = pte.eaddr;    [in kvmppc_handle_pagefault()]
|
book3s_hv_nested.c
    601  gva_t eaddr = kvmppc_get_gpr(vcpu, 6);    [in kvmhv_copy_tofrom_guest_nested(), local]
    612  if (eaddr & (0xFFFUL << 52))    [in kvmhv_copy_tofrom_guest_nested()]
    630  eaddr, buf, NULL, n);    [in kvmhv_copy_tofrom_guest_nested()]
    650  eaddr, NULL, buf, n);    [in kvmhv_copy_tofrom_guest_nested()]
|
/arch/arm64/kernel/
compat_alignment.c
    118  unsigned long eaddr, newaddr;    [in do_alignment_ldmstm(), local]
    125  newaddr = eaddr = regs->regs[rn];    [in do_alignment_ldmstm()]
    131  eaddr = newaddr;    [in do_alignment_ldmstm()]
    134  eaddr += 4;    [in do_alignment_ldmstm()]
    140  if (get_user(val, (u32 __user *)eaddr))    [in do_alignment_ldmstm()]
    155  if (put_user(val, (u32 __user *)eaddr))    [in do_alignment_ldmstm()]
    158  eaddr += 4;    [in do_alignment_ldmstm()]
|
/arch/arm/mm/
alignment.c
    503  unsigned long eaddr, newaddr;    [in do_alignment_ldmstm(), local]
    517  newaddr = eaddr = regs->uregs[rn];    [in do_alignment_ldmstm()]
    523  eaddr = newaddr;    [in do_alignment_ldmstm()]
    526  eaddr += 4;    [in do_alignment_ldmstm()]
    540  if (addr != eaddr) {    [in do_alignment_ldmstm()]
    543  instruction_pointer(regs), instr, addr, eaddr);    [in do_alignment_ldmstm()]
    555  get32t_unaligned_check(val, eaddr);    [in do_alignment_ldmstm()]
    559  eaddr += 4;    [in do_alignment_ldmstm()]
    568  get32_unaligned_check(val, eaddr);    [in do_alignment_ldmstm()]
    571  put32_unaligned_check(regs->uregs[rd], eaddr);    [in do_alignment_ldmstm()]
    [all …]
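In both alignment handlers, do_alignment_ldmstm() fixes up a misaligned LDM/STM by walking the register list by hand: eaddr starts at the base register's value and advances by four bytes per transferred register, while newaddr tracks the writeback value. A simplified userspace sketch of that walk over a 16-bit register mask; memory access is stubbed out, and writeback and the increment/decrement addressing modes are omitted:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the faulting task's registers and memory. */
static uint32_t regs[16];
static uint32_t mem[64];

/*
 * Simplified emulation of an LDM (load-multiple) in the spirit of
 * do_alignment_ldmstm(): for every bit set in the 16-bit register
 * list, load one 32-bit word from eaddr into that register and
 * advance eaddr by 4.  The real code also handles STM, writeback,
 * and the four addressing modes.
 */
static void emulate_ldm(unsigned int rn, uint16_t reglist)
{
	uint32_t eaddr = regs[rn];	/* base register supplies the start address */

	for (unsigned int rd = 0; rd < 16; rd++) {
		if (!(reglist & (1u << rd)))
			continue;
		regs[rd] = mem[eaddr / 4];	/* stubbed "memory" access */
		eaddr += 4;			/* next word for the next register */
	}
}

int main(void)
{
	for (unsigned int i = 0; i < 64; i++)
		mem[i] = i;
	regs[13] = 16;			/* base address held in r13 */

	emulate_ldm(13, 0x000f);	/* load r0-r3 starting at the base address */
	printf("r0=%u r1=%u r2=%u r3=%u\n", regs[0], regs[1], regs[2], regs[3]);
	return 0;
}
```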
|
/arch/powerpc/platforms/pseries/
ras.c
    572  unsigned long eaddr = 0, paddr = 0;    [in mce_handle_err_virtmode(), local]
    642  eaddr = be64_to_cpu(mce_log->effective_address);    [in mce_handle_err_virtmode()]
    649  pfn = addr_to_pfn(regs, eaddr);    [in mce_handle_err_virtmode()]
    670  eaddr = be64_to_cpu(mce_log->effective_address);    [in mce_handle_err_virtmode()]
    687  eaddr = be64_to_cpu(mce_log->effective_address);    [in mce_handle_err_virtmode()]
    704  eaddr = be64_to_cpu(mce_log->effective_address);    [in mce_handle_err_virtmode()]
    725  eaddr = be64_to_cpu(mce_log->effective_address);    [in mce_handle_err_virtmode()]
    734  &mce_err, regs->nip, eaddr, paddr);    [in mce_handle_err_virtmode()]
|
/arch/powerpc/include/asm/
kvm_book3s.h
    156  extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
    157  extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
    161  extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
    180  gva_t eaddr, void *to, void *from,
    182  extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
    184  extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
    186  extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
    189  extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
    192  extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
    227  extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
|
kvm_host.h
    377  ulong eaddr;    [member]
    394  int (*slbfee)(struct kvm_vcpu *vcpu, gva_t eaddr, ulong *ret_slb);
    400  int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
    404  u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
    472  unsigned long eaddr;    [member]
|
kvm_ppc.h
    91   extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
    93   extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
    110  extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
    111  extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
    113  gva_t eaddr);
    116  extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
    312  int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
    314  int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
|
/arch/arm64/kvm/vgic/
vgic-its.c
    871   gpa_t *eaddr)    [in vgic_its_check_id(), argument]
    901   if (eaddr)    [in vgic_its_check_id()]
    902   *eaddr = addr;    [in vgic_its_check_id()]
    931   if (eaddr)    [in vgic_its_check_id()]
    932   *eaddr = indirect_ptr;    [in vgic_its_check_id()]
    2360  gpa_t eaddr;    [in vgic_its_save_device_tables(), local]
    2363  dev->device_id, &eaddr))    [in vgic_its_save_device_tables()]
    2370  ret = vgic_its_save_dte(its, dev, eaddr);    [in vgic_its_save_device_tables()]
|