/linux-master/arch/powerpc/kvm/
book3s_32_mmu.c
    69  static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
    75  static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr)  [argument]
    77      return kvmppc_get_sr(vcpu, (eaddr >> 28) & 0xf);
    80  static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,  [argument]
    86      if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false))
    89      kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
    90      return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);
    94          u32 sre, gva_t eaddr,
   101      page = (eaddr & 0x0FFFFFFF) >> 12;
   111      dprintk("MMU: pc=0x%lx eaddr ...
    93  kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu, u32 sre, gva_t eaddr, bool primary)  [argument]
   121  kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary)  [argument]
   127  kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data, bool iswrite)  [argument]
   182  kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data, bool iswrite, bool primary)  [argument]
   290  kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data, bool iswrite)  [argument]
   [all ...]
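The hits above show how the 32-bit Book3S MMU slices an effective address: the top nibble (line 77) indexes one of sixteen segment registers, and the virtual page (line 90) combines the VSID from that segment with bits 12-27 of the address. A minimal userspace sketch of that arithmetic; the VSID value is a made-up stand-in for a real segment-register lookup:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t eaddr = 0xdeadb000;        /* example effective address */
            uint32_t sr = (eaddr >> 28) & 0xf;  /* top nibble: 1 of 16 SRs */
            uint64_t vsid = 0x12345;            /* stand-in VSID */

            /* virtual page: 16-bit page index within the segment, VSID above it */
            uint64_t vp = (((uint64_t)eaddr >> 12) & 0xffff) | (vsid << 16);

            printf("SR %u, vp 0x%llx\n", (unsigned)sr, (unsigned long long)vp);
            return 0;
    }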
book3s_64_mmu.c
    29          gva_t eaddr)
    32      u64 esid = GET_ESID(eaddr);
    33      u64 esid_1t = GET_ESID_1T(eaddr);
    49          eaddr, esid, esid_1t);
    73  static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)  [argument]
    75      eaddr &= kvmppc_slb_offset_mask(slb);
    77      return (eaddr >> VPN_SHIFT) |
    81  static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,  [argument]
    86      slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
    90      return kvmppc_slb_calc_vpn(slb, eaddr);
    27  kvmppc_mmu_book3s_64_find_slbe(struct kvm_vcpu *vcpu, gva_t eaddr)  [argument]
   109  kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)  [argument]
   116  kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu, struct kvmppc_slb *slbe, gva_t eaddr, bool second)  [argument]
   155  kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)  [argument]
   191  kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite)  [argument]
   412  kvmppc_mmu_book3s_64_slbfee(struct kvm_vcpu *vcpu, gva_t eaddr, ulong *ret_slb)  [argument]
   [all ...]
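GET_ESID() and GET_ESID_1T() (lines 32-33) reduce an effective address to its segment index. A sketch with the two shift counts written out; the constants reflect the standard Book3S-64 segment sizes (256 MiB and 1 TiB) and are assumed here rather than taken from the header:

    #include <stdint.h>
    #include <stdio.h>

    #define SID_SHIFT     28   /* 256 MiB segments */
    #define SID_SHIFT_1T  40   /* 1 TiB segments */

    int main(void)
    {
            uint64_t eaddr = 0x0000123456789000ULL;

            uint64_t esid    = eaddr >> SID_SHIFT;     /* 256M segment index */
            uint64_t esid_1t = eaddr >> SID_SHIFT_1T;  /* 1T segment index */

            printf("esid 0x%llx, esid_1t 0x%llx\n",
                   (unsigned long long)esid, (unsigned long long)esid_1t);
            return 0;
    }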
trace_pr.h
    39      __field( unsigned long, eaddr )
    49      __entry->eaddr = orig_pte->eaddr;
    57      __entry->flag_w, __entry->flag_x, __entry->eaddr,
    70      __field( ulong, eaddr )
    79      __entry->eaddr = pte->pte.eaddr;
    88      __entry->host_vpn, __entry->pfn, __entry->eaddr,
    99      __field( ulong, eaddr )
   108      __entry->eaddr ...
   [all ...]
book3s_32_mmu_host.c
    59      asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
   106  static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,  [argument]
   112      page = (eaddr & ~ESID_MASK) >> 12;
   138      u32 eaddr = orig_pte->eaddr;  [local]
   158      vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
   161      kvmppc_mmu_map_segment(vcpu, eaddr);
   168          ((eaddr & ~ESID_MASK) >> VPN_SHIFT);
   176      pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);
   194      pteg0 = ((eaddr ...
   302  kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)  [argument]
   [all ...]
e500_mmu.c
    81          gva_t eaddr, int tlbsel, unsigned int pid, int as)
    88      set_base = gtlb0_set_base(vcpu_e500, eaddr);
    91      if (eaddr < vcpu_e500->tlb1_min_eaddr ||
    92          eaddr > vcpu_e500->tlb1_max_eaddr)
   104      if (eaddr < get_tlb_eaddr(tlbe))
   107      if (eaddr > get_tlb_end(tlbe))
   127          gva_t eaddr, int as)
   143      vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN) ...
   155      gva_t eaddr;  [local]
   169      eaddr ...
    80  kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t eaddr, int tlbsel, unsigned int pid, int as)  [argument]
   126  kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, gva_t eaddr, int as)  [argument]
   435      u64 eaddr = get_tlb_eaddr(gtlbe);  [local]
   453  kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid, int as)  [argument]
   473      gva_t eaddr;  [local]
   495  kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)  [argument]
   502  kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)  [argument]
   523  kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index, gva_t eaddr)  [argument]
   [all ...]
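gtlb0_set_base() (line 88) picks the guest TLB0 set an address falls into. A sketch of set selection for a set-associative TLB, assuming 4 KiB pages and a power-of-two set count; the real geometry lives in vcpu_e500 and is not reproduced here:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed 4 KiB pages */

    /* First entry of the set an address maps to, for a set-associative
     * TLB with a power-of-two number of sets. */
    static unsigned int set_base(uint32_t eaddr, unsigned int sets,
                                 unsigned int ways)
    {
            return ((eaddr >> PAGE_SHIFT) & (sets - 1)) * ways;
    }

    int main(void)
    {
            printf("set base %u\n", set_base(0x10023000, 128, 2));
            return 0;
    }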
book3s_mmu_hpte.c
    26  static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)  [argument]
    28      return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
    31  static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)  [argument]
    33      return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
    66      index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
    70      index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
   163      if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
   183      if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
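kvmppc_mmu_hash_pte() buckets shadow PTEs by effective page number with the kernel's hash_64(). A sketch with hash_64() open-coded as the usual multiplicative hash; the golden-ratio constant is the one current kernels use, and PTE_SHIFT / HASH_BITS are stand-ins for the file's PTE_SIZE and HPTEG_HASH_BITS_PTE:

    #include <stdint.h>
    #include <stdio.h>

    #define GOLDEN_RATIO_64 0x61C8864680B583EBULL
    #define PTE_SHIFT 12   /* stand-in for PTE_SIZE */
    #define HASH_BITS 13   /* stand-in for HPTEG_HASH_BITS_PTE */

    /* The kernel's multiplicative hash, open-coded. */
    static uint64_t hash_64(uint64_t val, unsigned int bits)
    {
            return (val * GOLDEN_RATIO_64) >> (64 - bits);
    }

    int main(void)
    {
            uint64_t eaddr = 0xc000000001234000ULL;

            printf("bucket %llu\n",
                   (unsigned long long)hash_64(eaddr >> PTE_SHIFT, HASH_BITS));
            return 0;
    }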
book3s_64_mmu_host.c
   107      vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
   110      ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
   116          vsid, orig_pte->eaddr);
   122      vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);
   219      vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
   312  int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)  [argument]
   315      u64 esid = eaddr >> SID_SHIFT;
   316      u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
   323      slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);
book3s_64_mmu_radix.c
    36          gva_t eaddr, void *to, void *from,
    48      return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
    52      if (eaddr & (0xFFFUL << 52))
    59      from = (void *) (eaddr | (quadrant << 62));
    61      to = (void *) (eaddr | (quadrant << 62));
    99  static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,  [argument]
   106      if (eaddr & (0x3FFUL << 52))
   114      if (((eaddr >> 62) & 0x3) == 0x3)
   119      eaddr &= ~(0xFFFUL << 52);
   121      return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, t...
    35  __kvmhv_copy_tofrom_guest_radix(int lpid, int pid, gva_t eaddr, void *to, void *from, unsigned long n)  [argument]
   124  kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to, unsigned long n)  [argument]
   136  kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from, unsigned long n)  [argument]
   142  kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, u64 root, u64 *pte_ret_p)  [argument]
   238  kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, u64 table, int table_index, u64 *pte_ret_p)  [argument]
   269  kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite)  [argument]
   [all ...]
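The radix copy helpers address guest memory through one of four "quadrants" encoded in the top two address bits (lines 59 and 61), after checking that the high bits are clear (lines 52 and 119). A sketch of that encoding:

    #include <stdint.h>
    #include <stdio.h>

    /* Place a guest effective address into one of the four radix
     * quadrants by setting the top two address bits. */
    static uint64_t quadrant_addr(uint64_t eaddr, unsigned int quadrant)
    {
            eaddr &= ~(0xFFFULL << 52);   /* high bits must be clear first */
            return eaddr | ((uint64_t)quadrant << 62);
    }

    int main(void)
    {
            printf("0x%llx\n", (unsigned long long)quadrant_addr(0x4000, 1));
            return 0;
    }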
e500_mmu_host.c
   105  static u32 get_host_mas0(unsigned long eaddr)  [argument]
   115      asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
   586  void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,  [argument]
   606          &priv->ref, eaddr, &stlbe);
   613      kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
   631      hva_t eaddr;  [local]
   708      eaddr = (unsigned long)kmap_atomic(page);
   709      *instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK));
   710      kunmap_atomic((u32 *)eaddr);
e500.c
   238      u32 val, eaddr;  [local]
   270      eaddr = get_tlb_eaddr(gtlbe);
   275      asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
e500mc.c
    60      gva_t eaddr;  [local]
    69      eaddr = get_tlb_eaddr(gtlbe);
    76      asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));
booke.c
  1284      unsigned long eaddr = vcpu->arch.fault_dear;  [local]
  1291          (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
  1301      gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
  1315      gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
  1325      kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
  1332      vcpu->arch.vaddr_accessed = eaddr;
  1342      unsigned long eaddr = vcpu->arch.regs.nip;  [local]
  1350      gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
  1363      gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
  1373      kvmppc_mmu_map(vcpu, eaddr, gpadd...
  1980  kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid, enum xlate_readwrite xlrw, struct kvmppc_pte *pte)  [argument]
   [all ...]
book3s.c
   454  int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,  [argument]
   463      r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
   465      pte->eaddr = eaddr;
   466      pte->raddr = eaddr & KVM_PAM;
   467      pte->vpage = VSID_REAL | eaddr >> 12;
   476          ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
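When translation is off, kvmppc_xlate() falls back to an identity map (lines 465-467): the real address is the effective address masked to the addressable range, and the virtual page is tagged as real-mode. A sketch of that fallback; PA_MASK and VP_REAL are illustrative stand-ins for KVM_PAM and VSID_REAL, not the kernel's values:

    #include <stdint.h>
    #include <stdio.h>

    #define PA_MASK  0x0FFFFFFFFFFFFFFFULL  /* stand-in for KVM_PAM */
    #define VP_REAL  (1ULL << 63)           /* stand-in real-mode vpage tag */

    struct pte { uint64_t eaddr, raddr, vpage; };

    /* With the MMU off, translation is the identity map, clipped to the
     * addressable physical range and tagged as real-mode. */
    static void xlate_real(struct pte *p, uint64_t eaddr)
    {
            p->eaddr = eaddr;
            p->raddr = eaddr & PA_MASK;
            p->vpage = VP_REAL | (eaddr >> 12);
    }

    int main(void)
    {
            struct pte p;

            xlate_real(&p, 0xc000000000001234ULL);
            printf("raddr 0x%llx\n", (unsigned long long)p.raddr);
            return 0;
    }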
book3s_64_mmu_hv.c
   312          gva_t eaddr)
   326      if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
   341  static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,  [argument]
   354      return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite);
   358      slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
   369      index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
   384      gpte->eaddr = eaddr;
   385      gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
   407      gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
   311  kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu, gva_t eaddr)  [argument]
   [all ...]
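Line 326 is the classic branch-free SLB compare: XOR leaves set bits only where the stored and faulting addresses differ, so masking the ESID bits and testing for zero decides a match. A sketch, assuming a 256 MiB segment mask:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* An SLB entry matches when the ESID bits of the stored effective
     * address equal those of the faulting address. */
    static bool slb_matches(uint64_t orige, uint64_t eaddr, uint64_t esid_mask)
    {
            return ((orige ^ eaddr) & esid_mask) == 0;
    }

    int main(void)
    {
            uint64_t mask_256m = ~((1ULL << 28) - 1);  /* 256 MiB segment */

            printf("%d\n", slb_matches(0x10000000, 0x10abc000, mask_256m));
            return 0;
    }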
/linux-master/fs/freevxfs/
vxfs_olt.c
    58      char *oaddr, *eaddr;  [local]
    81      eaddr = bp->b_data + (infp->vsi_oltsize * sbp->s_blocksize);
    83      while (oaddr < eaddr) {
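In freevxfs, eaddr is not an "effective address" at all, just the end pointer of the object-location table; records are walked until the cursor reaches it. A sketch of the pattern with an invented one-byte record header (the real on-disk layout is not shown in these hits):

    #include <stdio.h>

    struct rec { unsigned char size; };   /* hypothetical record header */

    static int walk(char *oaddr, unsigned long bytes)
    {
            char *eaddr = oaddr + bytes;   /* one past the last valid byte */
            int count = 0;

            while (oaddr < eaddr) {
                    struct rec *r = (struct rec *)oaddr;

                    if (!r->size)
                            break;         /* guard against zero-length records */
                    oaddr += r->size;
                    count++;
            }
            return count;
    }

    int main(void)
    {
            char buf[16] = { 8, 0, 0, 0, 0, 0, 0, 0, 8 };

            printf("%d records\n", walk(buf, sizeof(buf)));
            return 0;
    }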
/linux-master/arch/arm64/kernel/
compat_alignment.c
   118      unsigned long eaddr, newaddr;  [local]
   125      newaddr = eaddr = regs->regs[rn];
   131          eaddr = newaddr;
   134          eaddr += 4;
   140      if (get_user(val, (u32 __user *)eaddr))
   155      if (put_user(val, (u32 __user *)eaddr))
   158          eaddr += 4;
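The alignment fixup walks an LDM/STM-style register list, touching one word per set bit and stepping the effective address by 4 each time (lines 134 and 158). A sketch of the same loop, with an array standing in for user memory and get_user():

    #include <stdint.h>
    #include <stdio.h>

    /* One word per set bit in the register list; the effective address
     * advances by 4 for each transferred register. */
    static void emulate_ldm(uint32_t eaddr, uint16_t reglist,
                            uint32_t *regs, const uint32_t *mem)
    {
            for (int rd = 0; rd < 16; rd++) {
                    if (!(reglist & (1u << rd)))
                            continue;
                    regs[rd] = mem[eaddr / 4];   /* stands in for get_user() */
                    eaddr += 4;
            }
    }

    int main(void)
    {
            uint32_t mem[4] = { 1, 2, 3, 4 }, regs[16] = { 0 };

            emulate_ldm(0, 0x000f, regs, mem);   /* "load" r0-r3 */
            printf("r3 = %u\n", regs[3]);
            return 0;
    }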
/linux-master/drivers/edac/
igen6_edac.c
   141      u64 (*err_addr_to_sys_addr)(u64 eaddr, int mc);
   143      u64 (*err_addr_to_imc_addr)(u64 eaddr, int mc);
   301  static u64 ehl_err_addr_to_sys_addr(u64 eaddr, int mc)  [argument]
   303      return eaddr;
   306  static u64 ehl_err_addr_to_imc_addr(u64 eaddr, int mc)  [argument]
   308      if (eaddr < igen6_tolud)
   309          return eaddr;
   312      return eaddr + igen6_tolud - _4GB;
   314      if (eaddr < _4GB)
   315          return eaddr ...
   400  tgl_err_addr_to_mem_addr(u64 eaddr, int mc)  [argument]
   423  tgl_err_addr_to_sys_addr(u64 eaddr, int mc)  [argument]
   430  tgl_err_addr_to_imc_addr(u64 eaddr, int mc)  [argument]
   435  adl_err_addr_to_sys_addr(u64 eaddr, int mc)  [argument]
   440  adl_err_addr_to_imc_addr(u64 eaddr, int mc)  [argument]
   847      u64 eaddr;  [local]
   [all ...]
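The Elkhart Lake callbacks (lines 306-315) translate an error address to a memory-controller address around the PCI hole: addresses below TOLUD (top of low usable DRAM) map 1:1, and DRAM displaced above 4 GB shifts back down by the hole's size. A sketch of that remap; the TOLUD value is an example, not read from hardware:

    #include <stdint.h>
    #include <stdio.h>

    #define _4GB (1ULL << 32)

    /* Below TOLUD the system address is the iMC address; above 4 GB the
     * displaced DRAM shifts down by the size of the PCI hole. */
    static uint64_t sys_to_imc_addr(uint64_t eaddr, uint64_t tolud)
    {
            if (eaddr < tolud)
                    return eaddr;
            return eaddr - (_4GB - tolud);
    }

    int main(void)
    {
            uint64_t tolud = 0xC0000000;   /* example: 3 GB */

            printf("0x%llx\n",
                   (unsigned long long)sys_to_imc_addr(_4GB, tolud));
            return 0;
    }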
/linux-master/arch/powerpc/platforms/pseries/
ras.c
   572      unsigned long eaddr = 0, paddr = 0;  [local]
   642      eaddr = be64_to_cpu(mce_log->effective_address);
   649      pfn = addr_to_pfn(regs, eaddr);
   670      eaddr = be64_to_cpu(mce_log->effective_address);
   687      eaddr = be64_to_cpu(mce_log->effective_address);
   704      eaddr = be64_to_cpu(mce_log->effective_address);
   725      eaddr = be64_to_cpu(mce_log->effective_address);
   734          &mce_err, regs->nip, eaddr, paddr);
/linux-master/arch/arm/mm/
alignment.c
   503      unsigned long eaddr, newaddr;  [local]
   517      newaddr = eaddr = regs->uregs[rn];
   523          eaddr = newaddr;
   526          eaddr += 4;
   537       * This is a "hint" - we already have eaddr worked out by the
   540      if (addr != eaddr) {
   542              "addr = %08lx, eaddr = %08lx\n",
   543              instruction_pointer(regs), instr, addr, eaddr);
   555          get32t_unaligned_check(val, eaddr);
   558          put32t_unaligned_check(regs->uregs[rd], eaddr);
   [all ...]
/linux-master/arch/powerpc/include/asm/
kvm_book3s.h
   156  extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
   157  extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
   161  extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
   180          gva_t eaddr, void *to, void *from,
   182  extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
   184  extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
   186  extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
   189  extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
   192  extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
   227  extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, in...
   [all ...]
kvm_ppc.h
    91  extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
    93  extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
   110  extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
   111  extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
   113          gva_t eaddr);
   116  extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
   313      int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
   315      int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
kvm_host.h
   377      ulong eaddr;  [member in struct kvmppc_pte]
   394      int (*slbfee)(struct kvm_vcpu *vcpu, gva_t eaddr, ulong *ret_slb);
   400      int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
   404      u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
   472      unsigned long eaddr;  [member in struct kvmppc_slb::mmio_hpte_cache_entry]
/linux-master/drivers/slimbus/
core.c
   176          struct slim_eaddr *eaddr,
   186      sbdev->e_addr = *eaddr;
   348          struct slim_eaddr *eaddr)
   353      dev = device_find_child(ctrl->dev, eaddr, slim_match_dev);
   175  slim_alloc_device(struct slim_controller *ctrl, struct slim_eaddr *eaddr, struct device_node *node)  [argument]
   347  find_slim_device(struct slim_controller *ctrl, struct slim_eaddr *eaddr)  [argument]
/linux-master/net/mac802154/
cfg.c
   393      u64 eaddr;  [local]
   406      eaddr = swab64((__force u64)child->extended_addr);
   409          &eaddr, ret);
   418      eaddr = swab64((__force u64)wpan_dev->parent->extended_addr);
   421          &eaddr, ret);
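Here the 802.15.4 code byte-swaps the extended (EUI-64) address before printing, since the stack stores it little-endian. A sketch with swab64() open-coded as the usual three-step swap:

    #include <stdint.h>
    #include <stdio.h>

    /* Reverse all eight bytes of a 64-bit value. */
    static uint64_t swab64(uint64_t x)
    {
            x = (x & 0x00000000FFFFFFFFULL) << 32 | x >> 32;
            x = (x & 0x0000FFFF0000FFFFULL) << 16 | (x & 0xFFFF0000FFFF0000ULL) >> 16;
            x = (x & 0x00FF00FF00FF00FFULL) << 8  | (x & 0xFF00FF00FF00FF00ULL) >> 8;
            return x;
    }

    int main(void)
    {
            printf("0x%016llx\n",
                   (unsigned long long)swab64(0x0123456789ABCDEFULL));
            return 0;
    }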
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_vm.c
  1672      uint64_t eaddr;  [local]
  1680      eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
  1682      tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
  1686          "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
  1696      mapping->last = eaddr;
  1730      uint64_t eaddr;  [local]
  1749      eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
  1752      mapping->last = eaddr;
  1835      uint64_t eaddr;  [local]
  1843      eaddr ...
   [all ...]
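Both mapping paths compute eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; assuming saddr is already in page units (the division happens just above these hits, outside the excerpt), the stored interval is inclusive of the last touched page. A sketch of that computation; GPU_PAGE_SIZE stands in for AMDGPU_GPU_PAGE_SIZE:

    #include <stdint.h>
    #include <stdio.h>

    #define GPU_PAGE_SIZE 4096ULL   /* stand-in for AMDGPU_GPU_PAGE_SIZE */

    /* Mappings are stored as inclusive page ranges, so the end is the
     * page of the last byte, not of saddr + size. */
    static void va_range(uint64_t saddr, uint64_t size,
                         uint64_t *first, uint64_t *last)
    {
            *first = saddr / GPU_PAGE_SIZE;
            *last  = *first + (size - 1) / GPU_PAGE_SIZE;   /* inclusive */
    }

    int main(void)
    {
            uint64_t first, last;

            va_range(0x100000, 0x2000, &first, &last);
            printf("pages [%llu, %llu]\n",
                   (unsigned long long)first, (unsigned long long)last);
            return 0;
    }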