Searched refs:paddr (Results 1 - 25 of 520) sorted by last modified time


/linux-master/arch/x86/kvm/
x86.c
9913 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, argument
9941 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
/linux-master/arch/x86/kvm/svm/
sev.c
495 unsigned long paddr, next_paddr; local
499 paddr = __sme_page_pa(inpages[idx]);
502 if ((paddr + PAGE_SIZE) == next_paddr) {
504 paddr = next_paddr;
831 static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr, argument
841 !IS_ALIGNED(paddr, 16) ||
850 ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
855 offset = paddr & 15;
867 static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr, argument
888 paddr
[all...]
/linux-master/kernel/dma/
swiotlb.c
578 phys_addr_t paddr; local
585 paddr = page_to_phys(page);
586 if (paddr + bytes - 1 > phys_limit) {
591 vaddr = phys_to_virt(paddr);
768 * @paddr: Physical address within the DMA buffer.
773 * Return: Memory pool which contains @paddr, or %NULL if none.
775 struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr) argument
782 if (paddr >= pool->start && paddr < pool->end)
787 if (paddr >
1538 swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size, enum dma_data_direction dir, unsigned long attrs) argument
[all...]
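The swiotlb_find_pool() kernel-doc and range check above describe a containment lookup: walk the registered pools and return the one whose [start, end) window covers the physical address. A minimal stand-alone sketch of that pattern follows, with simplified types and a plain singly linked list rather than the kernel's RCU-protected io_tlb_pool machinery:

/*
 * Hypothetical sketch of the lookup pattern suggested by the
 * swiotlb_find_pool() hits above: walk a list of pools and return the one
 * whose [start, end) physical range contains paddr.  demo_pool is an
 * illustrative stand-in, not the kernel's io_tlb_pool.
 */
#include <stddef.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

struct demo_pool {
	phys_addr_t start;	/* first physical address covered by the pool */
	phys_addr_t end;	/* one past the last covered address */
	struct demo_pool *next;
};

/* Return the pool containing @paddr, or NULL if no pool covers it. */
static struct demo_pool *demo_find_pool(struct demo_pool *head, phys_addr_t paddr)
{
	struct demo_pool *pool;

	for (pool = head; pool; pool = pool->next) {
		if (paddr >= pool->start && paddr < pool->end)
			return pool;
	}
	return NULL;
}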
/linux-master/drivers/irqchip/
irq-gic-v3-its.c
3105 phys_addr_t paddr; local
3118 paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
3119 paddr &= GENMASK_ULL(51, 12);
3120 if (WARN_ON(gic_rdists->prop_table_pa != paddr))
3123 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3124 paddr &= GENMASK_ULL(51, 16);
3126 WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
3133 paddr = page_to_phys(pend_page);
3231 &paddr);
5331 phys_addr_t paddr local
[all...]
/linux-master/drivers/iommu/
mtk_iommu.c
799 phys_addr_t paddr, size_t pgsize, size_t pgcount,
806 paddr |= BIT_ULL(32);
809 return dom->iop->map_pages(dom->iop, iova, paddr, pgsize, pgcount, prot, gfp, mapped);
798 mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped) argument
mtk_iommu_v1.c
338 phys_addr_t paddr, size_t pgsize, size_t pgcount,
345 u32 pabase = (u32)paddr;
337 mtk_iommu_v1_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped) argument
/linux-master/drivers/iommu/intel/
iommu.c
4055 unsigned long iova, phys_addr_t paddr,
4066 if (!IS_ALIGNED(iova | paddr, pgsize))
4069 ret = intel_iommu_map(domain, iova, paddr, size, prot, gfp);
4054 intel_iommu_map_pages(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped) argument
/linux-master/drivers/iommu/amd/
iommu.c
1069 u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem); local
1072 cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
1073 cmd->data[1] = upper_32_bits(paddr);
2497 phys_addr_t paddr, size_t pgsize, size_t pgcount,
2515 ret = ops->map_pages(ops, iova, paddr, pgsize,
2496 amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int iommu_prot, gfp_t gfp, size_t *mapped) argument
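The AMD IOMMU hit at lines 1069-1073 shows a common device-programming pattern: a 64-bit physical address is split into two 32-bit command words with lower_32_bits()/upper_32_bits(), and flag bits are OR'd into the low word. A hedged stand-alone sketch of that packing; demo_cmd and DEMO_FLAG are illustrative stand-ins, not the driver's structures or CMD_COMPL_WAIT_STORE_MASK:

/*
 * Pack a 64-bit physical address into two 32-bit command words, mirroring
 * the lower_32_bits()/upper_32_bits() split in the hit above.  The address
 * is assumed to be aligned so the flag bit does not collide with it.
 */
#include <stdint.h>

#define DEMO_FLAG	(1u << 0)	/* assumed flag bit, for illustration only */

struct demo_cmd {
	uint32_t data[2];
};

static void demo_build_cmd(struct demo_cmd *cmd, uint64_t paddr)
{
	cmd->data[0] = (uint32_t)(paddr & 0xffffffffu) | DEMO_FLAG;	/* lower 32 bits + flag */
	cmd->data[1] = (uint32_t)(paddr >> 32);				/* upper 32 bits */
}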
init.c
3808 unsigned long paddr, pfn; local
3810 paddr = iommu_virt_to_phys(page);
3811 /* Cbit maybe set in the paddr */
3812 pfn = __sme_clr(paddr) >> PAGE_SHIFT;
/linux-master/include/linux/
mm.h
4035 static inline bool arch_is_platform_page(u64 paddr)
4202 phys_addr_t paddr = pfn << PAGE_SHIFT;
4204 return range_contains_unaccepted_memory(paddr, paddr + PAGE_SIZE);
4033 arch_is_platform_page(u64 paddr) argument
4200 phys_addr_t paddr = pfn << PAGE_SHIFT; local
hyperv.h
1791 phys_addr_t paddr; local
1794 paddr = page_to_phys(vmalloc_to_page(addr)) +
1797 paddr = __pa(addr);
1799 return paddr >> HV_HYP_PAGE_SHIFT;
/linux-master/drivers/net/ethernet/amazon/ena/
ena_xdp.c
58 ena_buf->paddr = dma;
ena_netdev.c
560 ena_buf->paddr = dma + headroom;
700 dma_unmap_addr(ena_buf, paddr),
709 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
951 ena_buf->paddr += buf_len;
1007 dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
1066 pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);
1259 dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
2547 ena_buf->paddr = dma;
2574 ena_buf->paddr = dma;
/linux-master/drivers/gpu/drm/panfrost/
panfrost_mmu.c
299 unsigned long paddr = sg_dma_address(sgl); local
302 dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
306 size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
308 ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
313 paddr += mapped;
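The panfrost_mmu.c hits trace a chunked-mapping loop: choose a page size that (iova | paddr) is aligned to, hand a batch of pages to a map_pages-style callback, then advance iova and paddr by however many bytes were actually mapped. A rough self-contained sketch of that loop; demo_get_pgsize() and demo_map_pages() are hypothetical stand-ins for the io-pgtable callbacks, and len is assumed to be a multiple of 4 KiB:

#include <stddef.h>
#include <stdint.h>

#define SZ_4K	0x1000UL
#define SZ_2M	0x200000UL

/* Largest supported page size that (addr | len) is aligned to. */
static size_t demo_get_pgsize(uint64_t addr, size_t len, size_t *pgcount)
{
	size_t pgsize = ((addr | len) & (SZ_2M - 1)) ? SZ_4K : SZ_2M;

	*pgcount = len / pgsize;
	return pgsize;
}

/* Pretend to map @pgcount pages of @pgsize; report how many bytes were mapped. */
static int demo_map_pages(uint64_t iova, uint64_t paddr, size_t pgsize,
			  size_t pgcount, size_t *mapped)
{
	*mapped = pgsize * pgcount;
	return 0;
}

static int demo_map_range(uint64_t iova, uint64_t paddr, size_t len)
{
	while (len) {
		size_t pgcount, mapped = 0;
		size_t pgsize = demo_get_pgsize(iova | paddr, len, &pgcount);

		if (demo_map_pages(iova, paddr, pgsize, pgcount, &mapped))
			return -1;

		iova += mapped;
		paddr += mapped;
		len -= mapped;
	}
	return 0;
}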
/linux-master/arch/loongarch/include/asm/
page.h
82 #define phys_to_pfn(paddr) __phys_to_pfn(paddr)
85 #define phys_to_page(paddr) pfn_to_page(phys_to_pfn(paddr))
io.h
78 #define phys_to_virt(paddr) \
81 (unlikely(__kfence_pool == NULL)) ? __va((unsigned long)paddr) : \
82 page_address(phys_to_page((unsigned long)paddr)) + offset_in_page((unsigned long)paddr);\
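The mm.h hits ("paddr = pfn << PAGE_SHIFT") and the LoongArch phys_to_pfn()/phys_to_page() macros are the two directions of the same page-frame arithmetic: a physical address and its page frame number differ only by a shift of PAGE_SHIFT bits. A small illustrative program, assuming 4 KiB pages (PAGE_SHIFT_DEMO is an assumed value; the real PAGE_SHIFT is architecture-dependent):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_DEMO 12	/* assume 4 KiB pages for the example */

static uint64_t demo_pfn_to_phys(uint64_t pfn)
{
	return pfn << PAGE_SHIFT_DEMO;
}

static uint64_t demo_phys_to_pfn(uint64_t paddr)
{
	return paddr >> PAGE_SHIFT_DEMO;
}

int main(void)
{
	uint64_t paddr = 0x12345678;

	/* 0x12345678 >> 12 = 0x12345: the frame number of the page containing the address */
	printf("pfn=0x%llx, page start paddr=0x%llx\n",
	       (unsigned long long)demo_phys_to_pfn(paddr),
	       (unsigned long long)demo_pfn_to_phys(demo_phys_to_pfn(paddr)));
	return 0;
}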
/linux-master/drivers/crypto/ccp/
sev-dev.c
368 static int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked) argument
372 paddr = __sme_clr(ALIGN_DOWN(paddr, PAGE_SIZE));
374 for (i = 0; i < npages; i++, paddr += PAGE_SIZE) {
377 data.paddr = paddr;
387 ret = rmp_make_shared(__phys_to_pfn(paddr), PG_LEVEL_4K);
399 snp_leak_pages(__phys_to_pfn(paddr), npages - i);
403 static int rmp_mark_pages_firmware(unsigned long paddr, unsigned int npages, bool locked) argument
405 unsigned long pfn = __sme_clr(paddr) >> PAGE_SHIFT;
429 unsigned long npages = 1ul << order, paddr; local
465 unsigned long paddr, npages = 1ul << order; local
[all...]
/linux-master/arch/x86/virt/svm/
sev.c
339 unsigned long paddr; local
353 paddr = PFN_PHYS(pte_pfn(*pte)) | (hva & ~page_level_mask(level));
354 dump_rmpentry(PHYS_PFN(paddr));
363 unsigned long paddr = pfn << PAGE_SHIFT; local
375 : "a" (paddr)
472 unsigned long paddr = pfn << PAGE_SHIFT; local
487 : "a" (paddr), "c" ((unsigned long)state)
/linux-master/arch/x86/kernel/
sev.c
514 unsigned long vaddr, phys_addr_t *paddr)
543 *paddr = pa;
707 early_set_pages_state(unsigned long vaddr, unsigned long paddr, argument
716 paddr = paddr & PAGE_MASK;
717 paddr_end = paddr + (npages << PAGE_SHIFT);
719 while (paddr < paddr_end) {
723 if (WARN(ret, "Failed to validate address 0x%lx ret %d", paddr, ret))
731 sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
742 "Failed to change page state to '%s' paddr
513 vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt, unsigned long vaddr, phys_addr_t *paddr) argument
764 early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned long npages) argument
783 early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages) argument
1436 phys_addr_t paddr; local
[all...]
/linux-master/arch/x86/include/asm/
sev.h
217 void early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
219 void early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
243 early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned long npages) { } argument
245 early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages) { } argument
/linux-master/arch/x86/mm/pat/
memtype.c
662 * @paddr: physical address of which memory type needs to be looked up
669 static enum page_cache_mode lookup_memtype(u64 paddr) argument
674 if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
677 if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
680 page = pfn_to_page(paddr >> PAGE_SHIFT);
686 entry = memtype_lookup(paddr);
871 static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, argument
879 is_ram = pat_pagerange_is_ram(paddr, paddr + size);
941 free_pfn_range(u64 paddr, unsigned long size) argument
950 get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr, pgprot_t *pgprot) argument
991 resource_size_t paddr; local
1014 resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; local
1072 resource_size_t paddr; local
[all...]
/linux-master/tools/testing/selftests/kvm/include/x86_64/
processor.h
1336 void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level);
1337 void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
/linux-master/arch/x86/mm/
mem_encrypt_amd.c
58 unsigned long paddr, bool decrypt)
64 * @paddr needs to be accessed decrypted, mark the page shared in
67 early_snp_set_memory_shared((unsigned long)__va(paddr), paddr, npages);
72 early_snp_set_memory_private((unsigned long)__va(paddr), paddr, npages);
75 * @paddr need to be accessed encrypted, no need for the page state
91 static void __init __sme_early_enc_dec(resource_size_t paddr, argument
113 src = enc ? early_memremap_decrypted_wp(paddr, len) :
114 early_memremap_encrypted_wp(paddr, len);
57 snp_memcpy(void *dst, void *src, size_t sz, unsigned long paddr, bool decrypt) argument
146 sme_early_encrypt(resource_size_t paddr, unsigned long size) argument
151 sme_early_decrypt(resource_size_t paddr, unsigned long size) argument
159 unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET; local
[all...]
/linux-master/drivers/scsi/lpfc/
lpfc_sli.c
3456 dma_addr_t paddr; local
3558 paddr = getPaddr(irsp->un.cont64[0].addrHigh,
3561 paddr);
3563 paddr = getPaddr(irsp->un.cont64[1].addrHigh,
3567 paddr);
lpfc_els.c
10816 dma_addr_t paddr; local
10869 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
10872 paddr);
10874 paddr = getPaddr(icmd->un.cont64[1].addrHigh,
10878 paddr);

