Searched refs:pgt (Results 1 - 24 of 24) sorted by relevance

/linux-master/drivers/net/ethernet/mellanox/mlxsw/
spectrum_pgt.c
32 mutex_lock(&mlxsw_sp->pgt->lock);
33 index = idr_alloc(&mlxsw_sp->pgt->pgt_idr, NULL, 0,
34 mlxsw_sp->pgt->end_index, GFP_KERNEL);
42 mutex_unlock(&mlxsw_sp->pgt->lock);
46 mutex_unlock(&mlxsw_sp->pgt->lock);
52 mutex_lock(&mlxsw_sp->pgt->lock);
53 WARN_ON(idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base));
54 mutex_unlock(&mlxsw_sp->pgt->lock);
63 mutex_lock(&mlxsw_sp->pgt->lock);
65 mid_base = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr);
113 mlxsw_sp_pgt_entry_create(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe) argument
139 mlxsw_sp_pgt_entry_destroy(struct mlxsw_sp_pgt *pgt, struct mlxsw_sp_pgt_entry *pgt_entry) argument
152 mlxsw_sp_pgt_entry_get(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe) argument
163 mlxsw_sp_pgt_entry_put(struct mlxsw_sp_pgt *pgt, u16 mid) argument
313 struct mlxsw_sp_pgt *pgt; local
[all...]
spectrum.h
210 struct mlxsw_sp_pgt *pgt; member in struct:mlxsw_sp
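
The spectrum_pgt.c hits above show the driver's PGT index allocator: an IDR guarded by a mutex, with idr_alloc() handing out entries below end_index and idr_remove() returning them. A minimal sketch of the same pattern, with invented struct and function names (my_table and my_index_alloc/free are not from the driver):

    #include <linux/idr.h>
    #include <linux/mutex.h>

    struct my_table {
        struct mutex lock;  /* protects idr */
        struct idr idr;
        u16 end_index;      /* exclusive upper bound for allocated indexes */
    };

    static int my_index_alloc(struct my_table *tbl, u16 *p_index)
    {
        int index;

        mutex_lock(&tbl->lock);
        /* Store NULL: only the index itself is being reserved here. */
        index = idr_alloc(&tbl->idr, NULL, 0, tbl->end_index, GFP_KERNEL);
        mutex_unlock(&tbl->lock);
        if (index < 0)
            return index;

        *p_index = index;
        return 0;
    }

    static void my_index_free(struct my_table *tbl, u16 index)
    {
        mutex_lock(&tbl->lock);
        /* idr_remove() returns the stored pointer; NULL was stored above,
         * so a non-NULL return (and the WARN) would flag a misused slot. */
        WARN_ON(idr_remove(&tbl->idr, index));
        mutex_unlock(&tbl->lock);
    }
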
/linux-master/arch/arm64/kvm/hyp/
pgtable.c
112 static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr) argument
114 u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
115 u64 mask = BIT(pgt->ia_bits) - 1;
122 struct kvm_pgtable pgt = { local
127 return kvm_pgd_page_idx(&pgt, -1ULL) + 1;
301 static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data) argument
305 u64 limit = BIT(pgt->ia_bits);
310 if (!pgt->pgd)
313 for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) {
314 kvm_pteref_t pteref = &pgt
324 kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size, struct kvm_pgtable_walker *walker) argument
361 kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr, kvm_pte_t *ptep, s8 *level) argument
489 kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, enum kvm_pgtable_prot prot) argument
552 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size) argument
568 kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits, struct kvm_pgtable_mm_ops *mm_ops) argument
607 kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt) argument
687 stage2_has_fwb(struct kvm_pgtable *pgt) argument
717 stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot, kvm_pte_t *ptep) argument
875 stage2_unmap_defer_tlb_flush(struct kvm_pgtable *pgt) argument
892 struct kvm_pgtable *pgt = ctx->arg; local
914 stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte) argument
962 struct kvm_pgtable *pgt = data->mmu->pgt; local
1079 kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, enum kvm_pgtable_prot prot, void *mc, enum kvm_pgtable_walk_flags flags) argument
1110 kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size, void *mc, u8 owner_id) argument
1138 struct kvm_pgtable *pgt = ctx->arg; local
1178 kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size) argument
1239 stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr, u64 size, kvm_pte_t attr_set, kvm_pte_t attr_clr, kvm_pte_t *orig_pte, s8 *level, enum kvm_pgtable_walk_flags flags) argument
1268 kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size) argument
1275 kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr) argument
1324 kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold) argument
1340 kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot) argument
1370 struct kvm_pgtable *pgt = ctx->arg; local
1382 kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size) argument
1396 kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, u64 phys, s8 level, enum kvm_pgtable_prot prot, void *mc, bool force_pte) argument
1539 kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size, struct kvm_mmu_memory_cache *mc) argument
1551 __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, struct kvm_pgtable_mm_ops *mm_ops, enum kvm_pgtable_stage2_flags flags, kvm_pgtable_force_pte_cb_t force_pte_cb) argument
1604 kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt) argument
[all...]
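
kvm_pgd_page_idx() (line 112 above) computes which of the concatenated PGD pages an input address lands in: mask the address down to ia_bits, then shift by the granule shift of the level above start_level; line 127 sizes the PGD as the index of the highest address plus one. The same arithmetic restated in isolation (parameters are pulled out of struct kvm_pgtable for clarity; kvm_granule_shift() is the real helper from kvm_pgtable.h):

    /* idx = (addr & (BIT(ia_bits) - 1)) >> kvm_granule_shift(start_level - 1)
     * and the number of PGD pages is demo_pgd_page_idx(..., -1ULL) + 1.
     */
    static u32 demo_pgd_page_idx(u32 ia_bits, s8 start_level, u64 addr)
    {
        u64 shift = kvm_granule_shift(start_level - 1); /* may underflow */
        u64 mask = BIT(ia_bits) - 1;

        return (addr & mask) >> shift;
    }
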
/linux-master/arch/arm64/include/asm/
kvm_pgtable.h
386 * @pgt: Uninitialised page-table structure to initialise.
392 int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
397 * @pgt: Page-table structure initialised by kvm_pgtable_hyp_init().
402 void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);
406 * @pgt: Page-table structure initialised by kvm_pgtable_hyp_init().
423 int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
428 * @pgt: Page-table structure initialised by kvm_pgtable_hyp_init().
445 u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
473 * @pgt: Uninitialised page-table structure to initialise.
482 int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, struct kvm_pgtable_mm_ops *mm_ops, enum kvm_pgtable_stage2_flags flags, kvm_pgtable_force_pte_cb_t force_pte_cb);
[all...]
kvm_host.h
160 struct kvm_pgtable *pgt; member in struct:kvm_s2_mmu
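
Read together, the kvm_pgtable.h declarations above give the hyp page-table lifecycle: initialise with the VA width and memory-management ops, map and unmap ranges, then destroy. A hedged usage sketch built only from those signatures (the mm_ops instance, the addresses, and the 48-bit VA width are placeholders, not taken from the kernel):

    static int demo_hyp_mapping(struct kvm_pgtable_mm_ops *my_mm_ops)
    {
        struct kvm_pgtable pgt;
        u64 va = 0xffff800000000000UL;  /* placeholder hyp VA */
        int ret;

        ret = kvm_pgtable_hyp_init(&pgt, 48, my_mm_ops);
        if (ret)
            return ret;

        ret = kvm_pgtable_hyp_map(&pgt, va, PAGE_SIZE, 0x80000000UL,
                                  KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W);
        if (!ret)
            /* kvm_pgtable_hyp_unmap() returns the number of bytes unmapped */
            kvm_pgtable_hyp_unmap(&pgt, va, PAGE_SIZE);

        kvm_pgtable_hyp_destroy(&pgt);
        return ret;
    }
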
/linux-master/drivers/firmware/efi/libstub/
x86-5lvl.c
68 u64 *pgt = (void *)la57_toggle + PAGE_SIZE; local
81 new_cr3 = memset(pgt, 0, PAGE_SIZE);
89 new_cr3 = memcpy(pgt, new_cr3, PAGE_SIZE);
/linux-master/drivers/gpu/drm/nouveau/nvkm/engine/dma/
usernv04.c
52 struct nvkm_memory *pgt = local
55 return nvkm_gpuobj_wrap(pgt, pgpuobj);
56 nvkm_kmap(pgt);
57 offset = nvkm_ro32(pgt, 8 + (offset >> 10));
59 nvkm_done(pgt);
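
The usernv04.c hit reads a page-table word through the nvkm accessor trio: nvkm_kmap() maps the memory object, nvkm_ro32() reads 32 bits at a byte offset, nvkm_done() unmaps. The pattern isolated (the function name is made up; the offset expression is copied from line 57):

    static u32 demo_read_pt_word(struct nvkm_memory *pgt, u64 offset)
    {
        u32 word;

        nvkm_kmap(pgt);
        word = nvkm_ro32(pgt, 8 + (offset >> 10)); /* offset math as in the hit */
        nvkm_done(pgt);
        return word;
    }
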
/linux-master/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
30 struct nvkm_vmm_pt *pgt = *ppgt; local
31 if (pgt) {
32 kvfree(pgt->pde);
33 kfree(pgt);
44 struct nvkm_vmm_pt *pgt; local
56 if (!(pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL)))
58 pgt->page = page ? page->shift : 0;
59 pgt->sparse = sparse;
62 pgt
143 struct nvkm_vmm_pt *pgt = it->pt[it->lvl]; local
197 nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt, const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes) argument
262 struct nvkm_vmm_pt *pgt = it->pt[0]; local
296 nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt, const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes) argument
368 struct nvkm_vmm_pt *pgt = it->pt[0]; local
381 nvkm_vmm_sparse_ptes(const struct nvkm_vmm_desc *desc, struct nvkm_vmm_pt *pgt, u32 ptei, u32 ptes) argument
417 struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; local
487 struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; local
532 struct nvkm_vmm_pt *pgt = it.pt[it.lvl]; local
[all...]
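
Line 56 of vmm.c allocates the nvkm_vmm_pt and its per-PTE bookkeeping in one shot: kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL). A generic sketch of that struct-plus-trailing-data idiom with invented names (struct_size() is the overflow-safe way to spell the same size):

    struct demo_pt {
        u8 page;     /* page shift, 0 if none */
        bool sparse;
        u8 pte[];    /* one byte of state per PTE, sized at allocation */
    };

    static struct demo_pt *demo_pt_new(u32 lpte, bool sparse)
    {
        struct demo_pt *pgt = kzalloc(struct_size(pgt, pte, lpte), GFP_KERNEL);

        if (!pgt)
            return NULL;
        pgt->sparse = sparse;
        return pgt;
    }
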
vmmgp100.c
238 struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; local
242 if (pgt->pt[0] && !gp100_vmm_pde(pgt->pt[0], &data[0]))
244 if (pgt->pt[1] && !gp100_vmm_pde(pgt->pt[1], &data[1]))
365 struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; local
369 if (!gp100_vmm_pde(pgt->pt[0], &data))
vmmnv50.c
106 nv50_vmm_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgt, u64 *pdata) argument
110 if (pgt && (pt = pgt->pt[0])) {
111 switch (pgt->page) {
vmmgf100.c
108 struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; local
113 if ((pt = pgt->pt[0])) {
127 if ((pt = pgt->pt[1])) {
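[all...]

The vmmnv50/vmmgf100/vmmgp100 hits all follow one shape: a nvkm_vmm_pt can reference two hardware page tables (pt[0] and pt[1], e.g. small- and large-page tables), and the parent directory entry is re-encoded whenever either changes. A hedged sketch of that flow, where demo_encode_pde() stands in for the per-generation helpers such as gp100_vmm_pde():

    static bool demo_encode_pde(struct nvkm_mmu_pt *pt, u64 *data); /* stand-in */

    static int demo_pde_update(struct nvkm_vmm_pt *pgd, u32 pdei, u64 data[2])
    {
        struct nvkm_vmm_pt *pgt = pgd->pde[pdei];

        data[0] = data[1] = 0;
        if (pgt->pt[0] && !demo_encode_pde(pgt->pt[0], &data[0]))
            return -EINVAL;
        if (pgt->pt[1] && !demo_encode_pde(pgt->pt[1], &data[1]))
            return -EINVAL;
        return 0; /* caller writes data[] back into the page directory */
    }
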
/linux-master/arch/arm64/kvm/
mmu.c
66 struct kvm_pgtable *pgt = mmu->pgt; local
67 if (!pgt)
71 ret = fn(pgt, addr, next - addr);
118 struct kvm_pgtable *pgt; local
145 pgt = kvm->arch.mmu.pgt;
146 if (!pgt)
150 ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache);
804 struct kvm_pgtable pgt local
872 struct kvm_pgtable *pgt; local
1014 struct kvm_pgtable *pgt = NULL; local
1073 struct kvm_pgtable *pgt = mmu->pgt; local
1395 struct kvm_pgtable *pgt; local
[all...]
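
The mmu.c hits at lines 66-71 and 145-150 share the range-walking shape used throughout arm64 KVM: fetch mmu->pgt (or kvm->arch.mmu.pgt), bail out if the table has been torn down, then feed the operation one chunk at a time as fn(pgt, addr, next - addr). A condensed sketch (demo_next_chunk() is an invented stand-in for the real chunk-sizing logic, which also breaks work up to bound lock hold times):

    static u64 demo_next_chunk(u64 addr, u64 end); /* stand-in */

    static int demo_apply_range(struct kvm_s2_mmu *mmu, u64 addr, u64 end,
                                int (*fn)(struct kvm_pgtable *, u64, u64))
    {
        struct kvm_pgtable *pgt = mmu->pgt;
        int ret = 0;

        if (!pgt)
            return -EINVAL;

        while (addr < end) {
            u64 next = demo_next_chunk(addr, end);

            ret = fn(pgt, addr, next - addr);
            if (ret)
                break;
            addr = next;
        }
        return ret;
    }
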
/linux-master/arch/arm64/kvm/hyp/include/nvhe/
pkvm.h
35 struct kvm_pgtable pgt; member in struct:pkvm_hyp_vm
mem_protect.h
50 struct kvm_pgtable pgt; member in struct:host_mmu
/linux-master/arch/arm64/kvm/hyp/nvhe/
mem_protect.c
151 ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu,
157 mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd);
158 mmu->pgt = &host_mmu.pgt;
258 ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0,
264 vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd);
275 kvm_pgtable_stage2_destroy(&vm->pgt);
328 struct kvm_pgtable *pgt = &host_mmu.pgt; local
336 ret = kvm_pgtable_stage2_unmap(pgt, add
589 check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size, struct check_walk_data *data) argument
[all...]
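
The mem_protect.c hits show the initialisation handshake for a stage-2 table: __kvm_pgtable_stage2_init() builds it, then the MMU struct is pointed at the table and the PGD's physical address is published with __hyp_pa(). The wiring condensed from lines 151-158 (flags and the force_pte callback are left at their defaults here; the real host init passes its own callback):

    static int demo_stage2_setup(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
                                 struct kvm_pgtable_mm_ops *mm_ops)
    {
        int ret;

        ret = __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL);
        if (ret)
            return ret;

        mmu->pgd_phys = __hyp_pa(pgt->pgd);
        mmu->pgt = pgt;
        return 0;
    }
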
pkvm.c
386 mmu->pgt = &hyp_vm->pgt;
/linux-master/arch/s390/include/asm/
gmap.h
135 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
138 unsigned long *pgt, int *dat_protection, int *fake);
/linux-master/arch/s390/kvm/
gaccess.c
1359 * @pgt: pointer to the beginning of the page table for the given address if
1363 * @fake: pgt references contiguous guest memory block, not a pgtable
1366 unsigned long *pgt, int *dat_protection,
1422 *pgt = ptr + vaddr.rfx * 8;
1450 *pgt = ptr + vaddr.rsx * 8;
1479 *pgt = ptr + vaddr.rtx * 8;
1517 *pgt = ptr + vaddr.sx * 8;
1544 *pgt = ptr;
1567 unsigned long pgt = 0; local
1579 rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt,
1365 kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr, unsigned long *pgt, int *dat_protection, int *fake) argument
[all...]
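
At each level of the walk in kvm_s390_shadow_tables(), the returned pgt pointer is the table origin plus the level's index from the virtual address, scaled by 8 bytes per entry (*pgt = ptr + vaddr.rfx * 8 for region-first, down to vaddr.sx * 8 for the segment). The address arithmetic on its own:

    /* A DAT table entry is 8 bytes, so the entry's address is the table
     * origin plus index * 8, whichever index field of the virtual
     * address applies at the current level.
     */
    static unsigned long demo_dat_entry_addr(unsigned long table_origin,
                                             unsigned int index)
    {
        return table_origin + index * 8;
    }
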
/linux-master/arch/s390/mm/
gmap.c
1328 * @pgt: pointer to the start of a shadow page table
1333 unsigned long *pgt)
1339 pgt[i] = _PAGE_INVALID;
1352 phys_addr_t sto, pgt; local
1362 pgt = *ste & _SEGMENT_ENTRY_ORIGIN;
1364 __gmap_unshadow_pgt(sg, raddr, __va(pgt));
1366 ptdesc = page_ptdesc(phys_to_page(pgt));
1383 phys_addr_t pgt; local
1390 pgt = sgt[i] & _REGION_ENTRY_ORIGIN;
1392 __gmap_unshadow_pgt(sg, raddr, __va(pgt));
1332 __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr, unsigned long *pgt) argument
2018 gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, unsigned long *pgt, int *dat_protection, int *fake) argument
2058 gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt, int fake) argument
[all...]
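
__gmap_unshadow_pgt() (line 1332) tears down a shadow page table by marking every slot invalid; line 1339 shows the body of the loop. A sketch of that shape (the real function also advances the guest address alongside the index, which the hit elides; _PAGE_ENTRIES and _PAGE_INVALID are the real s390 constants):

    static void demo_unshadow_pgt(unsigned long *pgt)
    {
        int i;

        /* an s390 page table holds _PAGE_ENTRIES 8-byte entries */
        for (i = 0; i < _PAGE_ENTRIES; i++)
            pgt[i] = _PAGE_INVALID;
    }
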
/linux-master/arch/powerpc/kvm/
book3s_64_mmu_radix.c
1309 pgd_t *pgt; local
1347 pgt = NULL;
1351 pgt = NULL;
1361 if (!pgt) {
1363 pgt = kvm->arch.pgtable;
1370 pgt = nested->shadow_pgtable;
1379 "pgdir: %lx\n", (unsigned long)pgt);
1384 pgdp = pgt + pgd_index(gpa);
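
The book3s_64_mmu_radix.c hit chooses which radix tree to walk: the L1 guest's kvm->arch.pgtable, or a nested guest's shadow_pgtable, then indexes the chosen PGD by guest physical address (line 1384). The selection condensed (the nested-guest lookup itself is elided in the hit, so the nested pointer is taken as a parameter here):

    static pgd_t *demo_radix_pgd_entry(struct kvm *kvm,
                                       struct kvm_nested_guest *nested,
                                       unsigned long gpa)
    {
        pgd_t *pgt;

        if (nested)
            pgt = nested->shadow_pgtable;
        else
            pgt = kvm->arch.pgtable;

        return pgt + pgd_index(gpa);
    }
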
/linux-master/arch/x86/events/intel/
uncore_nhmex.c
877 DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
/linux-master/drivers/accel/habanalabs/common/mmu/
mmu.c
818 * - Create a shadow table for pgt
876 dev_err(hdev->dev, "Failed to allocate HOP from pgt pool\n");
940 * @pgt: pgt_info for the HOP hosting the PTE
949 u64 hl_mmu_hr_pte_phys_to_virt(struct hl_ctx *ctx, struct pgt_info *pgt, argument
955 return pgt->virt_addr + pte_offset;
/linux-master/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_self_test.c
78 static int pgt(struct st_pred_args *args) function
395 NA, 1, 0, pgt,
/linux-master/drivers/accel/habanalabs/common/
habanalabs.h
282 * @phys_addr: physical address of the pgt.
283 * @virt_addr: host virtual address of the pgt (see above device/host resident).
286 * @num_of_ptes: indicates how many ptes are used in the pgt. used only for dynamically
3873 u64 hl_mmu_hr_pte_phys_to_virt(struct hl_ctx *ctx, struct pgt_info *pgt, u64 phys_pte_addr,
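
hl_mmu_hr_pte_phys_to_virt() (declared at habanalabs.h line 3873, body at mmu.c line 949) maps a PTE's physical address back into the host-resident shadow copy of the pgt. The hits show only the final pgt->virt_addr + pte_offset step, so the offset computation below (physical PTE address minus the pgt's physical base) is an assumption:

    static u64 demo_pte_phys_to_virt(u64 pgt_virt_addr, u64 pgt_phys_addr,
                                     u64 phys_pte_addr)
    {
        /* Assumption: the PTE lies inside this pgt, so its offset from the
         * physical base equals its offset from the host-virtual base. */
        u64 pte_offset = phys_pte_addr - pgt_phys_addr;

        return pgt_virt_addr + pte_offset;
    }
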

Completed in 232 milliseconds