Searched refs:pmd (Results 26 - 50 of 300) sorted by last modified time

/linux-master/drivers/net/ethernet/intel/ice/
ice_ptp_hw.c
2239 u64 total_offset, pmd, val; local
2298 err = ice_phy_calc_pmd_adj_e82x(hw, port, link_spd, fec_mode, &pmd);
2306 total_offset += pmd;
2308 total_offset -= pmd;
ice_common.c
3859 options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
3860 options[i].pmd);
3864 options[i].pmd, options[i].max_lane_speed);
ice_adminq_cmd.h
1579 u8 pmd; member in struct:ice_aqc_get_port_options_elem
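
Note: the ice_common.c hits above decode the packed u8 pmd field (declared in ice_adminq_cmd.h) with FIELD_GET(). Below is a minimal, hedged sketch of that bitfield pattern; the mask value and the names are invented for illustration, not taken from the driver.

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    /* Illustrative only: pull a 4-bit "PMD count" subfield out of a packed
     * byte, in the style of the FIELD_GET() call shown above. The mask is
     * hypothetical (cf. ICE_AQC_PORT_OPT_PMD_COUNT_M in the real driver). */
    #define EXAMPLE_PMD_COUNT_M     GENMASK(3, 0)

    static u8 example_pmd_count(u8 packed)
    {
            return FIELD_GET(EXAMPLE_PMD_COUNT_M, packed);
    }
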
/linux-master/drivers/net/ethernet/intel/ice/devlink/
devlink_port.c
95 "%-6u", options[i].pmd);
207 if (count == options[j].pmd) {
283 attrs->lanes = max_t(int, attrs->lanes, options[i].pmd);
/linux-master/drivers/md/
dm-thin.c
239 struct dm_pool_metadata *pmd; member in struct:pool
1095 r = dm_pool_block_is_shared(pool->pmd, b, &shared);
1108 r = dm_pool_block_is_shared(pool->pmd, e, &shared);
1173 r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
1206 r = dm_pool_dec_data_range(pool->pmd, m->data_block,
1451 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
1471 r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
1492 r = dm_pool_commit_metadata(pool->pmd);
1524 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
1541 r = dm_pool_get_free_block_count(pool->pmd,
2945 struct dm_pool_metadata *pmd; local
[all...]
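
Note: the dm-thin.c hits go through the pool's metadata handle (pool->pmd) via the dm_pool_* API, e.g. dm_pool_block_is_shared() at lines 1095 and 1108. A hedged sketch of that check-both-ends pattern follows; the helper name and the simplified error handling are mine, and it assumes the local declarations of dm-thin.c and dm-thin-metadata.h.

    /* Illustrative: report whether both ends of a data-block range are
     * shared, mirroring the pattern in the hits above (sketch only). */
    static int example_range_shared(struct pool *pool, dm_block_t begin,
                                    dm_block_t end, bool *shared)
    {
            bool b_shared, e_shared;
            int r;

            r = dm_pool_block_is_shared(pool->pmd, begin, &b_shared);
            if (r)
                    return r;

            r = dm_pool_block_is_shared(pool->pmd, end, &e_shared);
            if (r)
                    return r;

            *shared = b_shared && e_shared;
            return 0;
    }
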
/linux-master/drivers/edac/
xgene_edac.c
515 u32 pmd; member in struct:xgene_edac_pmd_ctx
533 ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
573 ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
617 ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
677 ctx->pmd, val, val_hi, val_lo);
729 ctx->pmd, val, val_hi, val_lo);
741 if (!((PMD0_MERR_MASK << ctx->pmd) & pcp_hp_stat))
791 PMD0_MERR_MASK << ctx->pmd);
794 PMD0_MERR_MASK << ctx->pmd);
870 snprintf(name, sizeof(name), "PMD%d", ctx->pmd);
881 xgene_edac_pmd_available(u32 efuse, int pmd) argument
893 u32 pmd; local
979 xgene_edac_pmd_remove(struct xgene_edac_pmd_ctx *pmd) argument
1812 struct xgene_edac_pmd_ctx *pmd; local
1966 struct xgene_edac_pmd_ctx *pmd; local
[all...]
/linux-master/arch/x86/mm/pat/
set_memory.c
673 pmd_t *pmd; local
707 pmd = pmd_offset(pud, address);
708 if (pmd_none(*pmd))
712 if (pmd_leaf(*pmd) || !pmd_present(*pmd))
713 return (pte_t *)pmd;
715 *nx |= pmd_flags(*pmd) & _PAGE_NX;
716 *rw &= pmd_flags(*pmd) & _PAGE_RW;
720 return pte_offset_kernel(pmd, address);
739 * Note: We return pud and pmd eithe
851 pmd_t *pmd; local
1215 try_to_free_pmd_page(pmd_t *pmd) argument
1227 unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end) argument
1245 __unmap_pmd_range(pud_t *pud, pmd_t *pmd, unsigned long start, unsigned long end) argument
1255 pmd_t *pmd = pmd_offset(pud, start); local
1340 alloc_pte_page(pmd_t *pmd) argument
1352 pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL); local
1360 populate_pte(struct cpa_data *cpa, unsigned long start, unsigned long end, unsigned num_pages, pmd_t *pmd, pgprot_t pgprot) argument
1384 pmd_t *pmd; local
[all...]
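
Note: the set_memory.c hits at 707-720 are one level of the classic page-table lookup: take pmd_offset(), bail on pmd_none(), stop at a leaf or non-present entry, otherwise descend with pte_offset_kernel(). A hedged, simplified sketch of that step (it omits the NX/RW flag accumulation the real lookup performs):

    #include <linux/pgtable.h>

    /* Illustrative descent from a pud entry to the pte for 'address'.
     * Returns the pte pointer, the pmd itself when it is a large or
     * non-present entry, or NULL when nothing is mapped. This is a
     * sketch, not the kernel's lookup_address() implementation. */
    static pte_t *example_lookup_pmd_level(pud_t *pud, unsigned long address)
    {
            pmd_t *pmd = pmd_offset(pud, address);

            if (pmd_none(*pmd))
                    return NULL;

            if (pmd_leaf(*pmd) || !pmd_present(*pmd))
                    return (pte_t *)pmd;

            return pte_offset_kernel(pmd, address);
    }
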
/linux-master/arch/x86/mm/
fault.c
182 pmd_t *pmd, *pmd_k; local
205 pmd = pmd_offset(pud, address);
208 if (pmd_present(*pmd) != pmd_present(*pmd_k))
209 set_pmd(pmd, *pmd_k);
214 BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
301 pmd_t *pmd; local
314 pmd = pmd_offset(pud, address);
315 pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
324 if (!low_pfn(pmd_pfn(*pmd)) || !pmd_presen
357 pmd_t *pmd; local
984 pmd_t *pmd; local
[all...]
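
Note: the fault.c hits around 205-214 are the 32-bit vmalloc-area sync: when the current page table lacks a kernel mapping that the reference table has, the pmd entry is copied over with set_pmd(). A hedged sketch of that step, assuming pmd and pmd_k point at the slots for the same address in the two tables:

    #include <linux/pgtable.h>

    /* Illustrative: propagate a kernel pmd entry from the reference table
     * (pmd_k) into the faulting table (pmd). Sketch of the pattern only. */
    static pmd_t *example_sync_kernel_pmd(pmd_t *pmd, pmd_t *pmd_k)
    {
            if (pmd_present(*pmd) != pmd_present(*pmd_k))
                    set_pmd(pmd, *pmd_k);

            return pmd_present(*pmd_k) ? pmd_k : NULL;
    }
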
/linux-master/arch/x86/include/asm/
pgtable_types.h
284 /* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
288 * Extracts the flags from a (pte|pmd|pud|pgd)val_t
396 return (pmd_t) { .pmd = val };
399 static inline pmdval_t native_pmd_val(pmd_t pmd) argument
401 return pmd.pmd;
411 static inline pmdval_t native_pmd_val(pmd_t pmd) argument
413 return native_pgd_val(pmd.pud.p4d.pgd);
451 static inline pmdval_t pmd_pfn_mask(pmd_t pmd) argument
453 if (native_pmd_val(pmd)
459 pmd_flags_mask(pmd_t pmd) argument
464 pmd_flags(pmd_t pmd) argument
[all...]
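
Note: the pgtable_types.h hits define the raw accessors (native_pmd_val(), pmd_pfn_mask(), pmd_flags()) that split a pmd into its page-frame number and its flag bits. A small x86-flavoured sketch of how the derived helpers compose (the local names and the pr_info() are mine):

    #include <linux/pgtable.h>
    #include <linux/printk.h>

    /* Illustrative: decompose a pmd into PFN and _PAGE_* flags using the
     * accessors referenced above (x86 definitions assumed). */
    static void example_decode_pmd(pmd_t pmd)
    {
            unsigned long pfn = pmd_pfn(pmd);       /* physical frame number */
            pmdval_t flags = pmd_flags(pmd);        /* protection/flag bits */

            pr_info("pmd: pfn=%#lx flags=%#llx\n", pfn, (unsigned long long)flags);
    }
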
/linux-master/arch/sh/mm/
pgtable.c
45 void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) argument
47 set_pud(pud, __pud((unsigned long)pmd));
55 void pmd_free(struct mm_struct *mm, pmd_t *pmd) argument
57 kmem_cache_free(pmd_cachep, pmd);
tlbex_32.c
29 pmd_t *pmd; local
53 pmd = pmd_offset(pud, address);
54 if (pmd_none_or_clear_bad(pmd))
56 pte = pte_offset_kernel(pmd, address);
cache-sh4.c
221 pmd_t *pmd; local
234 pmd = pmd_off(vma->vm_mm, address);
235 pte = pte_offset_kernel(pmd, address);
/linux-master/arch/arm64/mm/
proc.S
378 pmd .req x13
380 kpti_map_pgtbl pmd, 2
381 kpti_mk_tbl_ng pmd, PTRS_PER_PMD
384 pmd .req pgd
390 pte_to_phys cur_ptep, pmd
408 .unreq pmd
mmu.c
197 pmd_t pmd = READ_ONCE(*pmdp); local
200 BUG_ON(pmd_sect(pmd));
201 if (pmd_none(pmd)) {
214 BUG_ON(pmd_bad(pmd));
491 * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
878 pmd_t *pmdp, pmd; local
883 pmd = READ_ONCE(*pmdp);
884 if (pmd_none(pmd))
887 WARN_ON(!pmd_present(pmd));
888 if (pmd_sect(pmd)) {
1025 pmd_t *pmdp, pmd; local
1248 pmd_t pmd; local
[all...]
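
Note: the arm64 mmu.c hits read live entries with READ_ONCE() and then classify them: pmd_none() for empty, pmd_sect() for a block (section) mapping, otherwise a pointer to a pte table. A hedged sketch of that classification using the helpers shown above (pmd_sect() is arm64-specific):

    #include <linux/pgtable.h>
    #include <linux/printk.h>

    /* Illustrative classification of an arm64 pmd entry. */
    static void example_classify_pmd(pmd_t *pmdp)
    {
            pmd_t pmd = READ_ONCE(*pmdp);   /* single, tear-free read of the entry */

            if (pmd_none(pmd))
                    pr_info("pmd: empty\n");
            else if (pmd_sect(pmd))
                    pr_info("pmd: block mapping\n");
            else
                    pr_info("pmd: next-level pte table\n");
    }
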
/linux-master/arch/arm64/include/asm/
pgtable.h
167 #define pmd_access_permitted(pmd, write) \
168 (pte_access_permitted(pmd_pte(pmd), (write)))
184 static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot) argument
186 pmd_val(pmd) &= ~pgprot_val(prot);
187 return pmd;
190 static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot) argument
192 pmd_val(pmd) |= pgprot_val(prot);
193 return pmd;
273 static inline pmd_t pmd_mkcont(pmd_t pmd) argument
275 return __pmd(pmd_val(pmd) | PMD_SECT_CON
462 pmd_pte(pmd_t pmd) argument
532 pmd_protnone(pmd_t pmd) argument
545 pmd_trans_huge(pmd_t pmd) argument
583 pmd_mkdevmap(pmd_t pmd) argument
614 set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) argument
708 set_pmd(pmd_t *pmdp, pmd_t pmd) argument
730 pmd_page_paddr(pmd_t pmd) argument
735 pmd_page_vaddr(pmd_t pmd) argument
1103 pmd_modify(pmd_t pmd, pgprot_t newprot) argument
1139 pmd_user_accessible_page(pmd_t pmd) argument
1246 pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0)); local
1296 pmdp_establish(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t pmd) argument
[all...]
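
Note: the arm64 pgtable.h hits build the pmd_mk*() family on two tiny helpers, set_pmd_bit() and clear_pmd_bit(), which OR in or mask out a pgprot. A hedged sketch of adding a modifier in the same style; the bit used here is hypothetical, not a real arm64 attribute:

    /* Illustrative modifier in the style of pmd_mkcont(), built on the
     * set_pmd_bit()/clear_pmd_bit() helpers shown above. The bit is made up. */
    #define PMD_SECT_EXAMPLE        (_AT(pmdval_t, 1) << 58)  /* hypothetical software bit */

    static inline pmd_t pmd_mkexample(pmd_t pmd)
    {
            return set_pmd_bit(pmd, __pgprot(PMD_SECT_EXAMPLE));
    }

    static inline pmd_t pmd_clrexample(pmd_t pmd)
    {
            return clear_pmd_bit(pmd, __pgprot(PMD_SECT_EXAMPLE));
    }
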
/linux-master/arch/alpha/mm/
init.c
150 pmd_t *pmd; local
191 pmd = pmd_offset(pud, VMALLOC_START);
192 pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));
220 if (pmd != pmd_offset(pud, vaddr)) {
222 pmd = pmd_offset(pud, vaddr);
223 pmd_set(pmd, (pte_t *)kernel_end);
226 set_pte(pte_offset_kernel(pmd, vaddr),
/linux-master/arch/s390/mm/
gmap.c
560 pmd_t *pmd; local
603 pmd = pmd_offset(pud, vmaddr);
604 VM_BUG_ON(pmd_none(*pmd));
606 if (pmd_leaf(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
612 ptl = pmd_lock(mm, pmd);
618 if (pmd_leaf(*pmd)) {
619 *table = (pmd_val(*pmd) &
623 *table = pmd_val(*pmd) &
627 !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
922 * and return the pmd pointe
2521 thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) argument
2723 __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr, unsigned long next, struct mm_walk *walk) argument
2734 pmd_t *pmd = (pmd_t *)pte; local
[all...]
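
Note: the gmap.c hits take the split pmd lock with pmd_lock() (line 612) before deciding whether the entry is a large leaf. A hedged, generic sketch of that lock-inspect-unlock pattern:

    #include <linux/mm.h>

    /* Illustrative: inspect a pmd under its page-table lock, as the gmap
     * code does before choosing between leaf and pte-table handling. */
    static bool example_pmd_is_leaf_locked(struct mm_struct *mm, pmd_t *pmd)
    {
            spinlock_t *ptl = pmd_lock(mm, pmd);    /* per-pmd split lock */
            bool leaf = pmd_leaf(*pmd);

            spin_unlock(ptl);
            return leaf;
    }
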
vmem.c
165 static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr, argument
177 pte = pte_offset_kernel(pmd, addr);
207 static void try_free_pte_table(pmd_t *pmd, unsigned long start) argument
213 pte = pte_offset_kernel(pmd, start);
218 vmem_pte_free((unsigned long *) pmd_deref(*pmd));
219 pmd_clear(pmd);
229 pmd_t *pmd; local
236 pmd = pmd_offset(pud, addr);
237 for (; addr < end; addr = next, pmd++) {
240 if (pmd_none(*pmd))
307 pmd_t *pmd; local
324 pmd_t *pmd; local
576 pmd_t *pmd; local
[all...]
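
Note: the vmem.c hits (lines 236-240) iterate pmd entries over an address range with the usual "next = pmd_addr_end(addr, end)" loop. A hedged skeleton of that iteration:

    #include <linux/pgtable.h>

    /* Illustrative pmd-level range loop in the shape used above: one pmd
     * entry per iteration, with 'next' clamped to the end of the range. */
    static void example_for_each_pmd(pud_t *pud, unsigned long addr, unsigned long end)
    {
            pmd_t *pmd = pmd_offset(pud, addr);
            unsigned long next;

            for (; addr < end; addr = next, pmd++) {
                    next = pmd_addr_end(addr, end);
                    if (pmd_none(*pmd))
                            continue;
                    /* ... operate on the leaf or pte table behind *pmd ... */
            }
    }
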
hugetlbpage.c
30 * Convert encoding pte bits pmd / pud bits
85 * Convert encoding pmd / pud bits pte bits
236 int pmd_huge(pmd_t pmd) argument
238 return pmd_leaf(pmd);
/linux-master/arch/s390/include/asm/
pgtable.h
72 pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
554 static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot) argument
556 return __pmd(pmd_val(pmd) & ~pgprot_val(prot));
559 static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot) argument
561 return __pmd(pmd_val(pmd) | pgprot_val(prot));
642 * pgd/p4d/pud/pmd/pte query functions
733 static inline bool pmd_leaf(pmd_t pmd) argument
735 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
738 static inline int pmd_bad(pmd_t pmd) argument
740 if ((pmd_val(pmd)
767 pmd_present(pmd_t pmd) argument
772 pmd_none(pmd_t pmd) argument
778 pmd_write(pmd_t pmd) argument
790 pmd_dirty(pmd_t pmd) argument
796 pmd_young(pmd_t pmd) argument
837 pmd_protnone(pmd_t pmd) argument
877 pmd_soft_dirty(pmd_t pmd) argument
882 pmd_mksoft_dirty(pmd_t pmd) argument
887 pmd_clear_soft_dirty(pmd_t pmd) argument
955 set_pmd(pmd_t *pmdp, pmd_t pmd) argument
1399 pmd_deref(pmd_t pmd) argument
1409 pmd_pfn(pmd_t pmd) argument
1495 pmd_page_vaddr(pmd_t pmd) argument
1515 pmd_wrprotect(pmd_t pmd) argument
1521 pmd_mkwrite_novma(pmd_t pmd) argument
1529 pmd_mkclean(pmd_t pmd) argument
1535 pmd_mkdirty(pmd_t pmd) argument
1589 pmd_mkyoung(pmd_t pmd) argument
1597 pmd_mkold(pmd_t pmd) argument
1603 pmd_modify(pmd_t pmd, pgprot_t newprot) argument
1727 pmd_t pmd = *pmdp; local
1749 pmd_mkhuge(pmd_t pmd) argument
1769 pmd_t pmd = *pmdp; local
1787 pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID); local
1796 pmd_t pmd = *pmdp; local
1813 pmd_trans_huge(pmd_t pmd) argument
[all...]
page.h
86 typedef struct { unsigned long pmd; } pmd_t; member in struct:__anon17
100 static inline unsigned long pmd_val(pmd_t pmd) argument
102 return pmd.pmd;
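
Note: the page.h hit shows the standard trick of wrapping the raw entry in a one-member struct so pte/pmd/pud values cannot be mixed up by the compiler. A minimal, self-contained sketch of the same pattern (names prefixed example_ to avoid clashing with the real types):

    /* Illustrative strongly-typed table entry, mirroring the s390 pmd_t:
     * the struct wrapper turns accidental integer/pmd mixups into compile errors. */
    typedef struct { unsigned long pmd; } example_pmd_t;

    static inline unsigned long example_pmd_val(example_pmd_t pmd)
    {
            return pmd.pmd;                         /* unwrap, cf. pmd_val() */
    }

    static inline example_pmd_t example_mk_pmd(unsigned long val)
    {
            return (example_pmd_t) { .pmd = val };  /* wrap, cf. __pmd() */
    }
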
/linux-master/arch/s390/boot/
vmem.c
141 static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr, argument
146 pmd_populate(&init_mm, pmd, kasan_early_shadow_pte);
184 static inline bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr, argument
198 * Mimic virt_to_kpte() in lack of init_mm symbol. Skip pmd NULL check though.
281 static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end, argument
287 pte = pte_offset_kernel(pmd, addr);
308 pmd_t *pmd, entry; local
311 pmd = pmd_offset(pud, addr);
312 for (; addr < end; addr = next, pmd++) {
314 if (pmd_none(*pmd)) {
342 pmd_t *pmd; local
[all...]
/linux-master/mm/
vmalloc.c
93 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, argument
102 pte = pte_alloc_kernel_track(pmd, addr, mask);
126 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end, argument
145 if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
148 return pmd_set_huge(pmd, phys_addr, prot);
155 pmd_t *pmd; local
158 pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
159 if (!pmd)
164 if (vmap_try_huge_pmd(pmd, add
341 vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgtbl_mod_mask *mask) argument
357 pmd_t *pmd; local
478 vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr, pgtbl_mod_mask *mask) argument
513 pmd_t *pmd; local
738 pmd_t *pmd; local
[all...]
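
Note: the vmalloc.c hits show the pmd-level mapping path trying a huge (pmd-sized) mapping first via pmd_set_huge() and only then falling back to a pte table. A hedged sketch of that decision (simplified: only alignment and presence are checked, and pmd_set_huge() support is arch-dependent):

    #include <linux/kernel.h>
    #include <linux/pgtable.h>

    /* Illustrative: attempt a PMD_SIZE mapping of phys_addr at addr,
     * returning false when the caller should fall back to pte mappings. */
    static bool example_try_huge_pmd(pmd_t *pmd, unsigned long addr,
                                     phys_addr_t phys_addr, pgprot_t prot)
    {
            if (!IS_ALIGNED(addr, PMD_SIZE) || !IS_ALIGNED(phys_addr, PMD_SIZE))
                    return false;

            if (pmd_present(*pmd))
                    return false;   /* the real code also tries pmd_free_pte_page() here */

            return pmd_set_huge(pmd, phys_addr, prot);
    }
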
/linux-master/fs/proc/
task_mmu.c
572 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, argument
581 if (pmd_present(*pmd)) {
582 page = vm_normal_page_pmd(vma, addr, *pmd);
583 } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
584 swp_entry_t entry = pmd_to_swp_entry(*pmd);
602 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
606 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, argument
612 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, argument
619 ptl = pmd_trans_huge_lock(pmd, vm
1131 pmd_t old, pmd = *pmdp; local
1157 clear_refs_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) argument
1471 pmd_t pmd = *pmdp; local
1838 pagemap_thp_category(struct pagemap_scan_private *p, struct vm_area_struct *vma, unsigned long addr, pmd_t pmd) argument
1884 pmd_t old, pmd = *pmdp; local
2096 pagemap_scan_thp_entry(pmd_t *pmd, unsigned long start, unsigned long end, struct mm_walk *walk) argument
2147 pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start, unsigned long end, struct mm_walk *walk) argument
2602 can_gather_numa_stats_pmd(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr) argument
2627 gather_pte_stats(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) argument
[all...]
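
Note: the task_mmu.c hits follow the standard pagewalk split: pmd_trans_huge_lock() either returns the pmd lock (a stable THP entry to account as one unit) or NULL (fall through to the per-pte path). A hedged skeleton of that dispatch:

    #include <linux/huge_mm.h>
    #include <linux/mm.h>
    #include <linux/pagewalk.h>

    /* Illustrative pmd_entry skeleton in the shape of smaps_pte_range():
     * handle a transparent huge page under the pmd lock, otherwise let
     * the pte-level code run. Sketch only. */
    static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
                                 unsigned long end, struct mm_walk *walk)
    {
            struct vm_area_struct *vma = walk->vma;
            spinlock_t *ptl;

            ptl = pmd_trans_huge_lock(pmd, vma);
            if (ptl) {
                    /* *pmd is stable here; account the whole huge entry */
                    spin_unlock(ptl);
                    return 0;
            }

            /* not a THP: walk the pte table (e.g. with pte_offset_map_lock()) */
            return 0;
    }
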
/linux-master/arch/x86/entry/vsyscall/
vsyscall_64.c
339 pmd_t *pmd; local
349 pmd = pmd_offset(pud, VSYSCALL_ADDR);
350 set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER));
