Searched refs:pmd (Results 276 - 300 of 300) sorted by relevance


/linux-master/arch/powerpc/mm/nohash/
8xx.c 219 int pmd_clear_huge(pmd_t *pmd) argument
/linux-master/arch/riscv/mm/
init.c 212 * any allocation to happen between _end and the next pmd aligned page.
1197 * FIX_BTMAP_BEGIN should lie in the same pmd. Verify that and warn
1401 void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node, argument
1404 pmd_set_huge(pmd, virt_to_phys(p), PAGE_KERNEL);
1444 pmd_t *pmd; local
1462 lvl = "pmd";
1463 pmd = pmd_alloc(&init_mm, pud, addr);
1464 if (!pmd)
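
The riscv hits show two pmd-level idioms: allocating a missing pmd with pmd_alloc() (the "lvl" string only labels the failure message), and mapping the vmemmap with a single huge leaf via pmd_set_huge(). A minimal sketch of both, assuming kernel init context; the helper name is ours:

        #include <linux/mm.h>

        static pmd_t * __init pmd_alloc_or_die(pud_t *pud, unsigned long addr)
        {
                pmd_t *pmd = pmd_alloc(&init_mm, pud, addr); /* allocates if missing */

                if (!pmd)
                        panic("failed to allocate pmd for va %lx\n", addr);
                /* a leaf mapping would then be, as in the hit above:
                 *   pmd_set_huge(pmd, virt_to_phys(p), PAGE_KERNEL);
                 * covering PMD_SIZE of the vmemmap with one entry */
                return pmd;
        }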
/linux-master/mm/
migrate.c 306 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, argument
314 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
367 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd) argument
371 ptl = pmd_lock(mm, pmd);
372 if (!is_pmd_migration_entry(*pmd))
374 migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
1553 * the hugepage is pmd-based or not before kicking migration.
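
Note the locking shape in the pmd_migration_entry_wait() hit: take the pmd's split lock, recheck that the entry is still a migration entry, and only then sleep. A sketch mirroring that body, assuming kernel context (the function name here is ours):

        #include <linux/mm.h>
        #include <linux/swapops.h>

        static void wait_if_pmd_migrating(struct mm_struct *mm, pmd_t *pmd)
        {
                spinlock_t *ptl = pmd_lock(mm, pmd);    /* serialize vs THP ops */

                if (!is_pmd_migration_entry(*pmd)) {    /* recheck under the lock */
                        spin_unlock(ptl);
                        return;
                }
                /* drops ptl internally and sleeps until migration completes */
                migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
        }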
internal.h 209 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
746 * under page table lock for the pte/pmd being added or removed.
790 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
1106 unsigned long addr, pmd_t *pmd,
swapfile.c 1758 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, argument
1781 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1849 static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, argument
1866 pte = pte_offset_map(pmd, addr);
1891 .pmd = pmd,
1908 ret = unuse_pte(vma, pmd, addr, entry, folio);
1929 pmd_t *pmd; local
1933 pmd = pmd_offset(pud, addr);
1937 ret = unuse_pte_range(vma, pmd, addr, next, type);
[all...]
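
unuse_pmd_range() above is the stock one-level descent: step a pmd cursor across the pud's range and hand each populated pmd to the pte-level scan. A generic sketch of that loop, per-pmd work elided; assumes kernel context:

        #include <linux/mm.h>

        static int for_each_pmd(pud_t *pud, unsigned long addr, unsigned long end)
        {
                pmd_t *pmd = pmd_offset(pud, addr);
                unsigned long next;

                do {
                        next = pmd_addr_end(addr, end); /* clamp to pmd boundary */
                        if (pmd_none_or_clear_bad(pmd))
                                continue;
                        /* per-pmd work: map ptes, scan addr..next, unmap */
                } while (pmd++, addr = next, addr != end);
                return 0;
        }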
ksm.c 615 static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next, argument
624 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1376 pmd_t *pmd; local
1389 pmd = mm_find_pmd(mm, addr);
1390 if (!pmd)
1397 pmde = pmdp_get_lockless(pmd);
1405 ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
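
The ksm.c hit pairs a lockless pmd snapshot (pmdp_get_lockless()) with a later pte_offset_map_lock(); the snapshot can race with THP collapse, so the locked map is what actually revalidates. A sketch of that two-step, assuming kernel context; the helper name is hypothetical:

        #include <linux/mm.h>

        static pte_t *map_pte_if_table(struct mm_struct *mm, pmd_t *pmd,
                                       unsigned long addr, spinlock_t **ptlp)
        {
                pmd_t pmde = pmdp_get_lockless(pmd);    /* racy snapshot */

                if (pmd_none(pmde) || pmd_trans_huge(pmde))
                        return NULL;                    /* no pte table to map */
                /* returns NULL by itself if the pmd changed underneath us */
                return pte_offset_map_lock(mm, pmd, addr, ptlp);
        }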
percpu.c 3209 pmd_t *pmd; local
3228 pmd = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
3229 if (!pmd)
3231 pud_populate(&init_mm, pud, pmd);
3234 pmd = pmd_offset(pud, addr);
3235 if (!pmd_present(*pmd)) {
3241 pmd_populate_kernel(&init_mm, pmd, new);
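
The percpu.c hits are the boot-time population idiom: if the pud slot is empty, carve a pmd table out of memblock and link it in; one level down, pmd_populate_kernel() hangs a pte table off the pmd the same way. A condensed sketch, assuming an early __init context where memblock is still usable (PMD_TABLE_SIZE as in the hit above):

        #include <linux/memblock.h>
        #include <asm/pgalloc.h>

        static void __init ensure_pmd_table(pud_t *pud)
        {
                pmd_t *pmd;

                if (!pud_none(*pud))
                        return;                         /* already populated */
                pmd = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
                if (!pmd)
                        panic("%s: pmd table alloc failed\n", __func__);
                pud_populate(&init_mm, pud, pmd);       /* link table into pud */
        }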
mempolicy.c 503 static void queue_folios_pmd(pmd_t *pmd, struct mm_walk *walk) argument
508 if (unlikely(is_pmd_migration_entry(*pmd))) {
512 folio = pfn_folio(pmd_pfn(*pmd));
535 static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr, argument
546 ptl = pmd_trans_huge_lock(pmd, vma);
548 queue_folios_pmd(pmd, walk);
553 mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
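
queue_folios_pte_range() shows the canonical THP-or-ptes dispatch: pmd_trans_huge_lock() returns the held lock only when the pmd really maps a huge page, otherwise fall through to mapping the pte level. A sketch of the shape (function name and per-entry work are ours):

        #include <linux/mm.h>
        #include <linux/pagewalk.h>

        static int visit_range(pmd_t *pmd, unsigned long addr,
                               unsigned long end, struct mm_walk *walk)
        {
                pte_t *pte, *mapped_pte;
                spinlock_t *ptl;

                ptl = pmd_trans_huge_lock(pmd, walk->vma);
                if (ptl) {                      /* huge: one entry covers it all */
                        /* act on the pmd-mapped folio */
                        spin_unlock(ptl);
                        return 0;
                }
                mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
                if (!pte)
                        return 0;               /* pmd changed; nothing to walk */
                for (; addr != end; pte++, addr += PAGE_SIZE) {
                        /* act on each pte */
                }
                pte_unmap_unlock(mapped_pte, ptl);
                return 0;
        }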
memcontrol.c 6194 * Caller should make sure that pmd_trans_huge(pmd) is true.
6197 unsigned long addr, pmd_t pmd, union mc_target *target)
6203 if (unlikely(is_swap_pmd(pmd))) {
6205 !is_pmd_migration_entry(pmd));
6208 page = pmd_page(pmd);
6228 unsigned long addr, pmd_t pmd, union mc_target *target)
6234 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, argument
6242 ptl = pmd_trans_huge_lock(pmd, vma);
6249 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6255 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6196 get_mctgt_type_thp(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd, union mc_target *target) argument
6227 get_mctgt_type_thp(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd, union mc_target *target) argument
6431 mem_cgroup_move_charge_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) argument
[all...]
filemap.c 3210 ptep = pte_offset_map(vmf->pmd, vmf->address);
3414 if (pmd_trans_huge(*vmf->pmd)) {
3420 if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
3430 if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
3431 pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
3587 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
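
The filemap.c fault-around hits show how a preallocated pte table is installed: only while the pmd is still none, via pmd_install(), which takes the table by reference so it can be consumed under the page-table lock. A minimal sketch, assuming a fault-path struct vm_fault; the wrapper name is ours:

        #include <linux/mm.h>

        static void install_prealloc_pte(struct vm_fault *vmf)
        {
                struct mm_struct *mm = vmf->vma->vm_mm;

                if (pmd_trans_huge(*vmf->pmd))
                        return;                 /* raced with THP collapse */
                /* pmd_install() rechecks pmd_none() under the lock and
                 * NULLs vmf->prealloc_pte once the table is consumed */
                if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
                        pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
        }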
swap_state.c 841 pte = pte_offset_map(vmf->pmd, addr);
hugetlb.c 416 * participate in pmd sharing. This is only a possible
5396 * returned ptep could go away if part of a shared pmd and
5830 * for pmd sharing. And, i_mmap_rwsem is required to set up
5831 * pmd sharing. This is important as page tables for this
7114 * vma specific semaphore used for pmd sharing and fault/truncation
7347 * Determine if start,end range within vma could be mapped by shared pmd.
7349 * shared pmd mappings.
7374 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
7376 * !shared pmd case because we can allocate the pmd late
7533 pmd_t *pmd; local
[all...]
/linux-master/arch/arm/kernel/
traps.c 765 void __pmd_error(const char *file, int line, pmd_t pmd) argument
767 pr_err("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
/linux-master/arch/sh/kernel/cpu/sh3/
entry.S 98 ! Find the pmd/pte entry and loadtlb
/linux-master/include/linux/
rmap.h 646 pmd_t *pmd; member in struct:page_vma_mapped_walk
/linux-master/fs/
userfaultfd.c 298 pmd_t *pmd, _pmd; local
314 pmd = pmd_offset(pud, address);
316 _pmd = pmdp_get_lockless(pmd);
330 pte = pte_offset_map(pmd, address);
/linux-master/drivers/net/ethernet/intel/ice/
ice_devlink.c 612 "%-6u", options[i].pmd);
724 if (count == options[j].pmd) {
1609 attrs->lanes = max_t(int, attrs->lanes, options[i].pmd);
ice_ptp_hw.c 2236 u64 total_offset, pmd, val; local
2295 err = ice_phy_calc_pmd_adj_e82x(hw, port, link_spd, fec_mode, &pmd);
2303 total_offset += pmd;
2305 total_offset -= pmd;
ice_common.c 3846 options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
3847 options[i].pmd);
3851 options[i].pmd, options[i].max_lane_speed);
ice_adminq_cmd.h 1559 u8 pmd; member in struct:ice_aqc_get_port_options_elem
/linux-master/include/trace/events/
xen.h 166 __entry->pmdval = pmdval.pmd),
/linux-master/sound/soc/codecs/
cs42l43.c 1467 static int cs42l43_dapm_wait_completion(struct completion *pmu, struct completion *pmd, argument
1477 reinit_completion(pmd);
1483 time_left = wait_for_completion_timeout(pmd, msecs_to_jiffies(timeout_ms));
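
The cs42l43 hit is not a page-table pmd at all: pmu/pmd here are DAPM power-up/power-down completions. The pattern is the usual reinit-then-wait; a sketch under that reading (helper name ours):

        #include <linux/completion.h>
        #include <linux/errno.h>
        #include <linux/jiffies.h>

        static int wait_power_down(struct completion *pmd, unsigned int timeout_ms)
        {
                unsigned long time_left;

                reinit_completion(pmd);         /* arm before the event can fire */
                /* ...trigger the power-down here... */
                time_left = wait_for_completion_timeout(pmd,
                                                msecs_to_jiffies(timeout_ms));
                return time_left ? 0 : -ETIMEDOUT;
        }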
/linux-master/arch/x86/kvm/mmu/
mmu.c 3091 pmd_t pmd; local
3134 pmd = READ_ONCE(*pmd_offset(&pud, hva));
3135 if (pmd_none(pmd) || !pmd_present(pmd))
3138 if (pmd_leaf(pmd))
3208 * the pmd can't be split from under us.
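
The kvm/mmu.c hits probe the host page tables locklessly to pick the largest mapping level: READ_ONCE() snapshots each entry and pmd_leaf() spots a block mapping. A sketch of just the pmd step, assuming x86 KVM context where the walk already produced a stable pud value:

        #include <linux/mm.h>

        static int host_pmd_level(pud_t pud, unsigned long hva)
        {
                pmd_t pmd = READ_ONCE(*pmd_offset(&pud, hva));  /* lockless */

                if (pmd_none(pmd) || !pmd_present(pmd))
                        return PG_LEVEL_4K;     /* nothing mapped at this step */
                if (pmd_leaf(pmd))
                        return PG_LEVEL_2M;     /* pmd is a 2M block mapping */
                return PG_LEVEL_4K;             /* pmd points to a pte table */
        }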
/linux-master/arch/mips/mm/
tlbex.c 679 unsigned int pmd, int lid)
681 UASM_i_LW(p, tmp, 0, pmd);
758 * TMP will be clobbered, PTR will hold the pmd entry.
838 uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
839 uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
841 uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
1143 uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
1154 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
1162 /* get pmd offset in bytes */
1170 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
678 build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp, unsigned int pmd, int lid) argument
[all...]
/linux-master/kernel/events/
core.c 7546 pmd_t *pmdp, pmd; local
7575 pmd = pmdp_get_lockless(pmdp);
7576 if (!pmd_present(pmd))
7579 if (pmd_leaf(pmd))
7580 return pmd_leaf_size(pmd);
7582 ptep = pte_offset_map(&pmd, addr);
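
perf's page-size probe (perf_get_pgtable_size()) uses the same lockless snapshot, then pmd_leaf_size() to report the hardware page size when the pmd is a leaf. A sketch of that step, assuming the walk already fetched pmdp; the helper name is ours:

        #include <linux/mm.h>

        static u64 pmd_step_size(pmd_t *pmdp, unsigned long addr)
        {
                pmd_t pmd = pmdp_get_lockless(pmdp);    /* racy but safe to read */

                if (!pmd_present(pmd))
                        return 0;                       /* nothing mapped */
                if (pmd_leaf(pmd))
                        return pmd_leaf_size(pmd);      /* huge: leaf page size */
                /* otherwise descend: pte_offset_map(&pmd, addr), etc. */
                return 0;
        }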

Completed in 393 milliseconds
