Searched refs:mdpg (Results 1 - 15 of 15) sorted by relevance

/netbsd-current/sys/uvm/pmap/
H A Dvmpagemd.h79 #define VM_PAGEMD_VMPAGE_P(mdpg) (((mdpg)->mdpg_attrs & VM_PAGEMD_VMPAGE) != 0)
80 #define VM_PAGEMD_REFERENCED_P(mdpg) (((mdpg)->mdpg_attrs & VM_PAGEMD_REFERENCED) != 0)
81 #define VM_PAGEMD_MODIFIED_P(mdpg) (((mdpg)->mdpg_attrs & VM_PAGEMD_MODIFIED) != 0)
82 #define VM_PAGEMD_POOLPAGE_P(mdpg) (((mdpg)->mdpg_attrs & VM_PAGEMD_POOLPAGE) != 0)
83 #define VM_PAGEMD_EXECPAGE_P(mdpg) (((mdpg)->mdpg_attrs & VM_PAGEMD_EXECPAGE) != 0)
120 pmap_pvlist_lock_addr(struct vm_page_md *mdpg) argument
127 pmap_pvlist_lock(struct vm_page_md *mdpg, uintptr_t increment) argument
136 pmap_pvlist_unlock(struct vm_page_md *mdpg) argument
144 pmap_pvlist_locked_p(struct vm_page_md *mdpg) argument
[all...]
H A Dpmap.c418 pmap_page_clear_attributes(struct vm_page_md *mdpg, u_long clear_attributes) argument
420 volatile u_long * const attrp = &mdpg->mdpg_attrs;
441 pmap_page_set_attributes(struct vm_page_md *mdpg, u_long set_attributes) argument
444 atomic_or_ulong(&mdpg->mdpg_attrs, set_attributes);
446 mdpg->mdpg_attrs |= set_attributes;
458 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local
459 pv_entry_t pv = &mdpg->mdpg_first;
467 VM_PAGEMD_PVLIST_READLOCK(mdpg);
468 pmap_pvlist_check(mdpg);
490 pmap_pvlist_check(mdpg);
895 pmap_page_remove(struct vm_page_md *mdpg) argument
1037 struct vm_page_md *mdpg = PMAP_PAGE_TO_MD(pp); local
1191 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local
1274 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local
1345 pmap_page_cache(struct vm_page_md *mdpg, bool cached) argument
1439 struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL); local
1584 struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL); local
1846 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local
1877 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local
1977 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local
1984 pmap_pvlist_check(struct vm_page_md *mdpg) argument
2018 pmap_enter_pv(pmap_t pmap, vaddr_t va, paddr_t pa, struct vm_page_md *mdpg, pt_entry_t *nptep, u_int flags) argument
2163 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local
2281 pmap_pvlist_lock_addr(struct vm_page_md *mdpg) argument
2319 pmap_pvlist_lock_addr(struct vm_page_md *mdpg) argument
2410 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local
2439 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local
[all...]
/netbsd-current/sys/arch/powerpc/include/booke/
H A Dpte.h199 pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot) argument
206 if (mdpg != NULL && !VM_PAGEMD_EXECPAGE_P(mdpg))
209 if (mdpg != NULL && !VM_PAGEMD_EXECPAGE_P(mdpg))
219 if (mdpg != NULL && !VM_PAGEMD_MODIFIED_P(mdpg))
226 pte_flag_bits(struct vm_page_md *mdpg, int flags) argument
229 if (__predict_true(mdpg != NULL)) {
235 if (__predict_false(mdpg != NULL)) {
244 pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot, int flags, bool kernel) argument
256 pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot, int flags) argument
[all...]
H A Dpmap.h129 pmap_md_vca_add(struct vm_page_md *mdpg, vaddr_t va, pt_entry_t *nptep) argument
135 pmap_md_vca_remove(struct vm_page_md *mdpg, vaddr_t va, bool dirty) argument
141 pmap_md_vca_clean(struct vm_page_md *mdpg, vaddr_t va, int op) argument
/netbsd-current/sys/arch/m68k/include/
H A Dpte_coldfire.h181 pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot) argument
187 if (mdpg != NULL && VM_PAGEMD_EXECPAGE_P(mdpg))
191 if (mdpg == NULL || VM_PAGEMD_MODIFIED_P(mdpg))
198 pte_flag_bits(struct vm_page_md *mdpg, int flags) argument
201 if (__predict_true(mdpg != NULL)) {
207 if (__predict_false(mdpg != NULL)) {
216 pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot, argument
221 pt_entry |= pte_flag_bits(mdpg, flags);
228 pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot, int flags) argument
[all...]
H A Dpmap_coldfire.h117 pmap_md_vca_add(struct vm_page_md *mdpg, vaddr_t va, pt_entry_t *nptep) argument
129 pmap_md_vca_clean(struct vm_page_md *mdpg, vaddr_t va, int op) argument
/netbsd-current/sys/arch/mips/mips/
H A Dpmap_machdep.c173 pmap_md_map_ephemeral_page(struct vm_page_md *mdpg, bool locked_p, int prot, argument
176 KASSERT(VM_PAGEMD_VMPAGE_P(mdpg));
178 struct vm_page *pg = VM_MD_TO_PAGE(mdpg);
180 pv_entry_t pv = &mdpg->mdpg_first;
187 KASSERT(!locked_p || VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
226 const pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, 0);
244 (void)VM_PAGEMD_PVLIST_READLOCK(mdpg);
245 if (VM_PAGEMD_CACHED_P(mdpg)
256 VM_PAGEMD_PVLIST_UNLOCK(mdpg);
265 pmap_md_unmap_ephemeral_page(struct vm_page_md *mdpg, bool locked_p) argument
311 pmap_md_vca_page_wbinv(struct vm_page_md *mdpg, bool locked_p) argument
672 pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *onproc) argument
737 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local
770 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local
924 pmap_md_vca_add(struct vm_page_md *mdpg, vaddr_t va, pt_entry_t *ptep) argument
1006 pmap_md_vca_clean(struct vm_page_md *mdpg, int op) argument
1031 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local
[all...]
H A Dmips_machdep.c2500 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local
2501 *colorp = atop(mdpg->mdpg_first.pv_va);
2502 return !mips_cache_badalias(pa, mdpg->mdpg_first.pv_va);
/netbsd-current/sys/arch/riscv/include/
H A Dpte.h168 pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot, bool kernel_p) argument
184 pte_flag_bits(struct vm_page_md *mdpg, int flags, bool kernel_p) argument
190 pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot, argument
196 pte |= pte_flag_bits(mdpg, flags, kernel_p);
197 pte |= pte_prot_bits(mdpg, prot, kernel_p);
199 if (mdpg != NULL) {
202 ((flags & VM_PROT_WRITE) != 0 || VM_PAGEMD_MODIFIED_P(mdpg))) {
209 } else if ((flags & VM_PROT_ALL) || VM_PAGEMD_REFERENCED_P(mdpg)) {
227 pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot, argument
H A Dpmap.h193 pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *onproc) argument
202 pmap_md_vca_add(struct vm_page_md *mdpg, vaddr_t va, pt_entry_t *nptep) argument
208 pmap_md_vca_remove(struct vm_page_md *mdpg, vaddr_t va) argument
213 pmap_md_vca_clean(struct vm_page_md *mdpg, vaddr_t va, int op) argument
/netbsd-current/sys/arch/aarch64/aarch64/
H A Dpmap_machdep.c169 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local
178 pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED | VM_PAGEMD_REFERENCED);
197 pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);
284 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local
285 const pv_entry_t pv = &mdpg->mdpg_first;
291 KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));
310 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local
312 KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));
314 const pv_entry_t pv = &mdpg->mdpg_first;
624 pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *onproc) argument
[all...]
/netbsd-current/sys/arch/mips/include/
H A Dpte.h368 pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot, argument
387 pte_make_enter(paddr_t pa, const struct vm_page_md *mdpg, vm_prot_t prot, argument
404 if (mdpg != NULL) {
416 if (VM_PAGEMD_MODIFIED_P(mdpg)) {
423 if (VM_PAGEMD_MODIFIED_P(mdpg)) {
/netbsd-current/sys/arch/aarch64/include/
H A Dpmap_machdep.h416 pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot, argument
469 pte_make_enter(paddr_t pa, const struct vm_page_md *mdpg, vm_prot_t prot, argument
484 ((flags & VM_PROT_WRITE) != 0 || VM_PAGEMD_MODIFIED_P(mdpg))) {
491 } else if ((flags & VM_PROT_ALL) || VM_PAGEMD_REFERENCED_P(mdpg)) {
/netbsd-current/sys/arch/powerpc/booke/
H A Dtrap.c242 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local
244 if (!VM_PAGEMD_MODIFIED_P(mdpg)) {
245 pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED);
297 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local
300 if (VM_PAGEMD_EXECPAGE_P(mdpg))
312 if (!VM_PAGEMD_EXECPAGE_P(mdpg)) {
316 pmap_page_set_attributes(mdpg, VM_PAGEMD_EXECPAGE);
H A Dbooke_pmap.c90 pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *onproc) argument
92 KASSERT(VM_PAGEMD_VMPAGE_P(mdpg));
94 struct vm_page * const pg = VM_MD_TO_PAGE(mdpg);

Completed in 148 milliseconds