Searched refs:pg (Results 1 - 25 of 413) sorted by relevance

/netbsd-current/external/gpl3/gdb.old/dist/ld/testsuite/ld-ifunc/
pr18841b.c 8 void (*pg)(void) = foo;
9 if (pg != foo_impl)
11 pg();
pr18841c.c 9 void (*pg)(void) = foo;
10 pg();
/netbsd-current/sys/uvm/
uvm_page_status.c 60 uvm_pagegetdirty(struct vm_page *pg) argument
62 struct uvm_object * const uobj __diagused = pg->uobject;
64 KASSERT((~pg->flags & (PG_CLEAN|PG_DIRTY)) != 0);
65 KASSERT(uvm_page_owner_locked_p(pg, false));
66 KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
67 uvm_obj_page_dirty_p(pg));
68 return pg->flags & (PG_CLEAN|PG_DIRTY);
84 uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus) argument
86 struct uvm_object * const uobj = pg->uobject;
87 const unsigned int oldstatus = uvm_pagegetdirty(pg);
155 uvm_pagecheckdirty(struct vm_page *pg, bool pgprotected) argument
[all...]
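
The uvm_page_status.c hits above show how dirtiness is encoded: uvm_pagegetdirty() asserts that PG_CLEAN and PG_DIRTY are never both set and returns pg->flags masked to those two bits, so a page is clean, dirty, or neither (status unknown). The following is only an illustrative sketch of that invariant, not code from the tree; the helper name is made up.

#include <sys/param.h>
#include <uvm/uvm.h>

/*
 * Hypothetical restatement of the KASSERT in uvm_pagegetdirty():
 * (~pg->flags & (PG_CLEAN|PG_DIRTY)) != 0, i.e. a page may carry
 * PG_CLEAN, PG_DIRTY, or neither bit, but never both at once.
 */
static inline bool
page_dirty_bits_consistent(const struct vm_page *pg)
{
	const unsigned int st = pg->flags & (PG_CLEAN | PG_DIRTY);

	return st != (PG_CLEAN | PG_DIRTY);
}
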
uvm_pdpolicy_clockpro.c 147 clockpro_setq(struct vm_page *pg, int qidx) argument
152 pg->pqflags = (pg->pqflags & ~PQ_QMASK) | (qidx * PQ_QFACTOR);
156 clockpro_getq(struct vm_page *pg) argument
160 qidx = (pg->pqflags & PQ_QMASK) / PQ_QFACTOR;
236 pageq_insert_tail(pageq_t *q, struct vm_page *pg) argument
239 TAILQ_INSERT_TAIL(&q->q_q, pg, pdqueue);
245 pageq_insert_head(pageq_t *q, struct vm_page *pg) argument
248 TAILQ_INSERT_HEAD(&q->q_q, pg, pdqueue);
254 pageq_remove(pageq_t *q, struct vm_page *pg) argument
268 struct vm_page *pg; local
282 clockpro_insert_tail(struct clockpro_state *s, int qidx, struct vm_page *pg) argument
292 clockpro_insert_head(struct clockpro_state *s, int qidx, struct vm_page *pg) argument
495 pageobj(struct vm_page *pg) argument
514 pageidx(struct vm_page *pg) argument
522 nonresident_pagelookupremove(struct vm_page *pg) argument
542 nonresident_pagerecord(struct vm_page *pg) argument
642 clockpro_movereferencebit(struct vm_page *pg, bool locked) argument
686 clockpro_clearreferencebit(struct vm_page *pg, bool locked) argument
700 struct vm_page *pg; local
756 clockpro___enqueuetail(struct vm_page *pg) argument
777 clockpro_pageenqueue(struct vm_page *pg) argument
836 clockpro_pagequeue(struct vm_page *pg) argument
850 clockpro_pagedequeue(struct vm_page *pg) argument
873 clockpro_pagerequeue(struct vm_page *pg) argument
882 pageq_remove(clockpro_queue(s, qidx), pg); local
890 handhot_endtest(struct vm_page *pg) argument
909 struct vm_page *pg; local
1018 struct vm_page *pg; local
1135 uvmpdpol_pageactivate_locked(struct vm_page *pg) argument
1153 uvmpdpol_pageactivate(struct vm_page *pg) argument
1160 uvmpdpol_pagedeactivate_locked(struct vm_page *pg) argument
1167 uvmpdpol_pagedeactivate(struct vm_page *pg) argument
1174 uvmpdpol_pagedequeue_locked(struct vm_page *pg) argument
1185 uvmpdpol_pagedequeue(struct vm_page *pg) argument
1192 uvmpdpol_pageenqueue_locked(struct vm_page *pg) argument
1208 uvmpdpol_pageenqueue(struct vm_page *pg) argument
1215 uvmpdpol_pagerealize_locked(struct vm_page *pg) argument
1245 uvmpdpol_pagerealize(struct vm_page *pg) argument
1303 uvmpdpol_pageisqueued_p(struct vm_page *pg) argument
1311 uvmpdpol_pageactivate_p(struct vm_page *pg) argument
1340 struct vm_page *pg; local
1381 struct vm_page *pg; local
1507 struct vm_page *pg; local
1606 struct vm_page *pg; local
[all...]
uvm_page.c 202 * => call should have already set pg's object and offset pointers
207 uvm_pageinsert_object(struct uvm_object *uobj, struct vm_page *pg) argument
210 KASSERT(uobj == pg->uobject);
212 KASSERT((pg->flags & PG_TABLED) == 0);
214 if ((pg->flags & PG_STAT) != 0) {
216 const unsigned int status = pg->flags & (PG_CLEAN | PG_DIRTY);
218 if ((pg->flags & PG_FILE) != 0) {
235 pg->flags |= PG_TABLED;
240 uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg) argument
242 const uint64_t idx = pg
266 uvm_pageremove_object(struct uvm_object *uobj, struct vm_page *pg) argument
300 uvm_pageremove_tree(struct uvm_object *uobj, struct vm_page *pg) argument
675 uvm_vm_page_to_phys(const struct vm_page *pg) argument
703 uvm_page_numa_lookup(struct vm_page *pg) argument
737 struct vm_page *pg; local
1008 struct vm_page *pg; local
1089 struct vm_page *pg; local
1137 struct vm_page *pg; local
1362 uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff) argument
1411 uvm_pagefree(struct vm_page *pg) argument
1583 struct vm_page *pg; local
1635 uvm_pagewait(struct vm_page *pg, krwlock_t *lock, const char *wmesg) argument
1655 uvm_pagewakeup(struct vm_page *pg) argument
1676 uvm_pagewanted_p(struct vm_page *pg) argument
1695 uvm_page_own(struct vm_page *pg, const char *tag) argument
1737 struct vm_page *pg; local
1757 uvm_pagewire(struct vm_page *pg) argument
1785 uvm_pageunwire(struct vm_page *pg) argument
1811 uvm_pagedeactivate(struct vm_page *pg) argument
1830 uvm_pageactivate(struct vm_page *pg) argument
1853 uvm_pagedequeue(struct vm_page *pg) argument
1871 uvm_pageenqueue(struct vm_page *pg) argument
1885 uvm_pagelock(struct vm_page *pg) argument
1914 uvm_pageunlock(struct vm_page *pg) argument
1972 uvm_pagezero(struct vm_page *pg) argument
2010 uvm_page_lookup_freelist(struct vm_page *pg) argument
2025 uvm_page_owner_locked_p(struct vm_page *pg, bool exclusive) argument
2046 uvm_pagereadonly_p(struct vm_page *pg) argument
2078 struct vm_page *pg; local
2121 uvm_page_printit(struct vm_page *pg, bool full, void (*pr)(const char *, ...)) argument
2203 struct vm_page *pg; local
[all...]
uvm_anon.c 107 struct vm_page *pg = anon->an_page, *pg2 __diagused; local
119 if (__predict_true(pg != NULL)) {
128 if (__predict_false(pg->loan_count != 0)) {
130 KASSERT(pg2 == pg);
139 if (__predict_false(pg->uobject != NULL)) {
140 mutex_enter(&pg->interlock);
141 KASSERT(pg->loan_count > 0);
142 pg->loan_count--;
143 pg->uanon = NULL;
144 mutex_exit(&pg
212 struct vm_page *pg; local
277 struct vm_page *pg; local
362 struct vm_page *pg = anon->an_page; local
[all...]
uvm_object.c 135 struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL; local
165 pg = uvm_loanbreak(pgs[i]);
166 if (!pg) {
173 pgs[i] = pg;
219 struct vm_page *pg; local
224 pg = uvm_pagelookup(uobj, offset);
226 KASSERT(pg != NULL);
227 KASSERT(!(pg->flags & PG_RELEASED));
229 uvm_pagelock(pg);
230 uvm_pageunwire(pg);
259 uvm_obj_page_tag_p(struct vm_page *pg, int tag) argument
270 uvm_obj_page_set_tag(struct vm_page *pg, int tag) argument
281 uvm_obj_page_clear_tag(struct vm_page *pg, int tag) argument
292 uvm_obj_page_dirty_p(struct vm_page *pg) argument
299 uvm_obj_page_set_dirty(struct vm_page *pg) argument
306 uvm_obj_page_clear_dirty(struct vm_page *pg) argument
313 uvm_obj_page_writeback_p(struct vm_page *pg) argument
320 uvm_obj_page_set_writeback(struct vm_page *pg) argument
327 uvm_obj_page_clear_writeback(struct vm_page *pg) argument
343 struct vm_page *pg; local
[all...]
uvm_loan.c 61 * loans are tracked by pg->loan_count. an O->A page will have both
65 * each loan of a page to the kernel bumps the pg->wire_count. the
80 * locking: to read pg->loan_count either the owner or pg->interlock
81 * must be locked. to modify pg->loan_count, both the owner of the page
82 * and pg->interlock must be locked. pg->flags is (as always) locked by
88 * from dying pg->interlock should be locked. this forces us to sometimes
346 struct vm_page *pg; local
359 pg
457 struct vm_page *pg = pgpp[i]; local
602 struct vm_page *pg; local
824 struct vm_page *pg; local
952 struct vm_page *pg; local
1035 struct vm_page *pg; local
1098 struct vm_page *pg; local
[all...]
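
The uvm_loan.c comment lines above spell out the locking rule for pg->loan_count: either the page owner's lock or pg->interlock is enough to read it, but both must be held to modify it (the uvm_anon.c hit above follows the same pattern when dropping a loan). A minimal sketch of that rule follows; it is not code from the tree, the helper names are invented, and the owner lock is assumed to be checked via uvm_page_owner_locked_p().

#include <sys/param.h>
#include <sys/mutex.h>
#include <uvm/uvm.h>

/* Read pg->loan_count: the owner lock or pg->interlock suffices. */
static inline unsigned int
loan_count_read(struct vm_page *pg)
{
	KASSERT(uvm_page_owner_locked_p(pg, false) ||
	    mutex_owned(&pg->interlock));
	return pg->loan_count;
}

/* Drop one loan: the owner lock and pg->interlock are both required. */
static void
loan_count_drop(struct vm_page *pg)
{
	KASSERT(uvm_page_owner_locked_p(pg, false));
	mutex_enter(&pg->interlock);
	KASSERT(pg->loan_count > 0);
	pg->loan_count--;
	mutex_exit(&pg->interlock);
}
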
uvm_pdpolicy_clock.c 248 struct vm_page *pg; local
256 pg = TAILQ_NEXT(&ss->ss_marker, pdqueue);
257 if (pg == NULL) {
260 KASSERT((pg->flags & PG_MARKER) == 0);
268 mutex_enter(&pg->interlock);
269 if (uvmpdpol_pagerealize_locked(pg)) {
270 mutex_exit(&pg->interlock);
279 TAILQ_INSERT_AFTER(&pdpol_state.s_inactiveq, pg,
289 anon = pg->uanon;
290 uobj = pg
444 uvmpdpol_pagedeactivate_locked(struct vm_page *pg) argument
468 uvmpdpol_pagedeactivate(struct vm_page *pg) argument
483 uvmpdpol_pageactivate_locked(struct vm_page *pg) argument
500 uvmpdpol_pageactivate(struct vm_page *pg) argument
510 uvmpdpol_pagedequeue_locked(struct vm_page *pg) argument
531 uvmpdpol_pagedequeue(struct vm_page *pg) argument
541 uvmpdpol_pageenqueue(struct vm_page *pg) argument
556 uvmpdpol_pageisqueued_p(struct vm_page *pg) argument
574 uvmpdpol_pageactivate_p(struct vm_page *pg) argument
694 uvmpdpol_pagerealize_locked(struct vm_page *pg) argument
728 struct vm_page *pg; local
770 uvmpdpol_pagerealize(struct vm_page *pg) argument
794 struct vm_page *pg; local
[all...]
/netbsd-current/regress/sys/uvm/pdsim/
pdsim.c 61 struct vm_page *pg; local
63 pg = TAILQ_FIRST(&freeq);
64 if (pg == NULL) {
67 TAILQ_REMOVE(&freeq, pg, pageq);
68 pg->offset = idx << PAGE_SHIFT;
69 pg->uanon = NULL;
70 pg->uobject = obj;
71 pg->pqflags = 0;
72 obj->pages[idx] = pg;
76 return pg;
80 pdsim_pagefree(struct vm_page *pg) argument
112 struct vm_page *pg; local
120 pdsim_pagemarkreferenced(struct vm_page *pg) argument
127 pmap_is_referenced(struct vm_page *pg) argument
134 pmap_clear_reference(struct vm_page *pg) argument
146 struct vm_page *pg; local
165 struct vm_page *pg; local
182 struct vm_page *pg; local
[all...]
/netbsd-current/external/cddl/dtracetoolkit/dist/Bin/
pgpginbyproc.d 10 vminfo:::pgpgin { @pg[execname] = sum(arg0); }
pgpginbypid.d 46 @pg[pid, execname] = sum(arg0);
52 printa("%6d %-16s %@16d\n", @pg);
/netbsd-current/external/cddl/dtracetoolkit/dist/Mem/
pgpginbyproc.d 10 vminfo:::pgpgin { @pg[execname] = sum(arg0); }
pgpginbypid.d 46 @pg[pid, execname] = sum(arg0);
52 printa("%6d %-16s %@16d\n", @pg);
/netbsd-current/sys/arch/zaurus/include/
kloader.h 38 #define PG_VADDR(pg) kloader_phystov(VM_PAGE_TO_PHYS(pg))
/netbsd-current/sys/arch/hpcmips/include/
kloader.h 39 #define PG_VADDR(pg) MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg))
/netbsd-current/sys/arch/hpcsh/include/
kloader.h 40 #define PG_VADDR(pg) SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg))
/netbsd-current/sys/arch/landisk/include/
kloader.h 38 #define PG_VADDR(pg) SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg))
/netbsd-current/sys/arch/dreamcast/include/
kloader.h 40 #define PG_VADDR(pg) SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg))
/netbsd-current/sys/arch/hpcarm/include/
kloader.h 39 #define PG_VADDR(pg) kloader_phystov(VM_PAGE_TO_PHYS(pg))
/netbsd-current/sys/arch/evbsh3/include/
kloader.h 38 #define PG_VADDR(pg) SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg))
/netbsd-current/sys/arch/playstation2/include/
kloader.h 39 #define PG_VADDR(pg) MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg))
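
The kloader.h hits above all define the same idea: PG_VADDR(pg) takes the page's physical address via VM_PAGE_TO_PHYS() and turns it into a directly mapped kernel virtual address, using whatever direct map the port provides (KSEG0 on the MIPS ports, P1SEG on the SH ports, kloader_phystov() on zaurus and hpcarm). A hypothetical use, assuming a kloader-like context on one of those ports; the helper is invented for illustration only.

#include <sys/param.h>
#include <sys/systm.h>
#include <uvm/uvm.h>
#include <machine/kloader.h>	/* assumed to provide PG_VADDR() on these ports */

/*
 * Hypothetical helper: allocate an unmanaged page and zero it through
 * the direct-mapped virtual address that PG_VADDR() computes.
 */
static struct vm_page *
alloc_zeroed_page(void)
{
	struct vm_page *pg;

	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg != NULL)
		memset((void *)PG_VADDR(pg), 0, PAGE_SIZE);
	return pg;
}
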
/netbsd-current/sys/compat/common/
tty_60.c 55 ptmget_to_ptmget60(struct ptmget *pg, struct compat_60_ptmget *pg60) argument
58 pg60->cfd = pg->cfd;
59 pg60->sfd = pg->sfd;
60 strlcpy(pg60->cn, pg->cn, sizeof(pg60->cn));
61 strlcpy(pg60->sn, pg->sn, sizeof(pg60->sn));
62 if (strlen(pg->cn) >= sizeof(pg60->cn)
63 || strlen(pg->sn) >= sizeof(pg60->sn))
75 struct ptmget *pg; local
87 pg = kmem_alloc(sizeof(*pg), KM_SLEE
[all...]
/netbsd-current/sys/rump/librump/rumpvfs/
vm_vfs.c 43 struct vm_page *pg; local
48 pg = pgs[i];
49 KASSERT((pg->flags & PG_PAGEOUT) == 0 ||
50 (pg->flags & PG_FAKE) == 0);
52 if (pg->flags & PG_FAKE) {
54 pg->flags &= ~PG_FAKE;
55 KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN);
56 uvm_pagelock(pg);
57 uvm_pageenqueue(pg);
58 uvm_pageunlock(pg);
[all...]
/netbsd-current/sys/arch/xen/x86/
xen_bus_dma.c 83 struct vm_page *pg, *pgnext; local
104 for (pg = mlistp->tqh_first; pg != NULL; pg = pg->pageq.queue.tqe_next) {
105 pa = VM_PAGE_TO_PHYS(pg);
140 pg = NULL;
145 for (pg = mlistp->tqh_first, i = 0; pg != NULL; pg
[all...]

Completed in 369 milliseconds
