Searched refs:page (Results 1 - 25 of 107) sorted by relevance

/haiku/src/system/kernel/vm/
PageCacheLocker.cpp
13 PageCacheLocker::_IgnorePage(vm_page* page) argument
15 if (page->busy || page->State() == PAGE_STATE_WIRED
16 || page->State() == PAGE_STATE_FREE || page->State() == PAGE_STATE_CLEAR
17 || page->State() == PAGE_STATE_UNUSED || page->WiredCount() > 0)
25 PageCacheLocker::Lock(vm_page* page, bool dontWait) argument
27 if (_IgnorePage(page))
31 VMCache* cache = vm_cache_acquire_locked_page_cache(page, dontWait);
[all...]
VMPageQueue.h
32 inline void Append(vm_page* page);
33 inline void Prepend(vm_page* page);
35 vm_page* page);
36 inline void Remove(vm_page* page);
38 inline void Requeue(vm_page* page, bool tail);
40 inline void AppendUnlocked(vm_page* page);
42 inline void PrependUnlocked(vm_page* page);
43 inline void RemoveUnlocked(vm_page* page);
45 inline void RequeueUnlocked(vm_page* page, bool tail);
49 inline vm_page* Previous(vm_page* page) const;
70 Append(vm_page* page) argument
89 Prepend(vm_page* page) argument
108 InsertAfter(vm_page* insertAfter, vm_page* page) argument
127 Remove(vm_page* page) argument
148 vm_page* page = fPages.RemoveHead(); local
167 Requeue(vm_page* page, bool tail) argument
182 AppendUnlocked(vm_page* page) argument
213 PrependUnlocked(vm_page* page) argument
221 RemoveUnlocked(vm_page* page) argument
237 RequeueUnlocked(vm_page* page, bool tail) argument
[all...]
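A minimal sketch of the two usage styles suggested by the VMPageQueue interface above. The assumption (not visible in these matches) is that the plain methods expect the caller to already hold the queue's lock, while the *Unlocked variants acquire it internally; the helper name is hypothetical.

    static void
    requeue_page(VMPageQueue& queue, vm_page* page, bool queueLockHeld)
    {
        if (queueLockHeld) {
            // Plain variants: assumed to rely on the caller holding the
            // queue's lock, e.g. while batching several pages.
            queue.Remove(page);
            queue.Append(page);
        } else {
            // *Unlocked variant: assumed to take the queue's lock itself;
            // `true` presumably requeues the page at the tail.
            queue.RequeueUnlocked(page, true);
        }
    }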
PageCacheLocker.h
16 inline PageCacheLocker(vm_page* page,
22 bool Lock(vm_page* page, bool dontWait = true);
26 bool _IgnorePage(vm_page* page);
32 PageCacheLocker::PageCacheLocker(vm_page* page, bool dontWait) argument
36 Lock(page, dontWait);
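A minimal usage sketch based on the PageCacheLocker declarations above: construction attempts to lock the cache owning the page (busy, wired, free, clear, and unused pages are skipped per _IgnorePage), and the lock is released when the locker leaves scope. The IsLocked() accessor and the helper name are assumptions not shown in these matches.

    #include "PageCacheLocker.h"

    static void
    with_locked_page_cache(vm_page* page)
    {
        // dontWait defaults to true per the constructor above, so this does
        // not block if the cache lock cannot be acquired immediately.
        PageCacheLocker locker(page);
        if (!locker.IsLocked())    // assumed accessor: page ignored or lock failed
            return;

        // ... work on the page while its cache is locked ...
    }   // cache lock dropped when `locker` is destroyed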
VMTranslationMap.cpp
62 vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE); local
63 if (page != NULL) {
64 DEBUG_PAGE_ACCESS_START(page);
66 DEBUG_PAGE_ACCESS_END(page);
85 pages that live in the top cache area going to be freed and the page
103 vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE); local
104 if (page != NULL) {
105 DEBUG_PAGE_ACCESS_START(page);
107 DEBUG_PAGE_ACCESS_END(page);
153 Looks up the page, update
166 vm_page* page = vm_lookup_page(pageNumber); local
229 vm_page* page = vm_lookup_page(pageNumber); local
[all...]
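The VMTranslationMap.cpp matches show a recurring pattern: translate a physical address to its vm_page via vm_lookup_page() and bracket any state change with the DEBUG_PAGE_ACCESS markers. A small sketch of that pattern; the helper name and the "mark modified" body are illustrative only.

    static void
    mark_physical_page_modified(phys_addr_t physicalAddress)
    {
        // Physical address -> page frame number -> vm_page (NULL for frames
        // not managed by the VM).
        vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
        if (page == NULL)
            return;

        DEBUG_PAGE_ACCESS_START(page);  // checks exclusive access in debug builds
        page->modified = true;          // illustrative state change
        DEBUG_PAGE_ACCESS_END(page);
    }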
VMCache.cpp
71 vm_page* page; member in struct:VMCache::PageEventWaiter
340 InsertPage(VMCache* cache, vm_page* page, off_t offset) argument
343 fPage(page),
351 out.Print("vm cache insert page: cache: %p, page: %p, offset: %"
363 RemovePage(VMCache* cache, vm_page* page) argument
366 fPage(page)
373 out.Print("vm cache remove page: cache: %p, page: %p", fCache,
559 vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait) argument
788 vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT)); local
800 InsertPage(vm_page* page, off_t offset) argument
838 RemovePage(vm_page* page) argument
864 MovePage(vm_page* page, off_t offset) argument
896 MovePage(vm_page* page) argument
945 WaitForPageEvents(vm_page* page, uint32 events, bool relock) argument
1503 _NotifyPageEvents(vm_page* page, uint32 events) argument
[all...]
vm_page.cpp
63 #define PAGE_ASSERT(page, condition) \
64 ASSERT_PRINT((condition), "page: %p", (page))
67 // this many pages will be cleared at once in the page scrubber thread
70 // maximum I/O priority of the page writer
76 // The page reserve an allocation of the certain priority must not touch.
83 // Minimum number of free pages the page daemon will try to achieve.
88 // Wait interval between page daemon runs.
98 // vm_page::usage_count buff an accessed page receives in a scan.
100 // vm_page::usage_count debuff an unaccessed page receives in a scan.
434 ActivatePage(vm_page* page) argument
455 DeactivatePage(vm_page* page) argument
476 FreedPageSwap(vm_page* page) argument
509 WritePage(vm_page* page) argument
542 SetPageState(vm_page* page, uint8 newState) argument
764 list_page(vm_page* page) argument
823 struct vm_page *page; local
932 struct vm_page* page; local
1059 struct vm_page *page = queue->Head(); local
1359 track_page_usage(vm_page* page) argument
1522 free_page(vm_page* page, bool clear) argument
1593 set_page_state(vm_page *page, int pageState) argument
1694 move_page_to_appropriate_queue(vm_page *page) argument
1714 clear_page(struct vm_page *page) argument
1749 vm_page *page = &sPages[startPage + i]; local
1821 vm_page *page[SCRUB_SIZE]; local
1906 vm_page* page = sModifiedPageQueue.Head(); local
2009 SetTo(vm_page* page) argument
2111 SetTo(PageWriterRun* run, vm_page* page, int32 maxPages) argument
2129 AddPage(vm_page* page) argument
2254 AddPage(vm_page* page) argument
2406 vm_page *page = next_modified_page(maxPagesToSee); local
2557 vm_page *page; local
2590 free_cached_page(vm_page *page, bool dontWait) argument
2630 vm_page *page = find_cached_page_candidate(marker); local
2670 vm_page* page = queue.Head(); local
2763 vm_page* page = nextPage; local
2892 vm_page* page = nextPage; local
3161 vm_page* page = it.Next(); local
3246 vm_page_schedule_write_page(vm_page *page) argument
3498 vm_mark_page_inuse(page_num_t page) argument
3594 vm_page* page = queue->RemoveHeadUnlocked(); local
3716 vm_page& page = sPages[start + i]; local
3781 vm_page& page = sPages[nextIndex]; local
4000 vm_page_is_dummy(struct vm_page *page) argument
4019 vm_page_free_etc(VMCache* cache, vm_page* page, vm_page_reservation* reservation) argument
4035 vm_page_set_state(vm_page *page, int pageState) argument
4053 vm_page_requeue(struct vm_page *page, bool tail) argument
[all...]
/haiku/src/libs/print/libprint/
PagesView.cpp
49 int page = fReverse ? 4 - i : i; local
50 _DrawPages(position, page, 2);
62 int page; local
64 page = 1 + i;
66 page = count - i;
68 _DrawPage(position, page);
85 BPoint page[5]; local
86 page[0].x = position.x + 3;
87 page[0].y = position.y;
88 page[
[all...]
PrintJobReader.cpp
158 for (int32 page = 0; page < fNumberOfPages; ++page) {
159 fPageIndex[page] = fJobFile.Position();
162 && fPageIndex[page] < next_page) {
174 status_t PrintJobReader::GetPage(int32 page, PrintJobPage& pjp) argument
176 if (0 <= page && page < fNumberOfPages) {
177 PrintJobPage p(&fJobFile, fPageIndex[page]);
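Based on the GetPage() signature above, a minimal page-iteration sketch. NumberOfPages() is assumed as the public accessor for fNumberOfPages and is not shown in these matches; the DumpPrintJob.cpp results further down show the same loop in its original context.

    #include <stdio.h>

    static void
    dump_all_pages(PrintJobReader& reader)
    {
        for (int32 page = 0; page < reader.NumberOfPages(); page++) {
            PrintJobPage pjp;
            if (reader.GetPage(page, pjp) != B_OK) {  // seeks to fPageIndex[page]
                fprintf(stderr, "Error reading page %d\n", (int)page + 1);
                return;
            }
            // ... replay or inspect the page's picture data ...
        }
    }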
/haiku/headers/private/kernel/vm/
vm_types.h
68 struct vm_page *page; member in struct:vm_page_mapping
125 // in page size units
227 vm_page_debug_access_start(vm_page* page) argument
230 thread_id previousThread = atomic_test_and_set(&page->accessing_thread,
233 panic("Invalid concurrent access to page 0x%" B_PRIXPHYSADDR " (start), currently "
234 "accessed by: %" B_PRId32 "@! page -m %p; sc %" B_PRId32 "; cache _cache",
235 page->physical_page_number * B_PAGE_SIZE, previousThread, page, previousThread);
241 vm_page_debug_access_end(vm_page* page) argument
244 thread_id previousThread = atomic_test_and_set(&page
256 vm_page_debug_access_check(vm_page* page) argument
268 vm_page_debug_access_transfer(vm_page* page, thread_id expectedPreviousThread) argument
[all...]
vm_page.h
35 status_t vm_mark_page_inuse(page_num_t page);
37 void vm_page_free_etc(VMCache* cache, vm_page* page,
40 void vm_page_set_state(struct vm_page *page, int state);
41 void vm_page_requeue(struct vm_page *page, bool tail);
54 void vm_page_schedule_write_page(struct vm_page *page);
70 bool vm_page_is_dummy(struct vm_page *page);
78 vm_page_free(struct VMCache *cache, struct vm_page *page) argument
80 vm_page_free_etc(cache, page, NULL);
/haiku/src/system/libroot/posix/malloc_debug/
guarded_heap.cpp
260 guarded_heap_print_stack_traces(guarded_heap_page& page) argument
262 if (page.alloc_stack_trace_depth > 0) {
264 page.alloc_stack_trace_depth);
265 guarded_heap_print_stack_trace(page.stack_trace,
266 page.alloc_stack_trace_depth);
269 if (page.free_stack_trace_depth > 0) {
271 page.free_stack_trace_depth);
273 &page.stack_trace[page.alloc_stack_trace_depth],
274 page
317 guarded_heap_page& page = area.pages[startPageIndex + i]; local
357 guarded_heap_page& page = area.pages[pageIndex]; local
524 guarded_heap_page* page = (guarded_heap_page*)address; local
629 guarded_heap_page& page = area.pages[pageIndex]; local
666 guarded_heap_page* page = &area.pages[pageIndex]; local
769 guarded_heap_page* page = guarded_heap_area_allocation_for(address, local
802 dump_guarded_heap_page(guarded_heap_page& page) argument
846 guarded_heap_page& page = area->pages[pageIndex]; local
922 guarded_heap_page& page = area.pages[i]; local
983 guarded_heap_page& page = area->pages[i]; local
[all...]
heap.cpp
127 heap_page * page_list; // sorted so that the desired page is always first
177 B_PAGE_SIZE, /* page size */
180 8, /* min count per page */
181 16 /* max waste per page */
187 B_PAGE_SIZE * 8, /* page size */
190 4, /* min count per page */
191 64 /* max waste per page */
197 B_PAGE_SIZE * 16, /* page size */
200 1, /* min count per page */
201 256 /* max waste per page */
212 dump_page(heap_page *page) argument
291 heap_page *page = &area->page_table[i]; local
386 heap_page *page = &area->page_table[i]; local
493 heap_page *page = area->free_pages; local
570 heap_page *page = bin->page_list; local
917 heap_link_page(heap_page *page, heap_page **list) argument
928 heap_unlink_page(heap_page *page, heap_page **list) argument
988 heap_page *page = &area->page_table[i]; local
1053 heap_page *page = bin->page_list; local
1208 heap_page *page = &area->page_table[((addr_t)address - area->base) local
1422 heap_page *page = &area->page_table[((addr_t)address - area->base) local
1537 heap_page *page = &area->page_table[((addr_t)address - area->base) local
[all...]
/haiku/src/tests/kits/interface/pictureprint/
DumpPrintJob.cpp
54 for (int page = 0; page < pages; page ++) {
55 printf("Page: %d\n", page+1);
57 if (reader.GetPage(page, pjp) != B_OK) {
58 fprintf(stderr, "Error reading page!\n");
/haiku/src/system/kernel/
guarded_heap.cpp
112 out.Print("guarded heap allocate: heap: %p; page: %p; "
138 out.Print("guarded heap free: heap: %p; page: %p", fHeap,
180 guarded_heap_page& page = area.pages[startPageIndex + i]; local
181 page.flags = GUARDED_HEAP_PAGE_FLAG_USED;
183 page.team = (gKernelStartup ? 0 : team_get_current_team_id());
184 page.thread = find_thread(NULL);
186 page.stack_trace_depth = arch_debug_get_stack_trace(
187 page.stack_trace, GUARDED_HEAP_STACK_TRACE_DEPTH, 0, 4,
190 page.allocation_size = allocationSize;
191 page
227 guarded_heap_page& page = area.pages[pageIndex]; local
470 guarded_heap_page& page = area.pages[pageIndex]; local
507 guarded_heap_page* page = &area.pages[pageIndex]; local
581 dump_guarded_heap_stack_trace(guarded_heap_page& page) argument
635 guarded_heap_page& page = area->pages[pageIndex]; local
722 guarded_heap_page& page = area->pages[i]; local
819 guarded_heap_page& page = area->pages[i]; local
886 guarded_heap_page& page = area->pages[i]; local
[all...]
heap.cpp
103 heap_page * page_list; // sorted so that the desired page is always first
160 B_PAGE_SIZE, /* page size */
163 8, /* min count per page */
164 16 /* max waste per page */
170 B_PAGE_SIZE * 8, /* page size */
173 4, /* min count per page */
174 64 /* max waste per page */
180 B_PAGE_SIZE * 16, /* page size */
183 1, /* min count per page */
184 256 /* max waste per page */
309 dump_page(heap_page *page) argument
462 heap_page *page = &area->page_table[i]; local
570 heap_page *page = &area->page_table[i]; local
707 heap_page *page = &area->page_table[i]; local
863 heap_page *page = area->free_pages; local
937 heap_page *page = bin->page_list; local
1306 heap_link_page(heap_page *page, heap_page **list) argument
1317 heap_unlink_page(heap_page *page, heap_page **list) argument
1378 heap_page *page = &area->page_table[i]; local
1443 heap_page *page = bin->page_list; local
1625 heap_page *page = &area->page_table[((addr_t)address - area->base) local
1806 heap_page *page = &area->page_table[((addr_t)address - area->base) local
[all...]
/haiku/src/add-ons/print/drivers/pcl5/
PCL5.h
22 virtual bool StartPage(int page);
24 virtual bool EndPage(int page);
/haiku/src/system/kernel/arch/x86/paging/64bit/
X86VMTranslationMap64Bit.cpp
56 vm_page* page; local
77 page = vm_lookup_page(address / B_PAGE_SIZE);
78 if (page == NULL) {
79 panic("page table %u %u %u on invalid page %#"
83 DEBUG_PAGE_ACCESS_START(page);
84 vm_page_set_state(page, PAGE_STATE_FREE);
88 page = vm_lookup_page(address / B_PAGE_SIZE);
89 if (page == NULL) {
90 panic("page director
441 vm_page* page = vm_lookup_page( local
531 vm_page* page = mapping->page; local
[all...]
X86PagingMethod64Bit.cpp
75 // Create the physical page mapper.
147 // Get the page directory.
153 TRACE("X86PagingMethod64Bit::MapEarly(): creating page directory for va"
171 // Get the page table.
177 TRACE("X86PagingMethod64Bit::MapEarly(): creating page table for va"
195 // The page table entry must not already be mapped.
218 /*! Traverses down the paging structure hierarchy to find the page directory
236 vm_page* page = vm_page_allocate_page(reservation,
239 DEBUG_PAGE_ACCESS_END(page);
242 = (phys_addr_t)page
409 uint64 page = (physicalAddress & X86_64_PTE_ADDRESS_MASK) local
[all...]
/haiku/src/add-ons/kernel/file_systems/ramfs/
DataContainer.cpp
295 vm_page* page = pages[index]; local
296 phys_addr_t at = (page != NULL)
297 ? (page->physical_page_number * B_PAGE_SIZE) : 0;
307 page->modified = true;
310 if (page != NULL) {
352 vm_page* page = fCache->LookupPage(offset); local
353 if (page != NULL) {
354 if (page->busy) {
355 fCache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
359 DEBUG_PAGE_ACCESS_START(page);
405 vm_page* page = pages[index++]; local
[all...]
/haiku/src/add-ons/kernel/file_systems/packagefs/package/
CachedDataReader.cpp
187 vm_page* page = it.Next(); local
189 if (page == NULL
190 || page->cache_offset >= firstPageOffset + linePageCount) {
191 page = NULL;
194 currentPageOffset = page->cache_offset;
207 if (page != NULL) {
208 pages[pageOffset++ - firstPageOffset] = page;
209 DEBUG_PAGE_ACCESS_START(page);
210 vm_page_set_state(page, PAGE_STATE_UNUSED);
211 DEBUG_PAGE_ACCESS_END(page);
307 vm_page* page = pages[i]; local
339 vm_page* page = pages[i]; local
[all...]
/haiku/src/system/kernel/arch/riscv64/
RISCV64VMTranslationMap.cpp
38 WriteVmPage(vm_page* page) argument
41 (addr_t)(page->physical_page_number * B_PAGE_SIZE));
42 switch (page->State()) {
69 if (page->busy)
74 if (page->busy_writing)
79 if (page->accessed)
84 if (page->modified)
89 if (page->unused)
94 dprintf(" usage:%3u", page->usage_count);
95 dprintf(" wired:%5u", page
129 vm_page* page = vm_lookup_page(ppn); local
473 vm_page* page = vm_lookup_page(oldPte.ppn); local
566 vm_page* page = mapping->page; local
[all...]
/haiku/src/apps/bootmanager/
WizardView.cpp
43 WizardView::SetPage(WizardPageView* page) argument
45 if (fPage == page)
53 fPage = page;
54 if (page == NULL)
57 fPageContainer->AddChild(page);
121 fPageContainer = new BGroupView("page container");
/haiku/src/system/kernel/arch/x86/paging/32bit/
X86VMTranslationMap32Bit.cpp
63 vm_page* page = vm_lookup_page(address / B_PAGE_SIZE); local
64 if (!page)
65 panic("destroy_tmap: didn't find pgtable page\n");
66 DEBUG_PAGE_ACCESS_START(page);
67 vm_page_set_state(page, PAGE_STATE_FREE);
91 // allocate a physical page mapper
97 // allocate the page directory
103 // look up the page directory's physical address
112 // get the physical page mapper
155 // check to see if a page tabl
159 vm_page *page; local
439 vm_page* page = vm_lookup_page( local
528 vm_page* page = mapping->page; local
[all...]
/haiku/src/system/kernel/arch/arm/paging/32bit/
ARMVMTranslationMap32Bit.cpp
68 vm_page* page = vm_lookup_page(address / B_PAGE_SIZE); local
69 if (!page)
70 panic("destroy_tmap: didn't find pgtable page\n");
71 DEBUG_PAGE_ACCESS_START(page);
72 vm_page_set_state(page, PAGE_STATE_FREE);
96 // allocate a physical page mapper
102 // allocate the page directory
120 // look up the page directory's physical address
129 // get the physical page mapper
172 // check to see if a page tabl
176 vm_page *page; local
456 vm_page* page = vm_lookup_page( local
545 vm_page* page = mapping->page; local
[all...]
/haiku/src/system/kernel/device_manager/
IOCache.cpp
59 "multiple of the page size.", cacheLineSize);
149 while (vm_page* page = fCache->pages.Root()) {
150 DEBUG_PAGE_ACCESS_START(page);
151 fCache->RemovePage(page);
152 vm_page_free(NULL, page);
289 vm_page* page = it.Next(); local
291 if (page == NULL
292 || page->cache_offset >= firstPageOffset + linePageCount) {
293 page = NULL;
296 currentPageOffset = page
606 vm_page* page = fPages[i]; local
637 vm_page* page = fPages[i]; local
750 vm_page* page = fPages[i]; local
[all...]

Completed in 244 milliseconds
