/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <vm/VMCache.h>

#include <stddef.h>
#include <stdlib.h>

#include <algorithm>

#include <arch/cpu.h>
#include <condition_variable.h>
#include <heap.h>
#include <int.h>
#include <kernel.h>
#include <slab/Slab.h>
#include <smp.h>
#include <thread.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <vfs.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/vm_types.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>

// needed for the factory only
#include "VMAnonymousCache.h"
#include "VMAnonymousNoSwapCache.h"
#include "VMDeviceCache.h"
#include "VMNullCache.h"
#include "../cache/vnode_store.h"


//#define TRACE_VM_CACHE
#ifdef TRACE_VM_CACHE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#if DEBUG_CACHE_LIST
VMCache* gDebugCacheList;
#endif
static mutex sCacheListLock = MUTEX_INITIALIZER("global VMCache list");
	// The lock is also needed when the debug feature is disabled.

ObjectCache* gCacheRefObjectCache;
#if ENABLE_SWAP_SUPPORT
ObjectCache* gAnonymousCacheObjectCache;
#endif
ObjectCache* gAnonymousNoSwapCacheObjectCache;
ObjectCache* gVnodeCacheObjectCache;
ObjectCache* gDeviceCacheObjectCache;
ObjectCache* gNullCacheObjectCache;


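// Entry in the singly linked list of threads waiting for a page event; see
// WaitForPageEvents() and _NotifyPageEvents().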
struct VMCache::PageEventWaiter {
	Thread*				thread;
	PageEventWaiter*	next;
	vm_page*			page;
	uint32				events;
};


#if VM_CACHE_TRACING

namespace VMCacheTracing {

class VMCacheTraceEntry : public AbstractTraceEntry {
	public:
		VMCacheTraceEntry(VMCache* cache)
			:
			fCache(cache)
		{
#if VM_CACHE_TRACING_STACK_TRACE
			fStackTrace = capture_tracing_stack_trace(
				VM_CACHE_TRACING_STACK_TRACE, 0, true);
				// Don't capture userland stack trace to avoid potential
				// deadlocks.
#endif
		}

#if VM_CACHE_TRACING_STACK_TRACE
		virtual void DumpStackTrace(TraceOutput& out)
		{
			out.PrintStackTrace(fStackTrace);
		}
#endif

		VMCache* Cache() const
		{
			return fCache;
		}

	protected:
		VMCache*	fCache;
#if VM_CACHE_TRACING_STACK_TRACE
		tracing_stack_trace* fStackTrace;
#endif
};


class Create : public VMCacheTraceEntry {
	public:
		Create(VMCache* cache)
			:
			VMCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache create: -> cache: %p", fCache);
		}
};


class Delete : public VMCacheTraceEntry {
	public:
		Delete(VMCache* cache)
			:
			VMCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache delete: cache: %p", fCache);
		}
};


class SetMinimalCommitment : public VMCacheTraceEntry {
	public:
		SetMinimalCommitment(VMCache* cache, off_t commitment)
			:
			VMCacheTraceEntry(cache),
			fOldCommitment(cache->committed_size),
			fCommitment(commitment)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache set min commitment: cache: %p, "
				"commitment: %" B_PRIdOFF " -> %" B_PRIdOFF, fCache,
				fOldCommitment, fCommitment);
		}

	private:
		off_t	fOldCommitment;
		off_t	fCommitment;
};


class Resize : public VMCacheTraceEntry {
	public:
		Resize(VMCache* cache, off_t size)
			:
			VMCacheTraceEntry(cache),
			fOldSize(cache->virtual_end),
			fSize(size)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache resize: cache: %p, size: %" B_PRIdOFF " -> %"
				B_PRIdOFF, fCache, fOldSize, fSize);
		}

	private:
		off_t	fOldSize;
		off_t	fSize;
};


class Rebase : public VMCacheTraceEntry {
	public:
		Rebase(VMCache* cache, off_t base)
			:
			VMCacheTraceEntry(cache),
			fOldBase(cache->virtual_base),
			fBase(base)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache rebase: cache: %p, base: %lld -> %lld", fCache,
				fOldBase, fBase);
		}

	private:
		off_t	fOldBase;
		off_t	fBase;
};


class AddConsumer : public VMCacheTraceEntry {
	public:
		AddConsumer(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache add consumer: cache: %p, consumer: %p", fCache,
				fConsumer);
		}

		VMCache* Consumer() const
		{
			return fConsumer;
		}

	private:
		VMCache*	fConsumer;
};


class RemoveConsumer : public VMCacheTraceEntry {
	public:
		RemoveConsumer(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove consumer: cache: %p, consumer: %p",
				fCache, fConsumer);
		}

	private:
		VMCache*	fConsumer;
};


class Merge : public VMCacheTraceEntry {
	public:
		Merge(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache merge with consumer: cache: %p, consumer: %p",
				fCache, fConsumer);
		}

	private:
		VMCache*	fConsumer;
};


class InsertArea : public VMCacheTraceEntry {
	public:
		InsertArea(VMCache* cache, VMArea* area)
			:
			VMCacheTraceEntry(cache),
			fArea(area)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache insert area: cache: %p, area: %p", fCache,
				fArea);
		}

		VMArea*	Area() const
		{
			return fArea;
		}

	private:
		VMArea*	fArea;
};


class RemoveArea : public VMCacheTraceEntry {
	public:
		RemoveArea(VMCache* cache, VMArea* area)
			:
			VMCacheTraceEntry(cache),
			fArea(area)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove area: cache: %p, area: %p", fCache,
				fArea);
		}

	private:
		VMArea*	fArea;
};

}	// namespace VMCacheTracing

#	define T(x) new(std::nothrow) VMCacheTracing::x;

#	if VM_CACHE_TRACING >= 2

namespace VMCacheTracing {

class InsertPage : public VMCacheTraceEntry {
	public:
		InsertPage(VMCache* cache, vm_page* page, off_t offset)
			:
			VMCacheTraceEntry(cache),
			fPage(page),
			fOffset(offset)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache insert page: cache: %p, page: %p, offset: %"
				B_PRIdOFF, fCache, fPage, fOffset);
		}

	private:
		vm_page*	fPage;
		off_t		fOffset;
};


class RemovePage : public VMCacheTraceEntry {
	public:
		RemovePage(VMCache* cache, vm_page* page)
			:
			VMCacheTraceEntry(cache),
			fPage(page)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove page: cache: %p, page: %p", fCache,
				fPage);
		}

	private:
		vm_page*	fPage;
};

}	// namespace VMCacheTracing

#		define T2(x) new(std::nothrow) VMCacheTracing::x;
#	else
#		define T2(x) ;
#	endif
#else
#	define T(x) ;
#	define T2(x) ;
#endif


//	#pragma mark - debugger commands


#if VM_CACHE_TRACING


static void*
cache_stack_find_area_cache(const TraceEntryIterator& baseIterator, void* area)
{
	using namespace VMCacheTracing;

	// find the previous "insert area" entry for the given area
	TraceEntryIterator iterator = baseIterator;
	TraceEntry* entry = iterator.Current();
	while (entry != NULL) {
		if (InsertArea* insertAreaEntry = dynamic_cast<InsertArea*>(entry)) {
			if (insertAreaEntry->Area() == area)
				return insertAreaEntry->Cache();
		}

		entry = iterator.Previous();
	}

	return NULL;
}


static void*
cache_stack_find_consumer(const TraceEntryIterator& baseIterator, void* cache)
{
	using namespace VMCacheTracing;

	// find the previous "add consumer" or "create" entry for the given cache
	TraceEntryIterator iterator = baseIterator;
	TraceEntry* entry = iterator.Current();
	while (entry != NULL) {
		if (Create* createEntry = dynamic_cast<Create*>(entry)) {
			if (createEntry->Cache() == cache)
				return NULL;
		} else if (AddConsumer* addEntry = dynamic_cast<AddConsumer*>(entry)) {
			if (addEntry->Consumer() == cache)
				return addEntry->Cache();
		}

		entry = iterator.Previous();
	}

	return NULL;
}


static int
command_cache_stack(int argc, char** argv)
{
	if (argc < 3 || argc > 4) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	bool isArea = false;

	int argi = 1;
	if (argc == 4) {
		if (strcmp(argv[argi], "area") != 0) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}

		argi++;
		isArea = true;
	}

	uint64 addressValue;
	uint64 debugEntryIndex;
	if (!evaluate_debug_expression(argv[argi++], &addressValue, false)
		|| !evaluate_debug_expression(argv[argi++], &debugEntryIndex, false)) {
		return 0;
	}

	TraceEntryIterator baseIterator;
	if (baseIterator.MoveTo((int32)debugEntryIndex) == NULL) {
		kprintf("Invalid tracing entry index %" B_PRIu64 "\n", debugEntryIndex);
		return 0;
	}

	void* address = (void*)(addr_t)addressValue;

	kprintf("cache stack for %s %p at %" B_PRIu64 ":\n",
		isArea ? "area" : "cache", address, debugEntryIndex);
	if (isArea) {
		address = cache_stack_find_area_cache(baseIterator, address);
		if (address == NULL) {
			kprintf("  cache not found\n");
			return 0;
		}
	}

	while (address != NULL) {
		kprintf("  %p\n", address);
		address = cache_stack_find_consumer(baseIterator, address);
	}

	return 0;
}


#endif	// VM_CACHE_TRACING


//	#pragma mark -


status_t
vm_cache_init(kernel_args* args)
{
	// Create object caches for the structures we allocate here.
	gCacheRefObjectCache = create_object_cache("cache refs", sizeof(VMCacheRef),
		0, NULL, NULL, NULL);
#if ENABLE_SWAP_SUPPORT
	gAnonymousCacheObjectCache = create_object_cache("anon caches",
		sizeof(VMAnonymousCache), 0, NULL, NULL, NULL);
#endif
	gAnonymousNoSwapCacheObjectCache = create_object_cache(
		"anon no-swap caches", sizeof(VMAnonymousNoSwapCache), 0, NULL, NULL,
		NULL);
	gVnodeCacheObjectCache = create_object_cache("vnode caches",
		sizeof(VMVnodeCache), 0, NULL, NULL, NULL);
	gDeviceCacheObjectCache = create_object_cache("device caches",
		sizeof(VMDeviceCache), 0, NULL, NULL, NULL);
	gNullCacheObjectCache = create_object_cache("null caches",
		sizeof(VMNullCache), 0, NULL, NULL, NULL);

	if (gCacheRefObjectCache == NULL
#if ENABLE_SWAP_SUPPORT
		|| gAnonymousCacheObjectCache == NULL
#endif
		|| gAnonymousNoSwapCacheObjectCache == NULL
		|| gVnodeCacheObjectCache == NULL
		|| gDeviceCacheObjectCache == NULL
		|| gNullCacheObjectCache == NULL) {
		panic("vm_cache_init(): Failed to create object caches!");
		return B_NO_MEMORY;
	}

	return B_OK;
}


void
vm_cache_init_post_heap()
{
#if VM_CACHE_TRACING
	add_debugger_command_etc("cache_stack", &command_cache_stack,
		"List the ancestors (sources) of a VMCache at the time given by "
			"tracing entry index",
		"[ \"area\" ] <address> <tracing entry index>\n"
		"All ancestors (sources) of a given VMCache at the time given by the\n"
		"tracing entry index are listed. If \"area\" is given the supplied\n"
		"address is an area instead of a cache address. The listing will\n"
		"start with the area's cache at that point.\n",
		0);
#endif	// VM_CACHE_TRACING
}


VMCache*
vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
{
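	// sCacheListLock serves as a synchronization point with Delete() and
	// MoveAllPages() (cf. the comments there): it keeps the VMCacheRef and
	// cache read from the page valid while we try to lock the cache. After
	// the cache lock has been acquired, the page's cache association is
	// re-checked and the attempt is retried if it changed in the meantime.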
	mutex_lock(&sCacheListLock);

	while (dontWait) {
		VMCacheRef* cacheRef = page->CacheRef();
		if (cacheRef == NULL) {
			mutex_unlock(&sCacheListLock);
			return NULL;
		}

		VMCache* cache = cacheRef->cache;
		if (!cache->TryLock()) {
			mutex_unlock(&sCacheListLock);
			return NULL;
		}

		if (cacheRef == page->CacheRef()) {
			mutex_unlock(&sCacheListLock);
			cache->AcquireRefLocked();
			return cache;
		}

		// the cache changed in the meantime
		cache->Unlock();
	}

	while (true) {
		VMCacheRef* cacheRef = page->CacheRef();
		if (cacheRef == NULL) {
			mutex_unlock(&sCacheListLock);
			return NULL;
		}

		VMCache* cache = cacheRef->cache;
		if (!cache->SwitchLock(&sCacheListLock)) {
			// cache has been deleted
			mutex_lock(&sCacheListLock);
			continue;
		}

		mutex_lock(&sCacheListLock);
		if (cache == page->Cache()) {
			mutex_unlock(&sCacheListLock);
			cache->AcquireRefLocked();
			return cache;
		}

		// the cache changed in the meantime
		cache->Unlock();
	}
}


// #pragma mark - VMCache


VMCacheRef::VMCacheRef(VMCache* cache)
	:
	cache(cache),
	ref_count(1)
{
}


// #pragma mark - VMCache


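// Returns whether this cache can be merged with its only consumer: it has to
// be temporary, must no longer be referenced by any area, and must have
// exactly one consumer.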
bool
VMCache::_IsMergeable() const
{
	return areas == NULL && temporary && !consumers.IsEmpty()
		&& consumers.Head() == consumers.Tail();
}


VMCache::VMCache()
	:
	fCacheRef(NULL)
{
}


VMCache::~VMCache()
{
	object_cache_delete(gCacheRefObjectCache, fCacheRef);
}


status_t
VMCache::Init(uint32 cacheType, uint32 allocationFlags)
{
	mutex_init(&fLock, "VMCache");

	areas = NULL;
	fRefCount = 1;
	source = NULL;
	virtual_base = 0;
	virtual_end = 0;
	committed_size = 0;
	temporary = 0;
	page_count = 0;
	fWiredPagesCount = 0;
	type = cacheType;
	fPageEventWaiters = NULL;

#if DEBUG_CACHE_LIST
	debug_previous = NULL;
	debug_next = NULL;
		// initialize in case the following fails
#endif

	fCacheRef = new(gCacheRefObjectCache, allocationFlags) VMCacheRef(this);
	if (fCacheRef == NULL)
		return B_NO_MEMORY;

#if DEBUG_CACHE_LIST
	mutex_lock(&sCacheListLock);

	if (gDebugCacheList != NULL)
		gDebugCacheList->debug_previous = this;
	debug_next = gDebugCacheList;
	gDebugCacheList = this;

	mutex_unlock(&sCacheListLock);
#endif

	return B_OK;
}


void
VMCache::Delete()
{
	if (areas != NULL)
		panic("cache %p to be deleted still has areas", this);
	if (!consumers.IsEmpty())
		panic("cache %p to be deleted still has consumers", this);

	T(Delete(this));

	// free all of the pages in the cache
	while (vm_page* page = pages.Root()) {
		if (!page->mappings.IsEmpty() || page->WiredCount() != 0) {
			panic("remove page %p from cache %p: page still has mappings!\n"
				"@!page %p; cache %p", page, this, page, this);
		}

		// remove it
		pages.Remove(page);
		page->SetCacheRef(NULL);

		TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
			page->physical_page_number));
		DEBUG_PAGE_ACCESS_START(page);
		vm_page_free(this, page);
	}

	// remove the ref to the source
	if (source)
		source->_RemoveConsumer(this);

	// We lock and unlock the sCacheListLock, even if DEBUG_CACHE_LIST is
	// not enabled. This synchronization point is needed for
	// vm_cache_acquire_locked_page_cache().
	mutex_lock(&sCacheListLock);

#if DEBUG_CACHE_LIST
	if (debug_previous)
		debug_previous->debug_next = debug_next;
	if (debug_next)
		debug_next->debug_previous = debug_previous;
	if (this == gDebugCacheList)
		gDebugCacheList = debug_next;
#endif

	mutex_destroy(&fLock);

	mutex_unlock(&sCacheListLock);

	DeleteObject();
}


void
VMCache::Unlock(bool consumerLocked)
{
	while (fRefCount == 1 && _IsMergeable()) {
		VMCache* consumer = consumers.Head();
		if (consumerLocked) {
			_MergeWithOnlyConsumer();
		} else if (consumer->TryLock()) {
			_MergeWithOnlyConsumer();
			consumer->Unlock();
		} else {
			// Someone else has locked the consumer ATM. Unlock this cache and
			// wait for the consumer lock. Increment the cache's ref count
			// temporarily, so that no one else will try what we are doing or
			// delete the cache.
			fRefCount++;
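			// SwitchLock() unlocks this cache and then acquires the
			// consumer's lock; if that fails, the merge conditions are simply
			// re-evaluated on the next loop iteration.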
			bool consumerLockedTemp = consumer->SwitchLock(&fLock);
			Lock();
			fRefCount--;

			if (consumerLockedTemp) {
				if (fRefCount == 1 && _IsMergeable()
						&& consumer == consumers.Head()) {
					// nothing has changed in the meantime -- merge
					_MergeWithOnlyConsumer();
				}

				consumer->Unlock();
			}
		}
	}

	if (fRefCount == 0) {
		// delete this cache
		Delete();
	} else
		mutex_unlock(&fLock);
}


vm_page*
VMCache::LookupPage(off_t offset)
{
	AssertLocked();

	vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));

#if KDEBUG
	if (page != NULL && page->Cache() != this)
		panic("page %p not in cache %p\n", page, this);
#endif

	return page;
}


void
VMCache::InsertPage(vm_page* page, off_t offset)
{
	TRACE(("VMCache::InsertPage(): cache %p, page %p, offset %" B_PRIdOFF "\n",
		this, page, offset));
	AssertLocked();

	if (page->CacheRef() != NULL) {
		panic("insert page %p into cache %p: page cache is set to %p\n",
			page, this, page->Cache());
	}

	T2(InsertPage(this, page, offset));

	page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
	page_count++;
	page->SetCacheRef(fCacheRef);

#if KDEBUG
	vm_page* otherPage = pages.Lookup(page->cache_offset);
	if (otherPage != NULL) {
		panic("VMCache::InsertPage(): there's already page %p with cache "
			"offset %" B_PRIuPHYSADDR " in cache %p; inserting page %p",
			otherPage, page->cache_offset, this, page);
	}
#endif	// KDEBUG

	pages.Insert(page);

	if (page->WiredCount() > 0)
		IncrementWiredPagesCount();
}


/*!	Removes the vm_page from this cache. Of course, the page must
	really be in this cache or evil things will happen.
	The cache lock must be held.
*/
void
VMCache::RemovePage(vm_page* page)
{
	TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
	AssertLocked();

	if (page->Cache() != this) {
		panic("remove page %p from cache %p: page cache is set to %p\n", page,
			this, page->Cache());
	}

	T2(RemovePage(this, page));

	pages.Remove(page);
	page_count--;
	page->SetCacheRef(NULL);

	if (page->WiredCount() > 0)
		DecrementWiredPagesCount();
}


/*!	Moves the given page from its current cache and inserts it into this cache
	at the given offset.
	Both caches must be locked.
*/
void
VMCache::MovePage(vm_page* page, off_t offset)
{
	VMCache* oldCache = page->Cache();

	AssertLocked();
	oldCache->AssertLocked();

	// remove from old cache
	oldCache->pages.Remove(page);
	oldCache->page_count--;
	T2(RemovePage(oldCache, page));

	// change the offset
	page->cache_offset = offset >> PAGE_SHIFT;

	// insert here
	pages.Insert(page);
	page_count++;
	page->SetCacheRef(fCacheRef);

	if (page->WiredCount() > 0) {
		IncrementWiredPagesCount();
		oldCache->DecrementWiredPagesCount();
	}

	T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
}


/*!	Moves the given page from its current cache and inserts it into this cache.
	Both caches must be locked.
*/
void
VMCache::MovePage(vm_page* page)
{
	MovePage(page, page->cache_offset << PAGE_SHIFT);
}


/*!	Moves all pages from the given cache to this one.
	Both caches must be locked. This cache must be empty.
*/
void
VMCache::MoveAllPages(VMCache* fromCache)
{
	AssertLocked();
	fromCache->AssertLocked();
	ASSERT(page_count == 0);

	std::swap(fromCache->pages, pages);
	page_count = fromCache->page_count;
	fromCache->page_count = 0;
	fWiredPagesCount = fromCache->fWiredPagesCount;
	fromCache->fWiredPagesCount = 0;

	// swap the VMCacheRefs
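	// Every moved page still points to the old cache's VMCacheRef, so instead
	// of updating each page individually, the two ref objects are swapped and
	// their back pointers fixed, re-associating all pages at once.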
	mutex_lock(&sCacheListLock);
	std::swap(fCacheRef, fromCache->fCacheRef);
	fCacheRef->cache = this;
	fromCache->fCacheRef->cache = fromCache;
	mutex_unlock(&sCacheListLock);

#if VM_CACHE_TRACING >= 2
	for (VMCachePagesTree::Iterator it = pages.GetIterator();
			vm_page* page = it.Next();) {
		T2(RemovePage(fromCache, page));
		T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
	}
#endif
}


/*!	Waits until one or more events happened for a given page which belongs to
	this cache.
	The cache must be locked. It will be unlocked by the method. \a relock
	specifies whether the method shall re-lock the cache before returning.
	\param page The page for which to wait.
	\param events The mask of events the caller is interested in.
	\param relock If \c true, the cache will be locked when returning,
		otherwise it won't be locked.
*/
void
VMCache::WaitForPageEvents(vm_page* page, uint32 events, bool relock)
{
	PageEventWaiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = fPageEventWaiters;
	waiter.page = page;
	waiter.events = events;

	fPageEventWaiters = &waiter;
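	// The waiter lives on this thread's stack; _NotifyPageEvents() unlinks it
	// from the list before unblocking the thread.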

	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_OTHER_OBJECT, page);

	Unlock();
	thread_block();

	if (relock)
		Lock();
}


/*!	Makes this cache the source of the \a consumer cache,
	and adds the \a consumer to its list.
	This also grabs a reference to the source cache.
	Assumes you have the cache and the consumer's lock held.
*/
void
VMCache::AddConsumer(VMCache* consumer)
{
	TRACE(("add consumer vm cache %p to cache %p\n", consumer, this));
	AssertLocked();
	consumer->AssertLocked();

	T(AddConsumer(this, consumer));

	consumer->source = this;
	consumers.Add(consumer);

	AcquireRefLocked();
	AcquireStoreRef();
}


/*!	Adds the \a area to this cache.
	Assumes you have the cache locked.
*/
status_t
VMCache::InsertAreaLocked(VMArea* area)
{
	TRACE(("VMCache::InsertAreaLocked(cache %p, area %p)\n", this, area));
	AssertLocked();

	T(InsertArea(this, area));

	area->cache_next = areas;
	if (area->cache_next)
		area->cache_next->cache_prev = area;
	area->cache_prev = NULL;
	areas = area;

	AcquireStoreRef();

	return B_OK;
}


status_t
VMCache::RemoveArea(VMArea* area)
{
	TRACE(("VMCache::RemoveArea(cache %p, area %p)\n", this, area));

	T(RemoveArea(this, area));

	// We release the store reference first, since otherwise we would reverse
	// the locking order or even deadlock ourselves (... -> free_vnode() -> ...
	// -> bfs_remove_vnode() -> ... -> file_cache_set_size() -> mutex_lock()).
	// Also cf. _RemoveConsumer().
	ReleaseStoreRef();

	AutoLocker<VMCache> locker(this);

	if (area->cache_prev)
		area->cache_prev->cache_next = area->cache_next;
	if (area->cache_next)
		area->cache_next->cache_prev = area->cache_prev;
	if (areas == area)
		areas = area->cache_next;

	return B_OK;
}


/*!	Transfers the areas from \a fromCache to this cache. This cache must not
	have areas yet. Both caches must be locked.
*/
void
VMCache::TransferAreas(VMCache* fromCache)
{
	AssertLocked();
	fromCache->AssertLocked();
	ASSERT(areas == NULL);

	areas = fromCache->areas;
	fromCache->areas = NULL;

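	// Each area holds a reference to its cache; move that reference from
	// \a fromCache to this cache along with the area.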
	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
		area->cache = this;
		AcquireRefLocked();
		fromCache->ReleaseRefLocked();

		T(RemoveArea(fromCache, area));
		T(InsertArea(this, area));
	}
}


uint32
VMCache::CountWritableAreas(VMArea* ignoreArea) const
{
	uint32 count = 0;

	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
		if (area != ignoreArea
			&& (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0) {
			count++;
		}
	}

	return count;
}


status_t
VMCache::WriteModified()
{
	TRACE(("VMCache::WriteModified(cache = %p)\n", this));

	if (temporary)
		return B_OK;

	Lock();
	status_t status = vm_page_write_modified_pages(this);
	Unlock();

	return status;
}


/*!	Commits the memory to the store if the \a commitment is larger than
	what's committed already.
	Assumes you have the cache's lock held.
*/
status_t
VMCache::SetMinimalCommitment(off_t commitment, int priority)
{
	TRACE(("VMCache::SetMinimalCommitment(cache %p, commitment %" B_PRIdOFF
		")\n", this, commitment));
	AssertLocked();

	T(SetMinimalCommitment(this, commitment));

	status_t status = B_OK;

	// If we don't have enough committed space to cover through to the new end
	// of the area...
	if (committed_size < commitment) {
		// ToDo: should we check if the cache's virtual size is large
		//	enough for a commitment of that size?

		// try to commit more memory
		status = Commit(commitment, priority);
	}

	return status;
}


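/*!	Frees the pages the given iterator yields, up to (but not including)
	\a toPage, if specified.
	When a busy page is encountered, the cache lock is temporarily released
	while waiting for it and \c true is returned, so that the caller can
	restart the iteration; \c false is returned once the whole range has been
	processed. Pages that are busy being written are not waited for: their
	\c busy_writing flag is cleared instead, which tells the writer to free
	the page.
*/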
bool
VMCache::_FreePageRange(VMCachePagesTree::Iterator it,
	page_num_t* toPage = NULL)
{
	for (vm_page* page = it.Next();
		page != NULL && (toPage == NULL || page->cache_offset < *toPage);
		page = it.Next()) {

		if (page->busy) {
			if (page->busy_writing) {
				// We cannot wait for the page to become available
				// as we might cause a deadlock this way
				page->busy_writing = false;
					// this will notify the writer to free the page
				continue;
			}

			// wait for page to become unbusy
			WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
			return true;
		}

		// remove the page and put it into the free queue
		DEBUG_PAGE_ACCESS_START(page);
		vm_remove_all_page_mappings(page);
		ASSERT(page->WiredCount() == 0);
			// TODO: Find a real solution! If the page is wired
			// temporarily (e.g. by lock_memory()), we actually must not
			// unmap it!
		RemovePage(page);
			// Note: When iterating through an IteratableSplayTree,
			// removing the current node is safe.

		vm_page_free(this, page);
	}

	return false;
}


/*!	This function updates the size field of the cache.
	If needed, it will free up all pages that don't belong to the cache anymore.
	The cache lock must be held when you call it.
	Since removed pages don't belong to the cache any longer, they are not
	written back before being removed.

	Note, this function may temporarily release the cache lock in case it
	has to wait for busy pages.
*/
status_t
VMCache::Resize(off_t newSize, int priority)
{
	TRACE(("VMCache::Resize(cache %p, newSize %" B_PRIdOFF ") old size %"
		B_PRIdOFF "\n", this, newSize, this->virtual_end));
	this->AssertLocked();

	T(Resize(this, newSize));

	status_t status = Commit(newSize - virtual_base, priority);
	if (status != B_OK)
		return status;

	page_num_t oldPageCount = (page_num_t)((virtual_end + B_PAGE_SIZE - 1)
		>> PAGE_SHIFT);
	page_num_t newPageCount = (page_num_t)((newSize + B_PAGE_SIZE - 1)
		>> PAGE_SHIFT);

	if (newPageCount < oldPageCount) {
		// we need to remove all pages in the cache outside of the new virtual
		// size
		while (_FreePageRange(pages.GetIterator(newPageCount, true, true)))
			;
	}

	virtual_end = newSize;
	return B_OK;
}


/*!	This function updates the virtual_base field of the cache.
	If needed, it will free up all pages that don't belong to the cache anymore.
	The cache lock must be held when you call it.
	Since removed pages don't belong to the cache any longer, they are not
	written back before being removed.

	Note, this function may temporarily release the cache lock in case it
	has to wait for busy pages.
*/
status_t
VMCache::Rebase(off_t newBase, int priority)
{
	TRACE(("VMCache::Rebase(cache %p, newBase %lld) old base %lld\n",
		this, newBase, this->virtual_base));
	this->AssertLocked();

	T(Rebase(this, newBase));

	status_t status = Commit(virtual_end - newBase, priority);
	if (status != B_OK)
		return status;

	page_num_t basePage = (page_num_t)(newBase >> PAGE_SHIFT);

	if (newBase > virtual_base) {
		// we need to remove all pages in the cache outside of the new virtual
		// base
		while (_FreePageRange(pages.GetIterator(), &basePage))
			;
	}

	virtual_base = newBase;
	return B_OK;
}


/*!	Moves pages in the given range from the source cache into this cache. Both
	caches must be locked.
*/
status_t
VMCache::Adopt(VMCache* source, off_t offset, off_t size, off_t newOffset)
{
	page_num_t startPage = offset >> PAGE_SHIFT;
	page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
	off_t offsetChange = newOffset - offset;

	VMCachePagesTree::Iterator it = source->pages.GetIterator(startPage, true,
		true);
	for (vm_page* page = it.Next();
				page != NULL && page->cache_offset < endPage;
				page = it.Next()) {
		MovePage(page, (page->cache_offset << PAGE_SHIFT) + offsetChange);
	}

	return B_OK;
}


/*! Discards pages in the given range. */
status_t
VMCache::Discard(off_t offset, off_t size)
{
	page_num_t startPage = offset >> PAGE_SHIFT;
	page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
	while (_FreePageRange(pages.GetIterator(startPage, true, true), &endPage))
		;

	return B_OK;
}


/*!	You have to call this function with the VMCache lock held. */
status_t
VMCache::FlushAndRemoveAllPages()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	while (page_count > 0) {
		// write back modified pages
		status_t status = vm_page_write_modified_pages(this);
		if (status != B_OK)
			return status;

		// remove pages
		for (VMCachePagesTree::Iterator it = pages.GetIterator();
				vm_page* page = it.Next();) {
			if (page->busy) {
				// wait for page to become unbusy
				WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);

				// restart from the start of the list
				it = pages.GetIterator();
				continue;
			}

			// skip modified pages -- they will be written back in the next
			// iteration
			if (page->State() == PAGE_STATE_MODIFIED)
				continue;

			// We can't remove mapped pages.
			if (page->IsMapped())
				return B_BUSY;

			DEBUG_PAGE_ACCESS_START(page);
			RemovePage(page);
			vm_page_free(this, page);
				// Note: When iterating through an IteratableSplayTree,
				// removing the current node is safe.
		}
	}

	return B_OK;
}


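/*!	Commits memory for the cache's backing store.
	The base class implementation merely records the requested size; caches
	with an actual backing store are expected to override this hook.
*/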
status_t
VMCache::Commit(off_t size, int priority)
{
	committed_size = size;
	return B_OK;
}


/*!	Returns whether the cache's underlying backing store could deliver the
	page at the given offset.

	Basically it returns whether a Read() at \a offset would at least read a
	partial page (assuming that no unexpected errors occur and the situation
	doesn't change in the meantime).
*/
bool
VMCache::HasPage(off_t offset)
{
	// In accordance with Fault() the default implementation doesn't have a
	// backing store and doesn't allow faults.
	return false;
}


status_t
VMCache::Read(off_t offset, const generic_io_vec *vecs, size_t count,
	uint32 flags, generic_size_t *_numBytes)
{
	return B_ERROR;
}


status_t
VMCache::Write(off_t offset, const generic_io_vec *vecs, size_t count,
	uint32 flags, generic_size_t *_numBytes)
{
	return B_ERROR;
}


status_t
VMCache::WriteAsync(off_t offset, const generic_io_vec* vecs, size_t count,
	generic_size_t numBytes, uint32 flags, AsyncIOCallback* callback)
{
	// Not supported, fall back to the synchronous hook.
	generic_size_t transferred = numBytes;
	status_t error = Write(offset, vecs, count, flags, &transferred);

	if (callback != NULL)
		callback->IOFinished(error, transferred != numBytes, transferred);

	return error;
}


/*!	\brief Returns whether the cache can write the page at the given offset.

	The cache must be locked when this function is invoked.

	@param offset The page offset.
	@return \c true, if the page can be written, \c false otherwise.
*/
bool
VMCache::CanWritePage(off_t offset)
{
	return false;
}


status_t
VMCache::Fault(struct VMAddressSpace *aspace, off_t offset)
{
	return B_BAD_ADDRESS;
}


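/*!	Merges the pages of \a source into this cache.
	Only pages that this cache does not already contain at the respective
	offset are moved over; shadowed pages remain in \a source.
	Both caches must be locked.
*/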
void
VMCache::Merge(VMCache* source)
{
	for (VMCachePagesTree::Iterator it = source->pages.GetIterator();
			vm_page* page = it.Next();) {
		// Note: Removing the current node while iterating through an
		// IteratableSplayTree is safe.
		vm_page* consumerPage = LookupPage(
			(off_t)page->cache_offset << PAGE_SHIFT);
		if (consumerPage == NULL) {
			// the page is not yet in the consumer cache - move it upwards
			MovePage(page);
		}
	}
}


status_t
VMCache::AcquireUnreferencedStoreRef()
{
	return B_OK;
}


void
VMCache::AcquireStoreRef()
{
}


void
VMCache::ReleaseStoreRef()
{
}


/*!	Kernel debugger version of HasPage().
	Does not do any locking.
*/
bool
VMCache::DebugHasPage(off_t offset)
{
	// default that works for all subclasses that don't lock anyway
	return HasPage(offset);
}


/*!	Kernel debugger version of LookupPage().
	Does not do any locking.
*/
vm_page*
VMCache::DebugLookupPage(off_t offset)
{
	return pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
}


void
VMCache::Dump(bool showPages) const
{
	kprintf("CACHE %p:\n", this);
	kprintf("  ref_count:    %" B_PRId32 "\n", RefCount());
	kprintf("  source:       %p\n", source);
	kprintf("  type:         %s\n", vm_cache_type_to_string(type));
	kprintf("  virtual_base: 0x%" B_PRIx64 "\n", virtual_base);
	kprintf("  virtual_end:  0x%" B_PRIx64 "\n", virtual_end);
	kprintf("  temporary:    %" B_PRIu32 "\n", uint32(temporary));
	kprintf("  lock:         %p\n", &fLock);
#if KDEBUG
	kprintf("  lock.holder:  %" B_PRId32 "\n", fLock.holder);
#endif
	kprintf("  areas:\n");

	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
		kprintf("    area 0x%" B_PRIx32 ", %s\n", area->id, area->name);
		kprintf("\tbase_addr:  0x%lx, size: 0x%lx\n", area->Base(),
			area->Size());
		kprintf("\tprotection: 0x%" B_PRIx32 "\n", area->protection);
		kprintf("\towner:      0x%" B_PRIx32 "\n", area->address_space->ID());
	}

	kprintf("  consumers:\n");
	for (ConsumerList::ConstIterator it = consumers.GetIterator();
		 	VMCache* consumer = it.Next();) {
		kprintf("\t%p\n", consumer);
	}

	kprintf("  pages:\n");
	if (showPages) {
		for (VMCachePagesTree::ConstIterator it = pages.GetIterator();
				vm_page* page = it.Next();) {
			if (!vm_page_is_dummy(page)) {
				kprintf("\t%p ppn %#" B_PRIxPHYSADDR " offset %#" B_PRIxPHYSADDR
					" state %u (%s) wired_count %u\n", page,
					page->physical_page_number, page->cache_offset,
					page->State(), page_state_to_string(page->State()),
					page->WiredCount());
			} else {
				kprintf("\t%p DUMMY PAGE state %u (%s)\n",
					page, page->State(), page_state_to_string(page->State()));
			}
		}
	} else
		kprintf("\t%" B_PRIu32 " in cache\n", page_count);
}


/*!	Wakes up threads waiting for page events.
	\param page The page for which events occurred.
	\param events The mask of events that occurred.
*/
void
VMCache::_NotifyPageEvents(vm_page* page, uint32 events)
{
	PageEventWaiter** it = &fPageEventWaiters;
	while (PageEventWaiter* waiter = *it) {
		if (waiter->page == page && (waiter->events & events) != 0) {
			// remove from list and unblock
			*it = waiter->next;
			thread_unblock(waiter->thread, B_OK);
		} else
			it = &waiter->next;
	}
}


/*!	Merges the given cache with its only consumer.
	The caller must hold both the cache's and the consumer's lock. The method
	releases neither lock.
*/
void
VMCache::_MergeWithOnlyConsumer()
{
	VMCache* consumer = consumers.RemoveHead();

	TRACE(("merge vm cache %p (ref == %" B_PRId32 ") with vm cache %p\n",
		this, this->fRefCount, consumer));

	T(Merge(this, consumer));

	// merge the cache
	consumer->Merge(this);

	// The remaining consumer has got a new source.
	if (source != NULL) {
		VMCache* newSource = source;

		newSource->Lock();

		newSource->consumers.Remove(this);
		newSource->consumers.Add(consumer);
		consumer->source = newSource;
		source = NULL;

		newSource->Unlock();
	} else
		consumer->source = NULL;

	// Release the reference the cache's consumer owned. The consumer takes
	// over the cache's ref to its source (if any) instead.
	ReleaseRefLocked();
}


/*!	Removes the \a consumer from this cache.
	It will also release the reference to the cache owned by the consumer.
	Assumes you have the consumer's cache lock held. This cache must not be
	locked.
*/
void
VMCache::_RemoveConsumer(VMCache* consumer)
{
	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, this));
	consumer->AssertLocked();

	T(RemoveConsumer(this, consumer));

	// Remove the store ref before locking the cache. Otherwise we'd call into
	// the VFS while holding the cache lock, which would reverse the usual
	// locking order.
	ReleaseStoreRef();

	// remove the consumer from the cache, but keep its reference until later
	Lock();
	consumers.Remove(consumer);
	consumer->source = NULL;

	ReleaseRefAndUnlock();
}


// #pragma mark - VMCacheFactory
	// TODO: Move to own source file!


/*static*/ status_t
VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
	int32 numPrecommittedPages, int32 numGuardPages, bool swappable,
	int priority)
{
	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
	if (priority >= VM_PRIORITY_VIP)
		allocationFlags |= HEAP_PRIORITY_VIP;

#if ENABLE_SWAP_SUPPORT
	if (swappable) {
		VMAnonymousCache* cache
			= new(gAnonymousCacheObjectCache, allocationFlags) VMAnonymousCache;
		if (cache == NULL)
			return B_NO_MEMORY;

		status_t error = cache->Init(canOvercommit, numPrecommittedPages,
			numGuardPages, allocationFlags);
		if (error != B_OK) {
			cache->Delete();
			return error;
		}

		T(Create(cache));

		_cache = cache;
		return B_OK;
	}
#endif

	VMAnonymousNoSwapCache* cache
		= new(gAnonymousNoSwapCacheObjectCache, allocationFlags)
			VMAnonymousNoSwapCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(canOvercommit, numPrecommittedPages,
		numGuardPages, allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*static*/ status_t
VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
{
	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
		// Note: Vnode cache creation is never VIP.

	VMVnodeCache* cache
		= new(gVnodeCacheObjectCache, allocationFlags) VMVnodeCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(vnode, allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*static*/ status_t
VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
{
	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
		// Note: Device cache creation is never VIP.

	VMDeviceCache* cache
		= new(gDeviceCacheObjectCache, allocationFlags) VMDeviceCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(baseAddress, allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*static*/ status_t
VMCacheFactory::CreateNullCache(int priority, VMCache*& _cache)
{
	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
	if (priority >= VM_PRIORITY_VIP)
		allocationFlags |= HEAP_PRIORITY_VIP;

	VMNullCache* cache
		= new(gNullCacheObjectCache, allocationFlags) VMNullCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}