/*
 * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_VM_VM_TYPES_H
#define _KERNEL_VM_VM_TYPES_H


#include <new>

#include <AllocationTracking.h>
#include <arch/vm_types.h>
#include <condition_variable.h>
#include <kernel.h>
#include <lock.h>
#include <util/DoublyLinkedList.h>
#include <util/DoublyLinkedQueue.h>
#include <util/SplayTree.h>

#include <sys/uio.h>

#include "kernel_debug_config.h"


#define VM_PAGE_ALLOCATION_TRACKING_AVAILABLE \
	(VM_PAGE_ALLOCATION_TRACKING && PAGE_ALLOCATION_TRACING != 0 \
		&& PAGE_ALLOCATION_TRACING_STACK_TRACE > 0)


class AsyncIOCallback;
struct vm_page_mapping;
struct VMCache;
struct VMCacheRef;
typedef DoublyLinkedListLink<vm_page_mapping> vm_page_mapping_link;


struct virtual_address_restrictions {
	void*	address;
				// base or exact address, depending on address_specification
	uint32	address_specification;
				// address specification as passed to create_area()
	size_t	alignment;
				// address alignment; overridden when
				// address_specification == B_ANY_KERNEL_BLOCK_ADDRESS
};
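
// Usage sketch: filling in the restrictions for an allocation that may go
// anywhere in the kernel address space (B_ANY_KERNEL_ADDRESS as accepted by
// create_area()):
//
//	virtual_address_restrictions virtualRestrictions = {};
//	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
//	virtualRestrictions.alignment = B_PAGE_SIZE;
//		// address stays NULL: no base or exact address requested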

struct physical_address_restrictions {
	phys_addr_t	low_address;
					// lowest acceptable address
	phys_addr_t	high_address;
					// lowest no longer acceptable address; for ranges: the
					// highest acceptable non-inclusive end address
	phys_size_t	alignment;
					// address alignment
	phys_size_t	boundary;
					// multiples of which may not be crossed by the address
					// range
};
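
// Usage sketch: restrictions for a hypothetical DMA buffer that must lie in
// the first 16 MB of physical memory and must not cross a 64 KB boundary
// (the classic ISA DMA constraints):
//
//	physical_address_restrictions physicalRestrictions = {};
//	physicalRestrictions.high_address = 16 * 1024 * 1024;
//		// non-inclusive upper bound; low_address stays 0
//	physicalRestrictions.boundary = 64 * 1024;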


typedef struct vm_page_mapping {
	vm_page_mapping_link page_link;
	vm_page_mapping_link area_link;
	struct vm_page *page;
	struct VMArea *area;
} vm_page_mapping;

class DoublyLinkedPageLink {
	public:
		inline vm_page_mapping_link *operator()(vm_page_mapping *element) const
		{
			return &element->page_link;
		}

		inline const vm_page_mapping_link *operator()(
			const vm_page_mapping *element) const
		{
			return &element->page_link;
		}
};

class DoublyLinkedAreaLink {
	public:
		inline vm_page_mapping_link *operator()(vm_page_mapping *element) const
		{
			return &element->area_link;
		}

		inline const vm_page_mapping_link *operator()(
			const vm_page_mapping *element) const
		{
			return &element->area_link;
		}
};

typedef class DoublyLinkedQueue<vm_page_mapping, DoublyLinkedPageLink>
	vm_page_mappings;
typedef class DoublyLinkedQueue<vm_page_mapping, DoublyLinkedAreaLink>
	VMAreaMappings;
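
// Each vm_page_mapping is enqueued twice through its two embedded links: once
// in the owning page's queue (via DoublyLinkedPageLink) and once in the
// owning area's queue (via DoublyLinkedAreaLink). A sketch of how mapping
// code might thread one onto both queues, assuming the caller holds the
// necessary locks and that VMArea exposes a VMAreaMappings member named
// "mappings":
//
//	mapping->page = page;
//	mapping->area = area;
//	page->mappings.Add(mapping);
//	area->mappings.Add(mapping);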

typedef phys_addr_t page_num_t;


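// Ties a cache to the pages referencing it: pages point at a VMCacheRef
// rather than at the VMCache itself, so code that holds only a page can
// still safely resolve (and lock) the page's cache while caches are being
// merged or deleted concurrently.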
struct VMCacheRef {
			VMCache*			cache;
			int32				ref_count;

								VMCacheRef(VMCache* cache);
};


struct vm_page {
	DoublyLinkedListLink<vm_page> queue_link;

	page_num_t				physical_page_number;

private:
	VMCacheRef*				cache_ref;
public:
	page_num_t				cache_offset;
								// in page size units
								// TODO: Only 32 bit on 32 bit platforms!
								// Introduce a new 64 bit type page_off_t!

	SplayTreeLink<vm_page>	cache_link;
	vm_page*				cache_next;

	vm_page_mappings		mappings;

#if DEBUG_PAGE_QUEUE
	void*					queue;
#endif

#if DEBUG_PAGE_ACCESS
	int32					accessing_thread;
#endif

#if VM_PAGE_ALLOCATION_TRACKING_AVAILABLE
	AllocationTrackingInfo	allocation_tracking_info;
#endif

private:
	uint8					state : 3;
public:
	bool					busy : 1;
	bool					busy_writing : 1;
		// used in VMAnonymousCache::Merge()
	bool					accessed : 1;
	bool					modified : 1;
	uint8					unused : 1;

	uint8					usage_count;

	inline void Init(page_num_t pageNumber);

	VMCacheRef* CacheRef() const			{ return cache_ref; }
	void SetCacheRef(VMCacheRef* cacheRef)	{ this->cache_ref = cacheRef; }

	VMCache* Cache() const
		{ return cache_ref != NULL ? cache_ref->cache : NULL; }

	bool IsMapped() const
		{ return fWiredCount > 0 || !mappings.IsEmpty(); }

	uint8 State() const				{ return state; }
	void InitState(uint8 newState);
	void SetState(uint8 newState);

	inline uint16 WiredCount() const	{ return fWiredCount; }
	inline void IncrementWiredCount();
	inline void DecrementWiredCount();
		// both implemented in VMCache.h to avoid inclusion here

private:
	uint16					fWiredCount;
};
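
// Usage sketch: a page counts as mapped if it is wired or has at least one
// vm_page_mapping. Reclaim code would typically check this (with the page's
// cache locked) before recycling a page:
//
//	if (!page->IsMapped() && !page->busy
//			&& page->State() == PAGE_STATE_CACHED) {
//		// candidate for reuse
//	}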


enum {
	PAGE_STATE_ACTIVE = 0,
	PAGE_STATE_INACTIVE,
	PAGE_STATE_MODIFIED,
	PAGE_STATE_CACHED,
	PAGE_STATE_FREE,
	PAGE_STATE_CLEAR,
	PAGE_STATE_WIRED,
	PAGE_STATE_UNUSED,

	PAGE_STATE_COUNT,

	PAGE_STATE_FIRST_UNQUEUED = PAGE_STATE_WIRED
};
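
// Every state below PAGE_STATE_FIRST_UNQUEUED corresponds to one of the
// global page queues; wired and unused pages are not kept in any of them,
// which is what PAGE_STATE_FIRST_UNQUEUED expresses.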


#define VM_PAGE_ALLOC_STATE	0x00000007
#define VM_PAGE_ALLOC_CLEAR	0x00000010
#define VM_PAGE_ALLOC_BUSY	0x00000020

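// VM_PAGE_ALLOC_STATE masks the low bits of the allocation flags, which carry
// the initial page state. A sketch of a typical combination, assuming the
// vm_page_allocate_page() interface from vm_page.h:
//
//	vm_page* page = vm_page_allocate_page(&reservation,
//		PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
//		// a wired page whose contents are guaranteed to be zeroed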

inline void
vm_page::Init(page_num_t pageNumber)
{
	physical_page_number = pageNumber;
	InitState(PAGE_STATE_FREE);
	new(&mappings) vm_page_mappings();
	fWiredCount = 0;
	usage_count = 0;
	busy_writing = false;
	SetCacheRef(NULL);
	#if DEBUG_PAGE_QUEUE
		queue = NULL;
	#endif
	#if DEBUG_PAGE_ACCESS
		accessing_thread = -1;
	#endif
}


#if DEBUG_PAGE_ACCESS
#	include <thread.h>

static inline void
vm_page_debug_access_start(vm_page* page)
{
	thread_id threadID = thread_get_current_thread_id();
	thread_id previousThread = atomic_test_and_set(&page->accessing_thread,
		threadID, -1);
	if (previousThread != -1) {
		panic("Invalid concurrent access to page 0x%" B_PRIXPHYSADDR " (start), currently "
			"accessed by: %" B_PRId32 "@! page -m %p; sc %" B_PRId32 "; cache _cache",
			page->physical_page_number * B_PAGE_SIZE, previousThread, page, previousThread);
	}
}


static inline void
vm_page_debug_access_end(vm_page* page)
{
	thread_id threadID = thread_get_current_thread_id();
	thread_id previousThread = atomic_test_and_set(&page->accessing_thread, -1,
		threadID);
	if (previousThread != threadID) {
		panic("Invalid concurrent access to page 0x%" B_PRIXPHYSADDR " (end) by "
			"current thread, current accessor is: %" B_PRId32 "@! page -m %p; "
			"sc %" B_PRId32 "; cache _cache", page->physical_page_number * B_PAGE_SIZE,
			previousThread, page, previousThread);
	}
}


static inline void
vm_page_debug_access_check(vm_page* page)
{
	thread_id thread = page->accessing_thread;
	if (thread != thread_get_current_thread_id()) {
		panic("Invalid concurrent access to page 0x%" B_PRIXPHYSADDR " (check), currently "
			"accessed by: %" B_PRId32 "@! page -m %p; sc %" B_PRId32 "; cache _cache",
			page->physical_page_number * B_PAGE_SIZE, thread, page, thread);
	}
}


static inline void
vm_page_debug_access_transfer(vm_page* page, thread_id expectedPreviousThread)
{
	thread_id threadID = thread_get_current_thread_id();
	thread_id previousThread = atomic_test_and_set(&page->accessing_thread,
		threadID, expectedPreviousThread);
	if (previousThread != expectedPreviousThread) {
		panic("Invalid access transfer for page %p, currently accessed by: "
			"%" B_PRId32 ", expected: %" B_PRId32, page, previousThread,
			expectedPreviousThread);
	}
}

#	define DEBUG_PAGE_ACCESS_START(page)	vm_page_debug_access_start(page)
#	define DEBUG_PAGE_ACCESS_END(page)		vm_page_debug_access_end(page)
#	define DEBUG_PAGE_ACCESS_CHECK(page)	vm_page_debug_access_check(page)
#	define DEBUG_PAGE_ACCESS_TRANSFER(page, thread)	\
		vm_page_debug_access_transfer(page, thread)
#else
#	define DEBUG_PAGE_ACCESS_START(page)			do {} while (false)
#	define DEBUG_PAGE_ACCESS_END(page)				do {} while (false)
#	define DEBUG_PAGE_ACCESS_CHECK(page)			do {} while (false)
#	define DEBUG_PAGE_ACCESS_TRANSFER(page, thread)	do {} while (false)
#endif
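
// Usage sketch: code that touches a page's fields brackets the access, so
// that with DEBUG_PAGE_ACCESS enabled any concurrent accessor panics
// immediately (the macros compile away otherwise):
//
//	DEBUG_PAGE_ACCESS_START(page);
//	page->modified = true;
//	DEBUG_PAGE_ACCESS_END(page);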


#endif	// _KERNEL_VM_VM_TYPES_H
