/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.h	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: stable/11/sys/vm/vm_object.h 315563 2017-03-19 16:01:44Z kib $
 */

/*
 *	Virtual memory object module definitions.
 */

#ifndef	_VM_OBJECT_
#define	_VM_OBJECT_

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <sys/_rwlock.h>

#include <vm/_vm_radix.h>

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *
 *	The root of the cached pages pool is protected by both the per-object
 *	lock and the free pages queue mutex.
 *	On insert into the cache radix trie, the per-object lock is expected
 *	to be held already, and the free pages queue mutex is acquired
 *	during the operation as well.
 *	On remove and lookup from the cache radix trie, only the free
 *	pages queue mutex is expected to be locked.
 *	These rules allow for reliably checking for the presence of cached
 *	pages with only the per-object lock held, thereby reducing contention
 *	for the free pages queue mutex.
 *
 * List of locks
 *	(c)	const until freed
 *	(o)	per-object lock
 *	(f)	free pages queue mutex
 *
 */
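/*
 * Editorial sketch (not part of the original header): under the rules
 * above, a cached page is looked up with only the free pages queue
 * mutex held, while the per-object lock alone suffices to check whether
 * any cached pages exist at all (see vm_object_cache_is_empty() below).
 * This assumes the vm_radix_lookup() interface from <vm/vm_radix.h> and
 * the global vm_page_queue_free_mtx from <vm/vm_page.h>.
 */
#if 0	/* example only */
static vm_page_t
vm_object_cache_lookup_example(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	/* Lookup needs only the free pages queue mutex, not the object lock. */
	mtx_lock(&vm_page_queue_free_mtx);
	m = vm_radix_lookup(&object->cache, pindex);
	mtx_unlock(&vm_page_queue_free_mtx);
	return (m);
}
#endif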

struct vm_object {
	struct rwlock lock;
	TAILQ_ENTRY(vm_object) object_list; /* list of all objects */
	LIST_HEAD(, vm_object) shadow_head; /* objects that this is a shadow for */
	LIST_ENTRY(vm_object) shadow_list; /* chain of shadow objects */
	TAILQ_HEAD(respgs, vm_page) memq; /* list of resident pages */
	struct vm_radix rtree;		/* root of the resident page radix trie */
	vm_pindex_t size;		/* Object size */
	int generation;			/* generation ID */
	int ref_count;			/* reference count */
	int shadow_count;		/* count of objects that this is a shadow for */
	vm_memattr_t memattr;		/* default memory attribute for pages */
	objtype_t type;			/* type of pager */
	u_short flags;			/* see below */
	u_short pg_color;		/* (c) color of first page in obj */
	u_int paging_in_progress;	/* paging (in or out); don't collapse or destroy */
	int resident_page_count;	/* number of resident pages */
	struct vm_object *backing_object; /* object that I'm a shadow of */
	vm_ooffset_t backing_object_offset;/* Offset in backing object */
	TAILQ_ENTRY(vm_object) pager_object_list; /* list of all objects of this pager type */
	LIST_HEAD(, vm_reserv) rvq;	/* list of reservations */
	struct vm_radix cache;		/* (o + f) root of the cache page radix trie */
	void *handle;
	union {
		/*
		 * VNode pager
		 *
		 *	vnp_size - current size of file
		 */
		struct {
			off_t vnp_size;
			vm_ooffset_t writemappings;
		} vnp;

		/*
		 * Device pager
		 *
		 *	devp_pglist - list of allocated pages
		 */
		struct {
			TAILQ_HEAD(, vm_page) devp_pglist;
			struct cdev_pager_ops *ops;
			struct cdev *dev;
		} devp;

		/*
		 * SG pager
		 *
		 *	sgp_pglist - list of allocated pages
		 */
		struct {
			TAILQ_HEAD(, vm_page) sgp_pglist;
		} sgp;

		/*
		 * Swap pager
		 *
		 *	swp_tmpfs - back-pointer to the tmpfs vnode,
		 *		     if any, which uses the vm object
		 *		     as backing store.  The handle
		 *		     cannot be reused for linking,
		 *		     because the vnode can be
		 *		     reclaimed and recreated, which would
		 *		     change the handle and invalidate
		 *		     the hash chain.
		 *
		 *	swp_bcount - number of swap 'swblock' metablocks,
		 *		     each of which holds up to 16 swapblk
		 *		     assignments; see vm/swap_pager.h
		 */
		struct {
			void *swp_tmpfs;
			int swp_bcount;
		} swp;
	} un_pager;
	struct ucred *cred;
	vm_ooffset_t charge;
	void *umtx_data;
};

/*
 * Flags
 */
#define	OBJ_FICTITIOUS	0x0001		/* (c) contains fictitious pages */
#define	OBJ_UNMANAGED	0x0002		/* (c) contains unmanaged pages */
#define	OBJ_POPULATE	0x0004		/* pager implements populate() */
#define	OBJ_DEAD	0x0008		/* dead object (during rundown) */
#define	OBJ_NOSPLIT	0x0010		/* don't split this object */
#define	OBJ_UMTXDEAD	0x0020		/* umtx pshared was terminated */
#define	OBJ_PIPWNT	0x0040		/* paging in progress wanted */
#define	OBJ_MIGHTBEDIRTY 0x0100		/* object might be dirty; vnode objects only */
#define	OBJ_TMPFS_NODE	0x0200		/* object belongs to tmpfs VREG node */
#define	OBJ_TMPFS_DIRTY	0x0400		/* dirty tmpfs obj */
#define	OBJ_COLORED	0x1000		/* pg_color is defined */
#define	OBJ_ONEMAPPING	0x2000		/* one use (a single, non-forked) mapping */
#define	OBJ_DISCONNECTWNT 0x4000	/* disconnect from vnode wanted */
#define	OBJ_TMPFS	0x8000		/* has tmpfs vnode allocated */

/*
 * Helpers to perform conversion between vm_object page indexes and offsets.
 * IDX_TO_OFF() converts an index into an offset.
 * OFF_TO_IDX() converts an offset into an index.  Since offsets are signed
 *   by default, the sign propagation in OFF_TO_IDX(), when applied to
 *   negative offsets, is intentional and returns a vm_object page index
 *   that cannot be created by a userspace mapping.
 * UOFF_TO_IDX() treats the offset as an unsigned value and converts it
 *   into an index accordingly.  Use it only when the full range of offset
 *   values is allowed.  Currently, this only applies to device mappings.
 * OBJ_MAX_SIZE specifies the object size (in pages) corresponding to the
 *   maximum unsigned offset, i.e., one greater than the maximum page index.
 */
#define	IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT)
#define	OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT))
#define	UOFF_TO_IDX(off) (((vm_pindex_t)(off)) >> PAGE_SHIFT)
#define	OBJ_MAX_SIZE	(UOFF_TO_IDX(UINT64_MAX) + 1)
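/*
 * Editorial sketch illustrating the conversions, assuming 4KB pages
 * (PAGE_SHIFT == 12); the values in the comments follow directly from
 * the shifts above.
 */
#if 0	/* example only */
static void
vm_object_idx_off_example(void)
{
	vm_ooffset_t off;
	vm_pindex_t idx;

	idx = OFF_TO_IDX(0x3fff);	/* 3: truncated down to its page */
	off = IDX_TO_OFF(idx);		/* 0x3000: start of that page */
	idx = OFF_TO_IDX((vm_ooffset_t)-1); /* sign extension: all bits set */
	idx = UOFF_TO_IDX(UINT64_MAX);	/* OBJ_MAX_SIZE - 1 */
}
#endif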

#ifdef	_KERNEL

#define	OBJPC_SYNC	0x1		/* sync I/O */
#define	OBJPC_INVAL	0x2		/* invalidate */
#define	OBJPC_NOSYNC	0x4		/* skip if VPO_NOSYNC */
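/*
 * Editorial sketch: these flags form the "flags" argument of
 * vm_object_page_clean(), declared below.  An msync()-style synchronous
 * flush that also invalidates the pages might look like this; here
 * end == 0 is assumed to mean "to the end of the object", and the
 * caller must hold the object's write lock.
 */
#if 0	/* example only */
static void
vm_object_flush_example(vm_object_t object)
{

	VM_OBJECT_WLOCK(object);
	(void)vm_object_page_clean(object, 0, 0, OBJPC_SYNC | OBJPC_INVAL);
	VM_OBJECT_WUNLOCK(object);
}
#endif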

/*
 * The following options are supported by vm_object_page_remove().
 */
#define	OBJPR_CLEANONLY	0x1		/* Don't remove dirty pages. */
#define	OBJPR_NOTMAPPED	0x2		/* Don't unmap pages. */
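/*
 * Editorial sketch: vm_object_page_remove(), declared below, takes a
 * page index range plus the options above.  Dropping only the clean
 * pages in an object's first megabyte might look like this (the object
 * must be write-locked):
 */
#if 0	/* example only */
static void
vm_object_prune_example(vm_object_t object)
{

	VM_OBJECT_WLOCK(object);
	/* OBJPR_CLEANONLY keeps dirty pages resident. */
	vm_object_page_remove(object, 0, OFF_TO_IDX(1024 * 1024),
	    OBJPR_CLEANONLY);
	VM_OBJECT_WUNLOCK(object);
}
#endif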

TAILQ_HEAD(object_q, vm_object);

extern struct object_q vm_object_list;	/* list of allocated objects */
extern struct mtx vm_object_list_mtx;	/* lock for object list and count */

extern struct vm_object kernel_object_store;
extern struct vm_object kmem_object_store;

#define	kernel_object	(&kernel_object_store)
#define	kmem_object	(&kmem_object_store)

#define	VM_OBJECT_ASSERT_LOCKED(object)					\
	rw_assert(&(object)->lock, RA_LOCKED)
#define	VM_OBJECT_ASSERT_RLOCKED(object)				\
	rw_assert(&(object)->lock, RA_RLOCKED)
#define	VM_OBJECT_ASSERT_WLOCKED(object)				\
	rw_assert(&(object)->lock, RA_WLOCKED)
#define	VM_OBJECT_ASSERT_UNLOCKED(object)				\
	rw_assert(&(object)->lock, RA_UNLOCKED)
#define	VM_OBJECT_LOCK_DOWNGRADE(object)				\
	rw_downgrade(&(object)->lock)
#define	VM_OBJECT_RLOCK(object)						\
	rw_rlock(&(object)->lock)
#define	VM_OBJECT_RUNLOCK(object)					\
	rw_runlock(&(object)->lock)
#define	VM_OBJECT_SLEEP(object, wchan, pri, wmesg, timo)		\
	rw_sleep((wchan), &(object)->lock, (pri), (wmesg), (timo))
#define	VM_OBJECT_TRYRLOCK(object)					\
	rw_try_rlock(&(object)->lock)
#define	VM_OBJECT_TRYWLOCK(object)					\
	rw_try_wlock(&(object)->lock)
#define	VM_OBJECT_TRYUPGRADE(object)					\
	rw_try_upgrade(&(object)->lock)
#define	VM_OBJECT_WLOCK(object)						\
	rw_wlock(&(object)->lock)
#define	VM_OBJECT_WOWNED(object)					\
	rw_wowned(&(object)->lock)
#define	VM_OBJECT_WUNLOCK(object)					\
	rw_wunlock(&(object)->lock)
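/*
 * Editorial sketch of a common pattern with these macros: inspect the
 * object under the shared lock, then try to upgrade in place when a
 * modification turns out to be necessary, falling back to dropping the
 * lock and relocking exclusively (after which any state observed under
 * the shared lock must be revalidated).
 */
#if 0	/* example only */
static void
vm_object_upgrade_example(vm_object_t object)
{

	VM_OBJECT_RLOCK(object);
	/* ... read-only inspection ... */
	if (!VM_OBJECT_TRYUPGRADE(object)) {
		VM_OBJECT_RUNLOCK(object);
		VM_OBJECT_WLOCK(object);
		/* ... revalidate anything observed under the read lock ... */
	}
	VM_OBJECT_ASSERT_WLOCKED(object);
	/* ... modify the object ... */
	VM_OBJECT_WUNLOCK(object);
}
#endif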

/*
 *	The object must be locked or thread private.
 */
static __inline void
vm_object_set_flag(vm_object_t object, u_short bits)
{

	object->flags |= bits;
}

/*
 *	Conditionally set the object's color, which (1) enables the allocation
 *	of physical memory reservations for anonymous objects and larger-than-
 *	superpage-sized named objects and (2) determines the first page offset
 *	within the object at which a reservation may be allocated.  In other
 *	words, the color determines the alignment of the object with respect
 *	to the largest superpage boundary.  When mapping named objects, like
 *	files or POSIX shared memory objects, the color should be set to zero
 *	before a virtual address is selected for the mapping.  In contrast,
 *	for anonymous objects, the color may be set after the virtual address
 *	is selected.
 *
 *	The object must be locked.
 */
static __inline void
vm_object_color(vm_object_t object, u_short color)
{

	if ((object->flags & OBJ_COLORED) == 0) {
		object->pg_color = color;
		object->flags |= OBJ_COLORED;
	}
}
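/*
 * Editorial sketch: per the comment above, code mapping a named object
 * (for example, a vnode-backed file) would clear the color before the
 * mapping address is chosen, with the object lock held:
 */
#if 0	/* example only */
static void
vm_object_color_example(vm_object_t object)
{

	VM_OBJECT_WLOCK(object);
	vm_object_color(object, 0);	/* align to the superpage boundary */
	VM_OBJECT_WUNLOCK(object);
}
#endif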

void vm_object_clear_flag(vm_object_t object, u_short bits);
void vm_object_pip_add(vm_object_t object, short i);
void vm_object_pip_subtract(vm_object_t object, short i);
void vm_object_pip_wakeup(vm_object_t object);
void vm_object_pip_wakeupn(vm_object_t object, short i);
void vm_object_pip_wait(vm_object_t object, char *waitid);

static __inline boolean_t
vm_object_cache_is_empty(vm_object_t object)
{

	return (vm_radix_is_empty(&object->cache));
}

void umtx_shm_object_init(vm_object_t object);
void umtx_shm_object_terminated(vm_object_t object);
extern int umtx_shm_vnobj_persistent;

vm_object_t vm_object_allocate(objtype_t, vm_pindex_t);
boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t,
    boolean_t);
void vm_object_collapse(vm_object_t);
void vm_object_deallocate(vm_object_t);
void vm_object_destroy(vm_object_t);
void vm_object_terminate(vm_object_t);
void vm_object_set_writeable_dirty(vm_object_t);
void vm_object_init(void);
void vm_object_madvise(vm_object_t, vm_pindex_t, vm_pindex_t, int);
boolean_t vm_object_page_clean(vm_object_t object, vm_ooffset_t start,
    vm_ooffset_t end, int flags);
void vm_object_page_noreuse(vm_object_t object, vm_pindex_t start,
    vm_pindex_t end);
void vm_object_page_remove(vm_object_t object, vm_pindex_t start,
    vm_pindex_t end, int options);
boolean_t vm_object_populate(vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_object_print(long addr, boolean_t have_addr, long count, char *modif);
void vm_object_reference(vm_object_t);
void vm_object_reference_locked(vm_object_t);
int  vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr);
void vm_object_shadow(vm_object_t *, vm_ooffset_t *, vm_size_t);
void vm_object_split(vm_map_entry_t);
boolean_t vm_object_sync(vm_object_t, vm_ooffset_t, vm_size_t, boolean_t,
    boolean_t);
void vm_object_unwire(vm_object_t object, vm_ooffset_t offset,
    vm_size_t length, uint8_t queue);
struct vnode *vm_object_vnode(vm_object_t object);
#endif				/* _KERNEL */

#endif				/* _VM_OBJECT_ */