/* phys_pager.c revision 61081 */
1/*
2 * Copyright (c) 2000 Peter Wemm
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: head/sys/vm/phys_pager.c 61081 2000-05-29 22:40:54Z dillon $
26 */
27
28#include <sys/param.h>
29#include <sys/systm.h>
30#include <sys/linker_set.h>
31#include <sys/conf.h>
32#include <sys/mman.h>
33#include <sys/sysctl.h>
34
35#include <vm/vm.h>
36#include <vm/vm_object.h>
37#include <vm/vm_page.h>
38#include <vm/vm_pager.h>
39#include <vm/vm_zone.h>
40
/*
 * Forward declarations for the OBJT_PHYS pager-ops entry points
 * (definitions below).  The pre-ANSI __P() prototype style matches
 * the rest of this file.
 */
static void phys_pager_init __P((void));
static vm_object_t phys_pager_alloc __P((void *, vm_ooffset_t, vm_prot_t,
		vm_ooffset_t));
static void phys_pager_dealloc __P((vm_object_t));
static int phys_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static void phys_pager_putpages __P((vm_object_t, vm_page_t *, int,
		boolean_t, int *));
static boolean_t phys_pager_haspage __P((vm_object_t, vm_pindex_t, int *,
		int *));
50
/* list of phys pager objects, searched by handle in phys_pager_alloc() */
static struct pagerlst phys_pager_object_list;

/*
 * Hand-rolled sleep lock serializing phys_pager_alloc():
 * _lock is the hold flag, _lock_want counts sleepers to wake.
 */
static int phys_pager_alloc_lock, phys_pager_alloc_lock_want;
55
/*
 * Pager operations vector for OBJT_PHYS objects; slot order follows
 * struct pagerops.  Registered with the pager framework elsewhere.
 */
struct pagerops physpagerops = {
	phys_pager_init,
	phys_pager_alloc,
	phys_pager_dealloc,
	phys_pager_getpages,
	phys_pager_putpages,
	phys_pager_haspage,
	NULL		/* final slot unused by this pager */
};
65
66static void
67phys_pager_init()
68{
69	TAILQ_INIT(&phys_pager_object_list);
70}
71
/*
 * Create, or add a reference to, the OBJT_PHYS object for "handle".
 *
 * Returns NULL if "foff" is not page aligned; otherwise an object
 * covering at least OFF_TO_IDX(foff + size) pages.  May sleep while
 * contending for the allocation lock.  "prot" is not examined here.
 */
static vm_object_t
phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
		 vm_ooffset_t foff)
{
	vm_object_t object;

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	size = round_page(size);

	/*
	 * Lock to prevent object creation race condition.
	 * Loop on the flag: tsleep() waiters may wake after another
	 * thread has already retaken the lock.
	 */
	while (phys_pager_alloc_lock) {
		phys_pager_alloc_lock_want++;
		tsleep(&phys_pager_alloc_lock, PVM, "ppall", 0);
		phys_pager_alloc_lock_want--;
	}
	phys_pager_alloc_lock = 1;

	/*
	 * Look up pager, creating as necessary.
	 */
	object = vm_pager_object_lookup(&phys_pager_object_list, handle);
	if (object == NULL) {
		/*
		 * Allocate object and associate it with the pager.
		 */
		object = vm_object_allocate(OBJT_PHYS,
			OFF_TO_IDX(foff + size));
		object->handle = handle;
#if 0
		TAILQ_INIT(&object->un_pager.physp.physp_pglist);
#endif
		TAILQ_INSERT_TAIL(&phys_pager_object_list, object,
		    pager_object_list);
	} else {
		/*
		 * Gain a reference to the object, growing it if this
		 * mapping extends past its current end.
		 */
		vm_object_reference(object);
		if (OFF_TO_IDX(foff + size) > object->size)
			object->size = OFF_TO_IDX(foff + size);
	}

	/* Drop the allocation lock and wake any waiters. */
	phys_pager_alloc_lock = 0;
	if (phys_pager_alloc_lock_want)
		wakeup(&phys_pager_alloc_lock);

	return (object);
}
127
128static void
129phys_pager_dealloc(object)
130	vm_object_t object;
131{
132	vm_page_t m;
133	int s;
134
135	TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
136}
137
138static int
139phys_pager_getpages(object, m, count, reqpage)
140	vm_object_t object;
141	vm_page_t *m;
142	int count;
143	int reqpage;
144{
145	int i, s;
146
147	s = splvm();
148	/*
149	 * Fill as many pages as vm_fault has allocated for us.
150	 */
151	for (i = 0; i < count; i++) {
152		if ((m[i]->flags & PG_ZERO) == 0)
153			vm_page_zero_fill(m[i]);
154		vm_page_flag_set(m[i], PG_ZERO);
155		/* Switch off pv_entries */
156		vm_page_unmanage(m[i]);
157		m[i]->valid = VM_PAGE_BITS_ALL;
158		m[i]->dirty = 0;
159		/* The requested page must remain busy, the others not. */
160		if (reqpage != i) {
161			vm_page_flag_clear(m[i], PG_BUSY);
162			m[i]->busy = 0;
163		}
164	}
165	splx(s);
166
167	return (VM_PAGER_OK);
168}
169
170static void
171phys_pager_putpages(object, m, count, sync, rtvals)
172	vm_object_t object;
173	vm_page_t *m;
174	int count;
175	boolean_t sync;
176	int *rtvals;
177{
178	panic("phys_pager_putpage called");
179}
180
181/*
182 * Implement a pretty aggressive clustered getpages strategy.  Hint that
183 * everything in an entire 4MB window should be prefaulted at once.
184 *
185 * XXX 4MB (1024 slots per page table page) is convenient for x86,
186 * but may not be for other arches.
187 */
188#ifndef PHYSCLUSTER
189#define PHYSCLUSTER 1024
190#endif
191static boolean_t
192phys_pager_haspage(object, pindex, before, after)
193	vm_object_t object;
194	vm_pindex_t pindex;
195	int *before;
196	int *after;
197{
198	vm_pindex_t base, end;
199
200	base = pindex & (~(PHYSCLUSTER - 1));
201	end = base + (PHYSCLUSTER - 1);
202	if (before != NULL)
203		*before = pindex - base;
204	if (after != NULL)
205		*after = end - pindex;
206	return (TRUE);
207}
208