/*-
 * Copyright (c) 2009 Hudson River Trading LLC
 * Written by: John H. Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/vm/sg_pager.c 331017 2018-03-15 19:08:33Z kevans $");

/*
 * This pager manages OBJT_SG objects.  These objects are backed by
 * a scatter/gather list of physical address ranges.
 */
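
/*
 * Illustrative sketch (not part of the original file): a driver that
 * wants to expose a device buffer through this pager would typically
 * build a page-aligned scatter/gather list and pass it as the handle
 * to vm_pager_allocate().  "dev_paddr" and "dev_len" below stand in
 * for hypothetical, page-aligned driver-supplied values.
 *
 *	struct sglist *sg;
 *	vm_object_t obj;
 *
 *	sg = sglist_alloc(1, M_WAITOK);
 *	sglist_append_phys(sg, dev_paddr, dev_len);
 *	obj = vm_pager_allocate(OBJT_SG, sg, dev_len, VM_PROT_ALL, 0,
 *	    curthread->td_ucred);
 *	sglist_free(sg);
 *
 * The trailing sglist_free() only drops the driver's reference;
 * sg_pager_alloc() takes its own via sglist_hold().
 */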

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sglist.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/uma.h>

static vm_object_t sg_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *);
static void sg_pager_dealloc(vm_object_t);
static int sg_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static void sg_pager_putpages(vm_object_t, vm_page_t *, int,
		boolean_t, int *);
static boolean_t sg_pager_haspage(vm_object_t, vm_pindex_t, int *,
		int *);

struct pagerops sgpagerops = {
	.pgo_alloc =	sg_pager_alloc,
	.pgo_dealloc =	sg_pager_dealloc,
	.pgo_getpages =	sg_pager_getpages,
	.pgo_putpages =	sg_pager_putpages,
	.pgo_haspage =	sg_pager_haspage,
};
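
/*
 * These methods implement OBJT_SG support; sgpagerops is referenced
 * from the VM pager table in vm/vm_pager.c.  SG objects are never
 * paged out, so pgo_putpages simply panics if it is ever called.
 */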

static vm_object_t
sg_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
	struct sglist *sg;
	vm_object_t object;
	vm_pindex_t npages, pindex;
	int i;

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	/*
	 * The scatter/gather list must only include page-aligned
	 * ranges.
	 */
	npages = 0;
	sg = handle;
	for (i = 0; i < sg->sg_nseg; i++) {
		if ((sg->sg_segs[i].ss_paddr % PAGE_SIZE) != 0 ||
		    (sg->sg_segs[i].ss_len % PAGE_SIZE) != 0)
			return (NULL);
		npages += sg->sg_segs[i].ss_len / PAGE_SIZE;
	}

	/*
	 * The scatter/gather list has a fixed size.  Refuse requests
	 * to map beyond that.
	 */
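	/*
	 * Note: since pindex is unsigned, the two extra comparisons
	 * below catch wrap-around in the UOFF_TO_IDX(foff) +
	 * UOFF_TO_IDX(size) sum; a wrapped result would compare less
	 * than at least one of its addends.
	 */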
	size = round_page(size);
	pindex = UOFF_TO_IDX(foff) + UOFF_TO_IDX(size);
	if (pindex > npages || pindex < UOFF_TO_IDX(foff) ||
	    pindex < UOFF_TO_IDX(size))
		return (NULL);

	/*
	 * Allocate a new object and associate it with the
	 * scatter/gather list.  It is ok for our purposes to have
	 * multiple VM objects associated with the same scatter/gather
	 * list because scatter/gather lists are static.  This is also
	 * simpler than ensuring a unique object per scatter/gather
	 * list.
	 */
	object = vm_object_allocate(OBJT_SG, npages);
	object->handle = sglist_hold(sg);
	TAILQ_INIT(&object->un_pager.sgp.sgp_pglist);
	return (object);
}

static void
sg_pager_dealloc(vm_object_t object)
{
	struct sglist *sg;
	vm_page_t m;

	/*
	 * Free up our fake pages.
	 */
	while ((m = TAILQ_FIRST(&object->un_pager.sgp.sgp_pglist)) != 0) {
		TAILQ_REMOVE(&object->un_pager.sgp.sgp_pglist, m, plinks.q);
		vm_page_putfake(m);
	}

	sg = object->handle;
	sglist_free(sg);
	object->handle = NULL;
	object->type = OBJT_DEAD;
}

static int
sg_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
	struct sglist *sg;
	vm_page_t m_paddr, page;
	vm_pindex_t offset;
	vm_paddr_t paddr;
	vm_memattr_t memattr;
	size_t space;
	int i;

	/* Since our haspage reports zero after/before, the count is 1. */
	KASSERT(count == 1, ("%s: count %d", __func__, count));
	VM_OBJECT_ASSERT_WLOCKED(object);
	sg = object->handle;
	memattr = object->memattr;
	VM_OBJECT_WUNLOCK(object);
	offset = m[0]->pindex;

	/*
	 * Lookup the physical address of the requested page.  An initial
	 * value of '1' instead of '0' is used so we can assert that the
	 * page is found since '0' can be a valid page-aligned physical
	 * address.
	 */
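	/*
	 * For example, with two 4-page segments, pindex 5 walks past
	 * the first segment (space becomes 4 * PAGE_SIZE) and resolves
	 * to the second segment's ss_paddr + 5 * PAGE_SIZE - space,
	 * i.e. one page into that segment.
	 */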
	space = 0;
	paddr = 1;
	for (i = 0; i < sg->sg_nseg; i++) {
		if (space + sg->sg_segs[i].ss_len <= (offset * PAGE_SIZE)) {
			space += sg->sg_segs[i].ss_len;
			continue;
		}
		paddr = sg->sg_segs[i].ss_paddr + offset * PAGE_SIZE - space;
		break;
	}
	KASSERT(paddr != 1, ("invalid SG page index"));

	/* If "paddr" is a real page, perform a sanity check on "memattr". */
	if ((m_paddr = vm_phys_paddr_to_vm_page(paddr)) != NULL &&
	    pmap_page_get_memattr(m_paddr) != memattr) {
		memattr = pmap_page_get_memattr(m_paddr);
		printf(
	    "WARNING: A device driver has set \"memattr\" inconsistently.\n");
	}

	/* Return a fake page for the requested page. */
	KASSERT(!(m[0]->flags & PG_FICTITIOUS),
	    ("backing page for SG is fake"));

	/* Construct a new fake page. */
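	/*
	 * The fake page is queued on the object's sgp_pglist so that
	 * sg_pager_dealloc() can release it later; it then replaces
	 * the caller's placeholder page, which is freed below.
	 */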
	page = vm_page_getfake(paddr, memattr);
	VM_OBJECT_WLOCK(object);
	TAILQ_INSERT_TAIL(&object->un_pager.sgp.sgp_pglist, page, plinks.q);
	vm_page_replace_checked(page, object, offset, m[0]);
	vm_page_lock(m[0]);
	vm_page_free(m[0]);
	vm_page_unlock(m[0]);
	m[0] = page;
	page->valid = VM_PAGE_BITS_ALL;

	if (rbehind)
		*rbehind = 0;
	if (rahead)
		*rahead = 0;

	return (VM_PAGER_OK);
}

static void
sg_pager_putpages(vm_object_t object, vm_page_t *m, int count,
    boolean_t sync, int *rtvals)
{

	panic("sg_pager_putpage called");
}

static boolean_t
sg_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{

	if (before != NULL)
		*before = 0;
	if (after != NULL)
		*after = 0;
	return (TRUE);
}