1139825Simp/*-
21541Srgrimes * Copyright (c) 1990 University of Utah.
31541Srgrimes * Copyright (c) 1991, 1993
41541Srgrimes *	The Regents of the University of California.  All rights reserved.
51541Srgrimes *
61541Srgrimes * This code is derived from software contributed to Berkeley by
71541Srgrimes * the Systems Programming Group of the University of Utah Computer
81541Srgrimes * Science Department.
91541Srgrimes *
101541Srgrimes * Redistribution and use in source and binary forms, with or without
111541Srgrimes * modification, are permitted provided that the following conditions
121541Srgrimes * are met:
131541Srgrimes * 1. Redistributions of source code must retain the above copyright
141541Srgrimes *    notice, this list of conditions and the following disclaimer.
151541Srgrimes * 2. Redistributions in binary form must reproduce the above copyright
161541Srgrimes *    notice, this list of conditions and the following disclaimer in the
171541Srgrimes *    documentation and/or other materials provided with the distribution.
181541Srgrimes * 4. Neither the name of the University nor the names of its contributors
191541Srgrimes *    may be used to endorse or promote products derived from this software
201541Srgrimes *    without specific prior written permission.
211541Srgrimes *
221541Srgrimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
231541Srgrimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
241541Srgrimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
251541Srgrimes * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
261541Srgrimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
271541Srgrimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
281541Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
291541Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
301541Srgrimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
311541Srgrimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
321541Srgrimes * SUCH DAMAGE.
331541Srgrimes *
341549Srgrimes *	@(#)device_pager.c	8.1 (Berkeley) 6/11/93
351541Srgrimes */
361541Srgrimes
37116226Sobrien#include <sys/cdefs.h>
38116226Sobrien__FBSDID("$FreeBSD$");
39116226Sobrien
401541Srgrimes#include <sys/param.h>
411541Srgrimes#include <sys/systm.h>
421541Srgrimes#include <sys/conf.h>
4376166Smarkm#include <sys/lock.h>
4479224Sdillon#include <sys/proc.h>
4576166Smarkm#include <sys/mutex.h>
461541Srgrimes#include <sys/mman.h>
4775675Salfred#include <sys/sx.h>
481541Srgrimes
491541Srgrimes#include <vm/vm.h>
50240238Skib#include <vm/vm_param.h>
5112662Sdg#include <vm/vm_object.h>
521541Srgrimes#include <vm/vm_page.h>
539507Sdg#include <vm/vm_pager.h>
5492748Sjeff#include <vm/uma.h>
551541Srgrimes
/* Forward declarations for the pagerops method tables below. */
static void dev_pager_init(void);
static vm_object_t dev_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *);
static void dev_pager_dealloc(vm_object_t);
static int dev_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void dev_pager_putpages(vm_object_t, vm_page_t *, int,
		boolean_t, int *);
static boolean_t dev_pager_haspage(vm_object_t, vm_pindex_t, int *,
		int *);
static void dev_pager_free_page(vm_object_t object, vm_page_t m);

/* list of device pager objects */
static struct pagerlst dev_pager_object_list;
/* protect list manipulation */
static struct mtx dev_pager_mtx;
7112820Sphk
/*
 * Pager method table for unmanaged OBJT_DEVICE objects, whose fake
 * pages are tracked on the object's devp_pglist.
 */
struct pagerops devicepagerops = {
	.pgo_init =	dev_pager_init,
	.pgo_alloc =	dev_pager_alloc,
	.pgo_dealloc =	dev_pager_dealloc,
	.pgo_getpages =	dev_pager_getpages,
	.pgo_putpages =	dev_pager_putpages,
	.pgo_haspage =	dev_pager_haspage,
};

/*
 * Pager method table for OBJT_MGTDEVICE objects, whose pages are
 * managed by the pmap layer.  Shares all methods with devicepagerops
 * except pgo_init (the shared list and mutex are initialized once).
 */
struct pagerops mgtdevicepagerops = {
	.pgo_alloc =	dev_pager_alloc,
	.pgo_dealloc =	dev_pager_dealloc,
	.pgo_getpages =	dev_pager_getpages,
	.pgo_putpages =	dev_pager_putpages,
	.pgo_haspage =	dev_pager_haspage,
};
88236925Skib
static int old_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color);
static void old_dev_pager_dtor(void *handle);
static int old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
    int prot, vm_page_t *mres);

/*
 * Backend callbacks implementing the legacy d_mmap()-based device pager
 * on top of the generic cdev_pager_allocate() interface.
 */
static struct cdev_pager_ops old_dev_pager_ops = {
	.cdev_pg_ctor =	old_dev_pager_ctor,
	.cdev_pg_dtor =	old_dev_pager_dtor,
	.cdev_pg_fault = old_dev_pager_fault
};
100229383Skib
10112820Sphkstatic void
1021541Srgrimesdev_pager_init()
1031541Srgrimes{
1049507Sdg	TAILQ_INIT(&dev_pager_object_list);
10593818Sjhb	mtx_init(&dev_pager_mtx, "dev_pager list", NULL, MTX_DEF);
1061541Srgrimes}
1071541Srgrimes
108229383Skibvm_object_t
109229383Skibcdev_pager_lookup(void *handle)
1101541Srgrimes{
111229383Skib	vm_object_t object;
112229383Skib
113229383Skib	mtx_lock(&dev_pager_mtx);
114229383Skib	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
115229383Skib	mtx_unlock(&dev_pager_mtx);
116229383Skib	return (object);
117229383Skib}
118229383Skib
/*
 * Allocate (or find and grow) the device pager VM object for "handle".
 *
 * "tp" must be OBJT_DEVICE or OBJT_MGTDEVICE; "ops" supplies the
 * backend ctor/dtor/fault callbacks.  "foff" must be page aligned.
 * The backend constructor is invoked before the object is created and
 * may veto the mapping; on veto or invalid arguments NULL is returned.
 *
 * Because the constructor and vm_object_allocate() may sleep, the list
 * mutex is dropped around them and the lookup is retried afterwards to
 * handle a racing allocator.
 */
vm_object_t
cdev_pager_allocate(void *handle, enum obj_type tp, struct cdev_pager_ops *ops,
    vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred)
{
	vm_object_t object, object1;
	vm_pindex_t pindex;
	u_short color;

	if (tp != OBJT_DEVICE && tp != OBJT_MGTDEVICE)
		return (NULL);

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	size = round_page(size);
	pindex = OFF_TO_IDX(foff + size);

	/* Let the backend validate the mapping and compute the color. */
	if (ops->cdev_pg_ctor(handle, size, prot, foff, cred, &color) != 0)
		return (NULL);
	mtx_lock(&dev_pager_mtx);

	/*
	 * Look up pager, creating as necessary.
	 */
	object1 = NULL;
	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
	if (object == NULL) {
		/*
		 * Allocate object and associate it with the pager.  Initialize
		 * the object's pg_color based upon the physical address of the
		 * device's memory.
		 */
		mtx_unlock(&dev_pager_mtx);
		object1 = vm_object_allocate(tp, pindex);
		object1->flags |= OBJ_COLORED;
		object1->pg_color = color;
		object1->handle = handle;
		object1->un_pager.devp.ops = ops;
		TAILQ_INIT(&object1->un_pager.devp.devp_pglist);
		mtx_lock(&dev_pager_mtx);
		/* Re-check: another thread may have inserted meanwhile. */
		object = vm_pager_object_lookup(&dev_pager_object_list, handle);
		if (object != NULL) {
			/*
			 * We raced with other thread while allocating object.
			 */
			if (pindex > object->size)
				object->size = pindex;
		} else {
			object = object1;
			object1 = NULL;
			object->handle = handle;
			TAILQ_INSERT_TAIL(&dev_pager_object_list, object,
			    pager_object_list);
			KASSERT(object->type == tp,
		("Inconsistent device pager type %p %d", object, tp));
		}
	} else {
		/* Existing pager: only grow it to cover the new range. */
		if (pindex > object->size)
			object->size = pindex;
	}
	mtx_unlock(&dev_pager_mtx);
	if (object1 != NULL) {
		/*
		 * Lost the race: dispose of the spare object.  It is
		 * briefly inserted on the list (with a self handle so it
		 * cannot be looked up) because dev_pager_dealloc()
		 * unconditionally removes the object from the list.
		 */
		object1->handle = object1;
		mtx_lock(&dev_pager_mtx);
		TAILQ_INSERT_TAIL(&dev_pager_object_list, object1,
		    pager_object_list);
		mtx_unlock(&dev_pager_mtx);
		vm_object_deallocate(object1);
	}
	return (object);
}
1931541Srgrimes
194229383Skibstatic vm_object_t
195229383Skibdev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
196229383Skib    vm_ooffset_t foff, struct ucred *cred)
197229383Skib{
198229383Skib
199229383Skib	return (cdev_pager_allocate(handle, OBJT_DEVICE, &old_dev_pager_ops,
200229383Skib	    size, prot, foff, cred));
201229383Skib}
202229383Skib
203229383Skibvoid
204229383Skibcdev_pager_free_page(vm_object_t object, vm_page_t m)
205229383Skib{
206229383Skib
207229383Skib	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
208236925Skib	if (object->type == OBJT_MGTDEVICE) {
209236925Skib		KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("unmanaged %p", m));
210236925Skib		pmap_remove_all(m);
211236925Skib		vm_page_lock(m);
212236925Skib		vm_page_remove(m);
213236925Skib		vm_page_unlock(m);
214236925Skib	} else if (object->type == OBJT_DEVICE)
215236925Skib		dev_pager_free_page(object, m);
216236925Skib}
217236925Skib
/*
 * Free a fake page handed out by an OBJT_DEVICE pager: unlink it from
 * the object's fictitious-page list and release it.  The object lock
 * must be held; the page must be unmanaged.
 */
static void
dev_pager_free_page(vm_object_t object, vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT((object->type == OBJT_DEVICE &&
	    (m->oflags & VPO_UNMANAGED) != 0),
	    ("Managed device or page obj %p m %p", object, m));
	TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, pageq);
	vm_page_putfake(m);
}
229229383Skib
23012820Sphkstatic void
2319507Sdgdev_pager_dealloc(object)
2329507Sdg	vm_object_t object;
2331541Srgrimes{
2341541Srgrimes	vm_page_t m;
2351541Srgrimes
236171779Skib	VM_OBJECT_UNLOCK(object);
237229383Skib	object->un_pager.devp.ops->cdev_pg_dtor(object->handle);
238229383Skib
23975675Salfred	mtx_lock(&dev_pager_mtx);
2409507Sdg	TAILQ_REMOVE(&dev_pager_object_list, object, pager_object_list);
24175675Salfred	mtx_unlock(&dev_pager_mtx);
242171779Skib	VM_OBJECT_LOCK(object);
243236925Skib
244236925Skib	if (object->type == OBJT_DEVICE) {
245236925Skib		/*
246236925Skib		 * Free up our fake pages.
247236925Skib		 */
248236925Skib		while ((m = TAILQ_FIRST(&object->un_pager.devp.devp_pglist))
249236925Skib		    != NULL)
250236925Skib			dev_pager_free_page(object, m);
251236925Skib	}
252229383Skib}
253229383Skib
/*
 * pgo_getpages: satisfy a fault via the backend's cdev_pg_fault
 * callback.  Only the requested page is supplied; every other page in
 * "ma" is freed, since device pagers do not cluster.  On success, the
 * resulting page of an OBJT_DEVICE object is a fake page and is queued
 * on the object's devp_pglist for reclamation at dealloc time.
 * Called and returns with the object lock held.
 */
static int
dev_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int reqpage)
{
	int error, i;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	error = object->un_pager.devp.ops->cdev_pg_fault(object,
	    IDX_TO_OFF(ma[reqpage]->pindex), PROT_READ, &ma[reqpage]);

	/* The callback may drop the lock, but must return with it held. */
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	for (i = 0; i < count; i++) {
		/* Free every page except the one actually faulted on. */
		if (i != reqpage) {
			vm_page_lock(ma[i]);
			vm_page_free(ma[i]);
			vm_page_unlock(ma[i]);
		}
	}

	if (error == VM_PAGER_OK) {
		/*
		 * OBJT_DEVICE pages are unmanaged fakes; OBJT_MGTDEVICE
		 * pages are pmap-managed.  Anything else is a backend bug.
		 */
		KASSERT((object->type == OBJT_DEVICE &&
		     (ma[reqpage]->oflags & VPO_UNMANAGED) != 0) ||
		    (object->type == OBJT_MGTDEVICE &&
		     (ma[reqpage]->oflags & VPO_UNMANAGED) == 0),
		    ("Wrong page type %p %p", ma[reqpage], object));
		if (object->type == OBJT_DEVICE) {
			/* Track the fake page for cleanup at dealloc. */
			TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist,
			    ma[reqpage], pageq);
		}
	}

	return (error);
}
2871541Srgrimes
28812820Sphkstatic int
289229383Skibold_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
290229383Skib    vm_page_t *mres)
2911541Srgrimes{
292229383Skib	vm_pindex_t pidx;
293112569Sjake	vm_paddr_t paddr;
294195649Salc	vm_page_t m_paddr, page;
295130585Sphk	struct cdev *dev;
296135707Sphk	struct cdevsw *csw;
297229383Skib	struct file *fpop;
298183383Skib	struct thread *td;
299229383Skib	vm_memattr_t memattr;
300229383Skib	int ref, ret;
3011541Srgrimes
302229383Skib	pidx = OFF_TO_IDX(offset);
303195649Salc	memattr = object->memattr;
304229383Skib
305116279Salc	VM_OBJECT_UNLOCK(object);
306229383Skib
307229383Skib	dev = object->handle;
308210923Skib	csw = dev_refthread(dev, &ref);
309223823Sattilio	if (csw == NULL) {
310223823Sattilio		VM_OBJECT_LOCK(object);
311223823Sattilio		return (VM_PAGER_FAIL);
312223823Sattilio	}
313183383Skib	td = curthread;
314183383Skib	fpop = td->td_fpop;
315183383Skib	td->td_fpop = NULL;
316229383Skib	ret = csw->d_mmap(dev, offset, &paddr, prot, &memattr);
317183383Skib	td->td_fpop = fpop;
318210923Skib	dev_relthread(dev, ref);
319229383Skib	if (ret != 0) {
320229383Skib		printf(
321229383Skib	    "WARNING: dev_pager_getpage: map function returns error %d", ret);
322229383Skib		VM_OBJECT_LOCK(object);
323229383Skib		return (VM_PAGER_FAIL);
324229383Skib	}
325229383Skib
326195649Salc	/* If "paddr" is a real page, perform a sanity check on "memattr". */
327195649Salc	if ((m_paddr = vm_phys_paddr_to_vm_page(paddr)) != NULL &&
328195649Salc	    pmap_page_get_memattr(m_paddr) != memattr) {
329195649Salc		memattr = pmap_page_get_memattr(m_paddr);
330195649Salc		printf(
331195649Salc	    "WARNING: A device driver has set \"memattr\" inconsistently.\n");
332195649Salc	}
333229383Skib	if (((*mres)->flags & PG_FICTITIOUS) != 0) {
334132884Sdfr		/*
335229383Skib		 * If the passed in result page is a fake page, update it with
336132884Sdfr		 * the new physical address.
337132884Sdfr		 */
338229383Skib		page = *mres;
339133113Sdfr		VM_OBJECT_LOCK(object);
340219476Salc		vm_page_updatefake(page, paddr, memattr);
341132884Sdfr	} else {
342132884Sdfr		/*
343132884Sdfr		 * Replace the passed in reqpage page with our own fake page and
344132884Sdfr		 * free up the all of the original pages.
345132884Sdfr		 */
346219476Salc		page = vm_page_getfake(paddr, memattr);
347132884Sdfr		VM_OBJECT_LOCK(object);
348229383Skib		vm_page_lock(*mres);
349229383Skib		vm_page_free(*mres);
350229383Skib		vm_page_unlock(*mres);
351229383Skib		*mres = page;
352229383Skib		vm_page_insert(page, object, pidx);
353132884Sdfr	}
354194642Salc	page->valid = VM_PAGE_BITS_ALL;
3555455Sdg	return (VM_PAGER_OK);
3561541Srgrimes}
3571541Srgrimes
35843129Sdillonstatic void
3599507Sdgdev_pager_putpages(object, m, count, sync, rtvals)
3609507Sdg	vm_object_t object;
3619507Sdg	vm_page_t *m;
3629507Sdg	int count;
3631541Srgrimes	boolean_t sync;
3649507Sdg	int *rtvals;
3651541Srgrimes{
366229383Skib
3671541Srgrimes	panic("dev_pager_putpage called");
3681541Srgrimes}
3691541Srgrimes
37012820Sphkstatic boolean_t
37112767Sdysondev_pager_haspage(object, pindex, before, after)
3729507Sdg	vm_object_t object;
37312767Sdyson	vm_pindex_t pindex;
3749507Sdg	int *before;
3759507Sdg	int *after;
3761541Srgrimes{
3779507Sdg	if (before != NULL)
3789507Sdg		*before = 0;
3799507Sdg	if (after != NULL)
3809507Sdg		*after = 0;
3815455Sdg	return (TRUE);
3821541Srgrimes}
383229383Skib
/*
 * Constructor callback for the legacy device pager: verify that every
 * page of the requested range can be mapped by the driver's d_mmap()
 * with the requested protection, and take a reference on the cdev so it
 * stays alive for the lifetime of the VM object (released by
 * old_dev_pager_dtor()).
 *
 * On success returns 0 and sets *color to a page-color hint derived
 * from the physical address of the last validated page.  Returns ENXIO
 * if the device is gone, or EINVAL if d_mmap() rejects any offset.
 */
static int
old_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct cdev *dev;
	struct cdevsw *csw;
	vm_memattr_t dummy;
	vm_ooffset_t off;
	vm_paddr_t paddr;
	unsigned int npages;
	int ref;

	/*
	 * Make sure this device can be mapped.
	 */
	dev = handle;
	csw = dev_refthread(dev, &ref);
	if (csw == NULL)
		return (ENXIO);

	/*
	 * Check that the specified range of the device allows the desired
	 * protection.
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 */
	npages = OFF_TO_IDX(size);
	paddr = 0; /* Make paddr initialized for the case of size == 0. */
	for (off = foff; npages--; off += PAGE_SIZE) {
		if (csw->d_mmap(dev, off, &paddr, (int)prot, &dummy) != 0) {
			dev_relthread(dev, ref);
			return (EINVAL);
		}
	}

	dev_ref(dev);
	dev_relthread(dev, ref);
	/*
	 * "off" is one page past the last offset probed, so this colors
	 * the object by the paddr of the last page.  NOTE(review): for
	 * size == 0 this uses foff - PAGE_SIZE with paddr == 0 --
	 * presumably harmless since the object is empty; confirm.
	 */
	*color = atop(paddr) - OFF_TO_IDX(off - PAGE_SIZE);
	return (0);
}
424229383Skib
/*
 * Destructor callback for the legacy device pager: drop the cdev
 * reference taken by old_dev_pager_ctor().
 */
static void
old_dev_pager_dtor(void *handle)
{
	struct cdev *dev;

	dev = handle;
	dev_rel(dev);
}
431