/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Paging space routine stubs.  Emulates a matchmaker-like interface
 *	for builtin pagers.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

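/*
 * The main UMA zone backing the pager bufs (pbufs) used by the pagers for
 * their I/O.  Each pbuf carries KVA for PBUF_PAGES pages; the secondary
 * zones created by pbuf_zsecond_create() share this zone's items.
 */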
uma_zone_t pbuf_zone;
static int	pbuf_init(void *, int, int);
static int	pbuf_ctor(void *, int, void *, int);
static void	pbuf_dtor(void *, int, void *);

static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *);
static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static void dead_pager_dealloc(vm_object_t);
static void dead_pager_getvp(vm_object_t, struct vnode **, bool *);

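/*
 * The dead pager backs OBJT_DEAD objects, i.e. objects whose real pager has
 * already been torn down.  Every operation either fails or is a no-op:
 * getpages fails, putpages reports VM_PAGER_AGAIN for every page, haspage
 * finds nothing, and alloc/dealloc do no work.
 */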
static int
dead_pager_getpages(vm_object_t obj, vm_page_t *ma, int count, int *rbehind,
    int *rahead)
{

	return (VM_PAGER_FAIL);
}

static vm_object_t
dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t off, struct ucred *cred)
{

	return (NULL);
}

static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count,
    int flags, int *rtvals)
{
	int i;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;
}

static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev, int *next)
{

	if (prev != NULL)
		*prev = 0;
	if (next != NULL)
		*next = 0;
	return (FALSE);
}

static void
dead_pager_dealloc(vm_object_t object)
{

}

static void
dead_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
{
	/*
	 * For OBJT_DEAD objects, v_writecount was handled in
	 * vnode_pager_dealloc().
	 */
}

static const struct pagerops deadpagerops = {
	.pgo_kvme_type = KVME_TYPE_DEAD,
	.pgo_alloc =	dead_pager_alloc,
	.pgo_dealloc =	dead_pager_dealloc,
	.pgo_getpages =	dead_pager_getpages,
	.pgo_putpages =	dead_pager_putpages,
	.pgo_haspage =	dead_pager_haspage,
	.pgo_getvp =	dead_pager_getvp,
};

const struct pagerops *pagertab[16] __read_mostly = {
	[OBJT_DEFAULT] =	&defaultpagerops,
	[OBJT_SWAP] =		&swappagerops,
	[OBJT_VNODE] =		&vnodepagerops,
	[OBJT_DEVICE] =		&devicepagerops,
	[OBJT_PHYS] =		&physpagerops,
	[OBJT_DEAD] =		&deadpagerops,
	[OBJT_SG] =		&sgpagerops,
	[OBJT_MGTDEVICE] =	&mgtdevicepagerops,
};
static struct mtx pagertab_lock;

void
vm_pager_init(void)
{
	const struct pagerops **pgops;
	int i;

	mtx_init(&pagertab_lock, "dynpag", NULL, MTX_DEF);

	/*
	 * Initialize known pagers
	 */
	for (i = 0; i < OBJT_FIRST_DYN; i++) {
		pgops = &pagertab[i];
		if ((*pgops)->pgo_init != NULL)
			(*(*pgops)->pgo_init)();
	}
}

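/*
 * Running total of the hard limits handed to the pbuf zones.  pbuf_prealloc()
 * uses it for a single preallocation pass at boot and then sets it to -1, at
 * which point later pbuf_zsecond_create() calls preallocate for themselves.
 */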
static int nswbuf_max;

void
vm_pager_bufferinit(void)
{

	/* Main zone for paging bufs. */
	pbuf_zone = uma_zcreate("pbuf",
	    sizeof(struct buf) + PBUF_PAGES * sizeof(vm_page_t),
	    pbuf_ctor, pbuf_dtor, pbuf_init, NULL, UMA_ALIGN_CACHE,
	    UMA_ZONE_NOFREE);
	/* A few subsystems may still use this zone directly, so it needs a limit. */
	nswbuf_max += uma_zone_set_max(pbuf_zone, NSWBUF_MIN);
}

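/*
 * Create a secondary pbuf zone for a particular consumer.  The zone shares
 * its items with the main pbuf zone but carries its own hard limit of "max"
 * pbufs.
 */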
uma_zone_t
pbuf_zsecond_create(const char *name, int max)
{
	uma_zone_t zone;

	zone = uma_zsecond_create(name, pbuf_ctor, pbuf_dtor, NULL, NULL,
	    pbuf_zone);
	/*
	 * uma_prealloc() rounds up to items per slab.  If we preallocated
	 * immediately on every pbuf_zsecond_create(), we could accumulate too
	 * large a difference between the hard limit and the preallocated
	 * items, which means wasted memory.  Instead, accumulate the limits
	 * in nswbuf_max and preallocate once, from pbuf_prealloc().
	 */
	if (nswbuf_max > 0)
		nswbuf_max += uma_zone_set_max(zone, max);
	else
		uma_prealloc(pbuf_zone, uma_zone_set_max(zone, max));

	return (zone);
}

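/*
 * SYSINIT hook: preallocate the accumulated number of pbufs in one pass and
 * set nswbuf_max to -1 so that pbuf zones created afterwards preallocate for
 * themselves.
 */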
static void
pbuf_prealloc(void *arg __unused)
{

	uma_prealloc(pbuf_zone, nswbuf_max);
	nswbuf_max = -1;
}

SYSINIT(pbuf, SI_SUB_KTHREAD_BUF, SI_ORDER_ANY, pbuf_prealloc, NULL);

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
    vm_prot_t prot, vm_ooffset_t off, struct ucred *cred)
{
	MPASS(type < nitems(pagertab));

	return ((*pagertab[type]->pgo_alloc)(handle, size, prot, off, cred));
}

/*
 *	The object must be locked.
 */
void
vm_pager_deallocate(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	MPASS(object->type < nitems(pagertab));
	(*pagertab[object->type]->pgo_dealloc) (object);
}

static void
vm_pager_assert_in(vm_object_t object, vm_page_t *m, int count)
{
#ifdef INVARIANTS

	/*
	 * All pages must be consecutive, busied, not mapped, not fully valid,
	 * not dirty and belong to the proper object.  Some pages may be the
	 * bogus page, but the first and last pages must be real ones.
	 */

	VM_OBJECT_ASSERT_UNLOCKED(object);
	VM_OBJECT_ASSERT_PAGING(object);
	KASSERT(count > 0, ("%s: 0 count", __func__));
	for (int i = 0; i < count; i++) {
		if (m[i] == bogus_page) {
			KASSERT(i != 0 && i != count - 1,
			    ("%s: page %d is the bogus page", __func__, i));
			continue;
		}
		vm_page_assert_xbusied(m[i]);
		KASSERT(!pmap_page_is_mapped(m[i]),
		    ("%s: page %p is mapped", __func__, m[i]));
		KASSERT(m[i]->valid != VM_PAGE_BITS_ALL,
		    ("%s: request for a valid page %p", __func__, m[i]));
		KASSERT(m[i]->dirty == 0,
		    ("%s: page %p is dirty", __func__, m[i]));
		KASSERT(m[i]->object == object,
		    ("%s: wrong object %p/%p", __func__, object, m[i]->object));
		KASSERT(m[i]->pindex == m[0]->pindex + i,
		    ("%s: page %p isn't consecutive", __func__, m[i]));
	}
#endif
}

/*
 * Page in the pages for the object using its associated pager.
 * The requested page must be fully valid on successful return.
 */
int
vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
#ifdef INVARIANTS
	vm_pindex_t pindex = m[0]->pindex;
#endif
	int r;

	MPASS(object->type < nitems(pagertab));
	vm_pager_assert_in(object, m, count);

	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, rbehind,
	    rahead);
	if (r != VM_PAGER_OK)
		return (r);

	for (int i = 0; i < count; i++) {
		/*
		 * If the pager has replaced a page, assert that it updated
		 * the array.
		 */
#ifdef INVARIANTS
		VM_OBJECT_RLOCK(object);
		KASSERT(m[i] == vm_page_lookup(object, pindex++),
		    ("%s: mismatch page %p pindex %ju", __func__,
		    m[i], (uintmax_t)pindex - 1));
		VM_OBJECT_RUNLOCK(object);
#endif
		/*
		 * Zero out partially filled data.
		 */
		if (m[i]->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(m[i], TRUE);
	}
	return (VM_PAGER_OK);
}

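/*
 * Start an asynchronous pagein.  Like vm_pager_get_pages(), but the caller
 * does not wait for the pages to become valid; the pager invokes the iodone
 * callback with arg once the request completes.
 */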
int
vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
{

	MPASS(object->type < nitems(pagertab));
	vm_pager_assert_in(object, m, count);

	return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
	    count, rbehind, rahead, iodone, arg));
}

/*
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 */

/*
 * Search the specified pager object list for an object with the
 * specified handle.  If an object with the specified handle is found,
 * increase its reference count and return it.  Otherwise, return NULL.
 *
 * The pager object list must be locked.
 */
vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
	vm_object_t object;

	TAILQ_FOREACH(object, pg_list, pager_object_list) {
		if (object->handle == handle) {
			VM_OBJECT_WLOCK(object);
			if ((object->flags & OBJ_DEAD) == 0) {
				vm_object_reference_locked(object);
				VM_OBJECT_WUNLOCK(object);
				break;
			}
			VM_OBJECT_WUNLOCK(object);
		}
	}
	return (object);
}

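/*
 * Allocate an object type and pagertab slot for a pager registered at
 * runtime, e.g. by a kernel module.  If base_type is not -1, any method left
 * NULL in "ops" is inherited from the base type's pager.  Returns the new
 * object type, or -1 if all dynamic slots in pagertab are taken.
 *
 * A minimal usage sketch (hypothetical names, not part of this file):
 *
 *	static struct pagerops mod_pagerops = {
 *		.pgo_kvme_type = KVME_TYPE_DEFAULT,
 *		.pgo_alloc = mod_pager_alloc,
 *	};
 *	static int mod_objtype;
 *
 *	mod_objtype = vm_pager_alloc_dyn_type(&mod_pagerops, OBJT_SWAP);
 *	if (mod_objtype == -1)
 *		error = ENOMEM;
 *	...
 *	vm_pager_free_dyn_type(mod_objtype);
 */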
int
vm_pager_alloc_dyn_type(struct pagerops *ops, int base_type)
{
	int res;

	mtx_lock(&pagertab_lock);
	MPASS(base_type == -1 ||
	    (base_type >= OBJT_DEFAULT && base_type < nitems(pagertab)));
	for (res = OBJT_FIRST_DYN; res < nitems(pagertab); res++) {
		if (pagertab[res] == NULL)
			break;
	}
	if (res == nitems(pagertab)) {
		mtx_unlock(&pagertab_lock);
		return (-1);
	}
	if (base_type != -1) {
		MPASS(pagertab[base_type] != NULL);
#define	FIX(n)								\
		if (ops->pgo_##n == NULL)				\
			ops->pgo_##n = pagertab[base_type]->pgo_##n
		FIX(init);
		FIX(alloc);
		FIX(dealloc);
		FIX(getpages);
		FIX(getpages_async);
		FIX(putpages);
		FIX(haspage);
		FIX(populate);
		FIX(pageunswapped);
		FIX(update_writecount);
		FIX(release_writecount);
		FIX(set_writeable_dirty);
		FIX(mightbedirty);
		FIX(getvp);
		FIX(freespace);
#undef FIX
	}
	pagertab[res] = ops;	/* XXXKIB should be rel, but acq is too much */
	mtx_unlock(&pagertab_lock);
	return (res);
}

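/*
 * Release a dynamically allocated pager type previously obtained from
 * vm_pager_alloc_dyn_type(), making its pagertab slot available for reuse.
 */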
void
vm_pager_free_dyn_type(objtype_t type)
{
	MPASS(type >= OBJT_FIRST_DYN && type < nitems(pagertab));

	mtx_lock(&pagertab_lock);
	MPASS(pagertab[type] != NULL);
	pagertab[type] = NULL;
	mtx_unlock(&pagertab_lock);
}

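/*
 * UMA constructor for a pbuf: reset the per-use fields and return the buf
 * exclusively locked, ready for I/O.
 */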
static int
pbuf_ctor(void *mem, int size, void *arg, int flags)
{
	struct buf *bp = mem;

	bp->b_vp = NULL;
	bp->b_bufobj = NULL;

	/* copied from initpbuf() */
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_qindex = 0;       /* On no queue (QUEUE_NONE) */
	bp->b_data = bp->b_kvabase;
	bp->b_xflags = 0;
	bp->b_flags = B_MAXPHYS;
	bp->b_ioflags = 0;
	bp->b_iodone = NULL;
	bp->b_error = 0;
	BUF_LOCK(bp, LK_EXCLUSIVE, NULL);

	return (0);
}

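/*
 * UMA destructor for a pbuf: drop any credentials still referenced and
 * release the buf lock taken in pbuf_ctor().
 */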
static void
pbuf_dtor(void *mem, int size, void *arg)
{
	struct buf *bp = mem;

	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	BUF_UNLOCK(bp);
}

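/*
 * One-time UMA item initializer: reserve a PBUF_PAGES-sized KVA range for
 * the pbuf and set up the fields that persist across uses.
 */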
static int
pbuf_init(void *mem, int size, int flags)
{
	struct buf *bp = mem;

	bp->b_kvabase = (void *)kva_alloc(ptoa(PBUF_PAGES));
	if (bp->b_kvabase == NULL)
		return (ENOMEM);
	bp->b_kvasize = ptoa(PBUF_PAGES);
	BUF_LOCKINIT(bp);
	LIST_INIT(&bp->b_dep);
	bp->b_rcred = bp->b_wcred = NOCRED;
	bp->b_xflags = 0;

	return (0);
}

/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets B_PAGING flag to indicate that vnode is not fully associated
 * with the buffer.  i.e. the bp has not been linked into the vnode or
 * ref-counted.
 */
void
pbgetvp(struct vnode *vp, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetvp: not free (bufobj)"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	bp->b_bufobj = &vp->v_bufobj;
}

/*
 * Associate a p-buffer with a bufobj.
 *
 * Also sets B_PAGING flag to indicate that the bufobj is not fully associated
 * with the buffer.  i.e. the bp has not been linked into the bufobj or
 * ref-counted.
 */
void
pbgetbo(struct bufobj *bo, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetbo: not free (vnode)"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetbo: not free (bufobj)"));

	bp->b_flags |= B_PAGING;
	bp->b_bufobj = bo;
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(struct buf *bp)
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelvp: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelvp: pager buf on vnode list."));

	bp->b_vp = NULL;
	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}

/*
 * Disassociate a p-buffer from a bufobj.
 */
void
pbrelbo(struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbrelbo: vnode"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelbo: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelbo: pager buf on vnode list."));

	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}

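/*
 * Dispatch to the pager's pgo_set_writeable_dirty method, if any.  Pagers
 * without the method (e.g. the dead pager) treat this as a no-op.
 */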
void
vm_object_set_writeable_dirty(vm_object_t object)
{
	pgo_set_writeable_dirty_t *method;

	MPASS(object->type < nitems(pagertab));

	method = pagertab[object->type]->pgo_set_writeable_dirty;
	if (method != NULL)
		method(object);
}

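/*
 * Ask the pager whether the object may contain dirty pages.  Pagers without
 * a pgo_mightbedirty method report false.
 */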
bool
vm_object_mightbedirty(vm_object_t object)
{
	pgo_mightbedirty_t *method;

	MPASS(object->type < nitems(pagertab));

	method = pagertab[object->type]->pgo_mightbedirty;
	if (method == NULL)
		return (false);
	return (method(object));
}

/*
 * Return the kvme type of the given object.
 * If vpp is not NULL, set *vpp to the object's vnode, if any, and to NULL
 * otherwise.
 */
int
vm_object_kvme_type(vm_object_t object, struct vnode **vpp)
{
	VM_OBJECT_ASSERT_LOCKED(object);
	MPASS(object->type < nitems(pagertab));

	if (vpp != NULL)
		*vpp = vm_object_vnode(object);
	return (pagertab[object->type]->pgo_kvme_type);
}