/*	$OpenBSD: uvm_pmemrange.h,v 1.17 2024/05/01 12:54:27 mpi Exp $	*/

/*
 * Copyright (c) 2009 Ariane van der Steldt <ariane@stack.nl>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * uvm_pmemrange.h: describe and manage free physical memory.
 */

#ifndef _UVM_UVM_PMEMRANGE_H_
#define _UVM_UVM_PMEMRANGE_H_

RBT_HEAD(uvm_pmr_addr, vm_page);
RBT_HEAD(uvm_pmr_size, vm_page);

/*
 * Page types available:
 * - DIRTY: this page may contain random data.
 * - ZERO: this page has been zeroed.
 */
#define UVM_PMR_MEMTYPE_DIRTY	0
#define UVM_PMR_MEMTYPE_ZERO	1
#define UVM_PMR_MEMTYPE_MAX	2

/*
 * An address range of memory.
 */
struct uvm_pmemrange {
	struct	uvm_pmr_addr addr;	/* Free page chunks, sorted by addr. */
	struct	uvm_pmr_size size[UVM_PMR_MEMTYPE_MAX];
					/* Free page chunks, sorted by size. */
	TAILQ_HEAD(, vm_page) single[UVM_PMR_MEMTYPE_MAX];
					/* single page regions (uses pageq) */

	paddr_t	low;			/* Start of address range (pgno). */
	paddr_t	high;			/* End +1 (pgno). */
	int	use;			/* Use counter. */
	psize_t	nsegs;			/* Current range count. */

	TAILQ_ENTRY(uvm_pmemrange) pmr_use;
					/* pmr, sorted by use */
	RBT_ENTRY(uvm_pmemrange) pmr_addr;
					/* pmr, sorted by address */
};
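
/*
 * Illustrative sketch (not part of this header's API): the first page
 * of every free chunk is keyed in `addr', and, unless the chunk is a
 * lone page parked on `single[]', also in the `size' tree matching its
 * memtype, so the allocator can search either by placement or by fit.
 * With the fpageq lock held, the lowest free chunk of a range would be
 * found as:
 *
 *	struct vm_page *pg;
 *
 *	pg = RBT_MIN(uvm_pmr_addr, &pmr->addr);
 */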

/*
 * Description of a failing memory allocation.
 *
 * There are two ways new pages can become available:
 * [1] the page daemon drops them (we notice because they are freed)
 * [2] a process calls free
 *
 * The buffer cache and page daemon can decide that they don't have the
 * ability to make pages available in the requested range. In that case,
 * the UVM_PMA_FAIL bit will be set.
 * XXX There's a possibility that a page is no longer on the queues but
 * XXX has not yet been freed, or that a page was busy.
 * XXX Also, wired pages are not considered for paging, so they could
 * XXX cause a failure that may be recoverable.
 */
struct uvm_pmalloc {
	TAILQ_ENTRY(uvm_pmalloc) pmq;

	/*
	 * Allocation request parameters.
	 */
	struct uvm_constraint_range pm_constraint;
	psize_t	pm_size;

	/*
	 * State flags.
	 */
	int	pm_flags;
};

/*
 * uvm_pmalloc flags.
 */
#define UVM_PMA_LINKED	0x01	/* uvm_pmalloc is on list */
#define UVM_PMA_BUSY	0x02	/* entry is busy with fpageq unlocked */
#define UVM_PMA_FAIL	0x10	/* page daemon cannot free pages */
#define UVM_PMA_FREED	0x20	/* at least one page in the range was freed */
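
/*
 * Sketch of the intended flag lifecycle (illustrative; assumes the
 * fpageq mutex conventions used by uvm_wait_pla()): a failing
 * allocator links its request onto uvm.pmr_control.allocs and sleeps
 * until the page daemon either frees a page inside pm_constraint
 * (UVM_PMA_FREED) or gives up (UVM_PMA_FAIL), e.g.:
 *
 *	pma.pm_flags = UVM_PMA_LINKED;
 *	TAILQ_INSERT_TAIL(&uvm.pmr_control.allocs, &pma, pmq);
 *	while (pma.pm_flags & UVM_PMA_LINKED)
 *		msleep_nsec(&pma, &uvm.fpageqlock, PVM, "pmrwait", INFSLP);
 *	if (pma.pm_flags & UVM_PMA_FAIL)
 *		return (ENOMEM);
 */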

RBT_HEAD(uvm_pmemrange_addr, uvm_pmemrange);
TAILQ_HEAD(uvm_pmemrange_use, uvm_pmemrange);

/*
 * pmr control structure. Contained in uvm.pmr_control.
 */
struct uvm_pmr_control {
	struct	uvm_pmemrange_addr addr;
	struct	uvm_pmemrange_use use;

	/* Only changed while fpageq is locked. */
	TAILQ_HEAD(, uvm_pmalloc) allocs;
};

void	uvm_pmr_freepages(struct vm_page *, psize_t);
void	uvm_pmr_freepageq(struct pglist *);
int	uvm_pmr_getpages(psize_t, paddr_t, paddr_t, paddr_t, paddr_t,
	    int, int, struct pglist *);
void	uvm_pmr_init(void);
int	uvm_wait_pla(paddr_t, paddr_t, paddr_t, int);
void	uvm_wakeup_pla(paddr_t, psize_t);
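
/*
 * Example (sketch; the argument meanings are annotations, not part of
 * the prototypes above): ask uvm_pmr_getpages() for four contiguous,
 * zeroed pages anywhere in physical memory, then hand them back:
 *
 *	struct pglist pgl;
 *
 *	TAILQ_INIT(&pgl);
 *	if (uvm_pmr_getpages(4, 0, (paddr_t)-1, 0, 0, 1,
 *	    UVM_PLA_ZERO | UVM_PLA_WAITOK, &pgl) == 0)
 *		uvm_pmr_freepageq(&pgl);
 *
 * The paddr_t arguments follow the pgno convention noted in struct
 * uvm_pmemrange above; the UVM_PLA_* flags come from uvm_extern.h.
 */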

#if defined(DDB) || defined(DEBUG)
int	uvm_pmr_isfree(struct vm_page *pg);
#endif

/*
 * Internal tree logic.
 */

int	uvm_pmr_addr_cmp(const struct vm_page *, const struct vm_page *);
int	uvm_pmr_size_cmp(const struct vm_page *, const struct vm_page *);

RBT_PROTOTYPE(uvm_pmr_addr, vm_page, objt, uvm_pmr_addr_cmp);
RBT_PROTOTYPE(uvm_pmr_size, vm_page, objt, uvm_pmr_size_cmp);
RBT_PROTOTYPE(uvm_pmemrange_addr, uvm_pmemrange, pmr_addr,
    uvm_pmemrange_addr_cmp);
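
/*
 * Sketch of the ordering contract (the comparators themselves live in
 * uvm_pmemrange.c): uvm_pmr_addr_cmp orders free chunks by physical
 * address, along the lines of:
 *
 *	int
 *	uvm_pmr_addr_cmp(const struct vm_page *lhs, const struct vm_page *rhs)
 *	{
 *		paddr_t la = VM_PAGE_TO_PHYS(lhs);
 *		paddr_t ra = VM_PAGE_TO_PHYS(rhs);
 *
 *		return (la < ra ? -1 : la > ra);
 *	}
 *
 * uvm_pmr_size_cmp orders by free chunk size, falling back to address
 * order to break ties.
 */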

struct vm_page		*uvm_pmr_insert_addr(struct uvm_pmemrange *,
			    struct vm_page *, int);
void			 uvm_pmr_insert_size(struct uvm_pmemrange *,
			    struct vm_page *);
struct vm_page		*uvm_pmr_insert(struct uvm_pmemrange *,
			    struct vm_page *, int);
void			 uvm_pmr_remove_addr(struct uvm_pmemrange *,
			    struct vm_page *);
void			 uvm_pmr_remove_size(struct uvm_pmemrange *,
			    struct vm_page *);
void			 uvm_pmr_remove(struct uvm_pmemrange *,
			    struct vm_page *);
struct vm_page		*uvm_pmr_extract_range(struct uvm_pmemrange *,
			    struct vm_page *, paddr_t, paddr_t,
			    struct pglist *);
struct vm_page		*uvm_pmr_cache_get(int);
void			 uvm_pmr_cache_put(struct vm_page *);
void			 uvm_pmr_cache_drain(void);
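
/*
 * The cache entry points above serve single-page allocations from a
 * small per-CPU pool before falling back to the global ranges; a
 * minimal (illustrative) round trip:
 *
 *	struct vm_page *pg;
 *
 *	pg = uvm_pmr_cache_get(UVM_PLA_ZERO);
 *	if (pg != NULL)
 *		uvm_pmr_cache_put(pg);
 */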

#endif /* _UVM_UVM_PMEMRANGE_H_ */