1/*	$OpenBSD: pool.h,v 1.78 2021/01/02 03:23:59 cheloha Exp $	*/
2/*	$NetBSD: pool.h,v 1.27 2001/06/06 22:00:17 rafal Exp $	*/
3
4/*-
5 * Copyright (c) 1997, 1998, 1999, 2000 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
10 * Simulation Facility, NASA Ames Research Center.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 *    notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#ifndef _SYS_POOL_H_
35#define _SYS_POOL_H_
36
/*
 * sysctls.
 * kern.pool.npools
 * kern.pool.name.<number>
 * kern.pool.pool.<number>
 */
#define KERN_POOL_NPOOLS	1	/* number of pools */
#define KERN_POOL_NAME		2	/* name of pool <number> */
#define KERN_POOL_POOL		3	/* struct kinfo_pool for pool <number> */
#define KERN_POOL_CACHE		4	/* global pool cache info */
#define KERN_POOL_CACHE_CPUS	5	/* all cpus cache info */
48
/*
 * Pool statistics exported by the KERN_POOL_POOL sysctl; the fields
 * mirror the counters of the same name in struct pool below.
 */
struct kinfo_pool {
	unsigned int	pr_size;	/* size of a pool item */
	unsigned int	pr_pgsize;	/* size of a "page" */
	unsigned int	pr_itemsperpage; /* number of items per "page" */
	unsigned int	pr_minpages;	/* minimum # of pages to keep */
	unsigned int	pr_maxpages;	/* maximum # of idle pages to keep */
	unsigned int	pr_hardlimit;	/* hard limit to number of allocated
					   items */

	unsigned int	pr_npages;	/* # of pages allocated */
	unsigned int	pr_nout;	/* # items currently allocated */
	unsigned int	pr_nitems;	/* # items in the pool */

	unsigned long	pr_nget;	/* # of successful requests */
	unsigned long	pr_nput;	/* # of releases */
	unsigned long	pr_nfail;	/* # of unsuccessful requests */
	unsigned long	pr_npagealloc;	/* # of pages allocated */
	unsigned long	pr_npagefree;	/* # of pages released */
	unsigned int	pr_hiwat;	/* max # of pages in pool */
	unsigned long	pr_nidle;	/* # of idle pages */
};
70
/*
 * Global (whole-pool) cache statistics exported by the KERN_POOL_CACHE
 * sysctl.
 */
struct kinfo_pool_cache {
	uint64_t	pr_ngc;		/* # of times a list has been gc'ed */
	unsigned int	pr_len;		/* current target for list len */
	unsigned int	pr_nitems;	/* # of idle items in the depot */
	unsigned int	pr_contention;	/* # of times mtx was busy */
};
77
/*
 * KERN_POOL_CACHE_CPUS provides an array, not a single struct, i.e. it
 * provides struct kinfo_pool_cache_cpu kppc[ncpusfound].
 */
struct kinfo_pool_cache_cpu {
	unsigned int	pr_cpu;		/* which cpu this cache is on */

	/* counters for times items were handled by the cache */
	uint64_t	pr_nget;	/* # of requests */
	uint64_t	pr_nfail;	/* # of unsuccessful requests */
	uint64_t	pr_nput;	/* # of releases */

	/* counters for times the cache interacted with the pool */
	uint64_t	pr_nlget;	/* # of list requests */
	uint64_t	pr_nlfail;	/* # of unsuccessful list requests */
	uint64_t	pr_nlput;	/* # of list releases */
};
95
96#if defined(_KERNEL) || defined(_LIBKVM)
97
98#include <sys/queue.h>
99#include <sys/tree.h>
100#include <sys/mutex.h>
101#include <sys/rwlock.h>
102
struct pool;
struct pool_request;
struct pool_lock_ops;
TAILQ_HEAD(pool_requests, pool_request);

/*
 * Backend page allocator for a pool.  pa_pagesz encodes the page
 * sizes the backend can provide; see the description below.
 */
struct pool_allocator {
	void		*(*pa_alloc)(struct pool *, int, int *);	/* get a page */
	void		 (*pa_free)(struct pool *, void *);	/* release a page */
	size_t		   pa_pagesz;	/* encoded page sizes, see below */
};
113
114/*
115 * The pa_pagesz member encodes the sizes of pages that can be
116 * provided by the allocator, and whether the allocations can be
117 * aligned to their size.
118 *
119 * Page sizes can only be powers of two. Each available page size is
120 * represented by its value set as a bit. e.g., to indicate that an
121 * allocator can provide 16k and 32k pages you initialise pa_pagesz
122 * to (32768 | 16384).
123 *
124 * If the allocator can provide aligned pages the low bit in pa_pagesz
125 * is set. The POOL_ALLOC_ALIGNED macro is provided as a convenience.
126 *
127 * If pa_pagesz is unset (i.e. 0), POOL_ALLOC_DEFAULT will be used
128 * instead.
129 */
130
#define POOL_ALLOC_ALIGNED		1UL
#define POOL_ALLOC_SIZE(_sz, _a)	((_sz) | (_a))
/*
 * Sets every power-of-two size bit from _min up to _max, e.g.
 * POOL_ALLOC_SIZES(16384, 32768, 0) evaluates to (32768 | 16384).
 */
#define POOL_ALLOC_SIZES(_min, _max, _a) \
	((_max) | \
	(((_max) - 1) & ~((_min) - 1)) | (_a))

#define POOL_ALLOC_DEFAULT \
	POOL_ALLOC_SIZE(PAGE_SIZE, POOL_ALLOC_ALIGNED)
139
TAILQ_HEAD(pool_pagelist, pool_page_header);

struct pool_cache_item;
TAILQ_HEAD(pool_cache_lists, pool_cache_item);
struct cpumem;

/*
 * A pool lock is either a mutex or an rwlock; see the PR_RWLOCK flag
 * and the pr_lock_ops member in struct pool below.
 */
union pool_lock {
	struct mutex	prl_mtx;
	struct rwlock	prl_rwlock;
};
150
/*
 * The pool itself.  Visible to _LIBKVM as well as the kernel so that
 * userland tools can read pools out of a kernel image; the counters in
 * the "Instrumentation" section match struct kinfo_pool above.
 */
struct pool {
	union pool_lock	pr_lock;	/* serialises the pool */
	const struct pool_lock_ops *
			pr_lock_ops;	/* ops for pr_lock (mutex or rwlock) */
	SIMPLEQ_ENTRY(pool)
			pr_poollist;	/* entry on the list of all pools */
	struct pool_pagelist
			pr_emptypages;	/* Empty pages */
	struct pool_pagelist
			pr_fullpages;	/* Full pages */
	struct pool_pagelist
			pr_partpages;	/* Partially-allocated pages */
	struct pool_page_header	*
			pr_curpage;	/* page items are taken from */
	unsigned int	pr_size;	/* Size of item */
	unsigned int	pr_minitems;	/* minimum # of items to keep */
	unsigned int	pr_minpages;	/* same in page units */
	unsigned int	pr_maxpages;	/* maximum # of idle pages to keep */
	unsigned int	pr_npages;	/* # of pages allocated */
	unsigned int	pr_itemsperpage;/* # items that fit in a page */
	unsigned int	pr_slack;	/* unused space in a page */
	unsigned int	pr_nitems;	/* number of available items in pool */
	unsigned int	pr_nout;	/* # items currently allocated */
	unsigned int	pr_hardlimit;	/* hard limit to number of allocated
					   items */
	unsigned int	pr_serial;	/* unique serial number of the pool */
	unsigned int	pr_pgsize;	/* Size of a "page" */
	vaddr_t		pr_pgmask;	/* Mask with an item to get a page */
	struct pool_allocator *
			pr_alloc;	/* backend allocator */
	const char *	pr_wchan;	/* tsleep(9) identifier */
#define PR_WAITOK	0x0001 /* M_WAITOK */
#define PR_NOWAIT	0x0002 /* M_NOWAIT */
#define PR_LIMITFAIL	0x0004 /* M_CANFAIL */
#define PR_ZERO		0x0008 /* M_ZERO */
#define PR_RWLOCK	0x0010 /* use an rwlock, not a mutex, for pr_lock */
#define PR_WANTED	0x0100 /* NOTE: presumably "sleepers waiting for items" */

	int		pr_flags;	/* PR_* flags above */
	int		pr_ipl;		/* interrupt protection level */

	RBT_HEAD(phtree, pool_page_header)
			pr_phtree;	/* tree of the pool's page headers */

	/* per-cpu item caching; counters match kinfo_pool_cache* above */
	struct cpumem *	pr_cache;	/* per-cpu cache state */
	unsigned long	pr_cache_magic[2];	/* magic values for cached items */
	union pool_lock	pr_cache_lock;	/* serialises the idle lists below */
	struct pool_cache_lists
			pr_cache_lists;	/* list of idle item lists */
	u_int		pr_cache_nitems; /* # of idle items */
	u_int		pr_cache_items;	/* target list length */
	u_int		pr_cache_contention;	/* # of times the lock was busy */
	u_int		pr_cache_contention_prev; /* previous contention snapshot */
	uint64_t	pr_cache_timestamp;	/* time idle list was empty */
	uint64_t	pr_cache_ngc;	/* # of times the gc released a list */
	int		pr_cache_nout;	/* # items out via the cache */

	u_int		pr_align;	/* item alignment */
	u_int		pr_maxcolors;	/* Cache coloring */
	int		pr_phoffset;	/* Offset in page of page header */

	/*
	 * Warning message to be issued, and a per-time-delta rate cap,
	 * if the hard limit is reached.
	 */
	const char	*pr_hardlimit_warning;
	struct timeval	pr_hardlimit_ratecap;
	struct timeval	pr_hardlimit_warning_last;

	/*
	 * pool item requests queue
	 */
	union pool_lock	pr_requests_lock;
	struct pool_requests
			pr_requests;	/* pending pool_request()s */
	unsigned int	pr_requesting;

	/*
	 * Instrumentation
	 */
	unsigned long	pr_nget;	/* # of successful requests */
	unsigned long	pr_nfail;	/* # of unsuccessful requests */
	unsigned long	pr_nput;	/* # of releases */
	unsigned long	pr_npagealloc;	/* # of pages allocated */
	unsigned long	pr_npagefree;	/* # of pages released */
	unsigned int	pr_hiwat;	/* max # of pages in pool */
	unsigned long	pr_nidle;	/* # of idle pages */

	/* Physical memory configuration. */
	const struct kmem_pa_mode *
			pr_crange;
};
243
244#endif /* _KERNEL || _LIBKVM */
245
246#ifdef _KERNEL
247
/* Stock backend page allocators. */
extern struct pool_allocator pool_allocator_single;
extern struct pool_allocator pool_allocator_multi;
250
/*
 * An asynchronous pool_get(): queued with pool_request(), completed by
 * calling pr_handler (presumably with pr_cookie and the allocated
 * pr_item) once an item is available.
 */
struct pool_request {
	TAILQ_ENTRY(pool_request) pr_entry;	/* entry on pool's pr_requests */
	void (*pr_handler)(struct pool *, void *, void *);	/* completion callback */
	void *pr_cookie;	/* caller-supplied handler argument */
	void *pr_item;		/* the item handed to pr_handler */
};
257
/* Set up a pool; pool_cache_init() additionally enables per-cpu caching. */
void		pool_init(struct pool *, size_t, u_int, int, int,
		    const char *, struct pool_allocator *);
void		pool_cache_init(struct pool *);
void		pool_destroy(struct pool *);
/* Watermark and hard-limit tuning. */
void		pool_setlowat(struct pool *, int);
void		pool_sethiwat(struct pool *, int);
int		pool_sethardlimit(struct pool *, u_int, const char *, int);
struct uvm_constraint_range; /* XXX */
/* Constrain which physical memory backs the pool (cf. pr_crange). */
void		pool_set_constraints(struct pool *,
		    const struct kmem_pa_mode *mode);

/* Item allocation/release; pool_request() is an asynchronous pool_get(). */
void		*pool_get(struct pool *, int) __malloc;
void		pool_request_init(struct pool_request *,
		    void (*)(struct pool *, void *, void *), void *);
void		pool_request(struct pool *, struct pool_request *);
void		pool_put(struct pool *, void *);
void		pool_wakeup(struct pool *);
/* Give idle pages back to the backend allocator. */
int		pool_reclaim(struct pool *);
void		pool_reclaim_all(void);
int		pool_prime(struct pool *, int);	/* preallocate; see pool(9) */
278
#ifdef DDB
/*
 * Debugging and diagnostic aids.
 */
void		pool_printit(struct pool *, const char *,
		    int (*)(const char *, ...));
void		pool_walk(struct pool *, int, int (*)(const char *, ...),
		    void (*)(void *, int, int (*)(const char *, ...)));
#endif
288
/* The allocator for DMA-able memory is a thin layer on top of pool. */
void		 dma_alloc_init(void);
void		*dma_alloc(size_t size, int flags);
void		 dma_free(void *m, size_t size);
293#endif /* _KERNEL */
294
295#endif /* _SYS_POOL_H_ */
296