1/*	$OpenBSD: mpool.c,v 1.21 2015/11/01 03:45:28 guenther Exp $	*/
2
3/*-
4 * Copyright (c) 1990, 1993, 1994
5 *	The Regents of the University of California.  All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 *    may be used to endorse or promote products derived from this software
17 *    without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32#include <sys/queue.h>
33#include <sys/stat.h>
34
35#include <errno.h>
36#include <stdio.h>
37#include <stdlib.h>
38#include <string.h>
39#include <unistd.h>
40
41#include <db.h>
42
43#define	__MPOOLINTERFACE_PRIVATE
44#include <mpool.h>
45
46static BKT *mpool_bkt(MPOOL *);
47static BKT *mpool_look(MPOOL *, pgno_t);
48static int  mpool_write(MPOOL *, BKT *);
49
50/*
51 * mpool_open --
52 *	Initialize a memory pool.
53 */
54MPOOL *
55mpool_open(void *key, int fd, pgno_t pagesize, pgno_t maxcache)
56{
57	struct stat sb;
58	MPOOL *mp;
59	int entry;
60
61	/*
62	 * Get information about the file.
63	 *
64	 * XXX
65	 * We don't currently handle pipes, although we should.
66	 */
67	if (fstat(fd, &sb))
68		return (NULL);
69	if (!S_ISREG(sb.st_mode)) {
70		errno = ESPIPE;
71		return (NULL);
72	}
73
74	/* Allocate and initialize the MPOOL cookie. */
75	if ((mp = (MPOOL *)calloc(1, sizeof(MPOOL))) == NULL)
76		return (NULL);
77	TAILQ_INIT(&mp->lqh);
78	for (entry = 0; entry < HASHSIZE; ++entry)
79		TAILQ_INIT(&mp->hqh[entry]);
80	mp->maxcache = maxcache;
81	mp->npages = sb.st_size / pagesize;
82	mp->pagesize = pagesize;
83	mp->fd = fd;
84	return (mp);
85}
86
87/*
88 * mpool_filter --
89 *	Initialize input/output filters.
90 */
91void
92mpool_filter(MPOOL *mp, void (*pgin) (void *, pgno_t, void *),
93    void (*pgout) (void *, pgno_t, void *), void *pgcookie)
94{
95	mp->pgin = pgin;
96	mp->pgout = pgout;
97	mp->pgcookie = pgcookie;
98}
99
100/*
101 * mpool_new --
102 *	Get a new page of memory.
103 */
104void *
105mpool_new(MPOOL *mp, pgno_t *pgnoaddr, u_int flags)
106{
107	struct _hqh *head;
108	BKT *bp;
109
110	if (mp->npages == MAX_PAGE_NUMBER) {
111		(void)fprintf(stderr, "mpool_new: page allocation overflow.\n");
112		abort();
113	}
114#ifdef STATISTICS
115	++mp->pagenew;
116#endif
117	/*
118	 * Get a BKT from the cache.  Assign a new page number, attach
119	 * it to the head of the hash chain, the tail of the lru chain,
120	 * and return.
121	 */
122	if ((bp = mpool_bkt(mp)) == NULL)
123		return (NULL);
124	if (flags == MPOOL_PAGE_REQUEST) {
125		mp->npages++;
126		bp->pgno = *pgnoaddr;
127	} else
128		bp->pgno = *pgnoaddr = mp->npages++;
129
130	bp->flags = MPOOL_PINNED | MPOOL_INUSE;
131
132	head = &mp->hqh[HASHKEY(bp->pgno)];
133	TAILQ_INSERT_HEAD(head, bp, hq);
134	TAILQ_INSERT_TAIL(&mp->lqh, bp, q);
135	return (bp->page);
136}
137
138int
139mpool_delete(MPOOL *mp, void *page)
140{
141	struct _hqh *head;
142	BKT *bp;
143
144	bp = (BKT *)((char *)page - sizeof(BKT));
145
146#ifdef DEBUG
147	if (!(bp->flags & MPOOL_PINNED)) {
148		(void)fprintf(stderr,
149		    "mpool_delete: page %d not pinned\n", bp->pgno);
150		abort();
151	}
152#endif
153
154	/* Remove from the hash and lru queues. */
155	head = &mp->hqh[HASHKEY(bp->pgno)];
156	TAILQ_REMOVE(head, bp, hq);
157	TAILQ_REMOVE(&mp->lqh, bp, q);
158
159	free(bp);
160	mp->curcache--;
161	return (RET_SUCCESS);
162}
163
164/*
165 * mpool_get
166 *	Get a page.
167 */
168void *
169mpool_get(MPOOL *mp, pgno_t pgno,
170    u_int flags)		/* XXX not used? */
171{
172	struct _hqh *head;
173	BKT *bp;
174	off_t off;
175	int nr;
176
177#ifdef STATISTICS
178	++mp->pageget;
179#endif
180
181	/* Check for a page that is cached. */
182	if ((bp = mpool_look(mp, pgno)) != NULL) {
183#ifdef DEBUG
184		if (!(flags & MPOOL_IGNOREPIN) && bp->flags & MPOOL_PINNED) {
185			(void)fprintf(stderr,
186			    "mpool_get: page %d already pinned\n", bp->pgno);
187			abort();
188		}
189#endif
190		/*
191		 * Move the page to the head of the hash chain and the tail
192		 * of the lru chain.
193		 */
194		head = &mp->hqh[HASHKEY(bp->pgno)];
195		TAILQ_REMOVE(head, bp, hq);
196		TAILQ_INSERT_HEAD(head, bp, hq);
197		TAILQ_REMOVE(&mp->lqh, bp, q);
198		TAILQ_INSERT_TAIL(&mp->lqh, bp, q);
199
200		/* Return a pinned page. */
201		bp->flags |= MPOOL_PINNED;
202		return (bp->page);
203	}
204
205	/* Get a page from the cache. */
206	if ((bp = mpool_bkt(mp)) == NULL)
207		return (NULL);
208
209	/* Read in the contents. */
210	off = mp->pagesize * pgno;
211	if ((nr = pread(mp->fd, bp->page, mp->pagesize, off)) != mp->pagesize) {
212		switch (nr) {
213		case -1:
214			/* errno is set for us by pread(). */
215			free(bp);
216			mp->curcache--;
217			return (NULL);
218		case 0:
219			/*
220			 * A zero-length read means you need to create a
221			 * new page.
222			 */
223			memset(bp->page, 0, mp->pagesize);
224			break;
225		default:
226			/* A partial read is definitely bad. */
227			free(bp);
228			mp->curcache--;
229			errno = EINVAL;
230			return (NULL);
231		}
232	}
233#ifdef STATISTICS
234	++mp->pageread;
235#endif
236
237	/* Set the page number, pin the page. */
238	bp->pgno = pgno;
239	if (!(flags & MPOOL_IGNOREPIN))
240		bp->flags = MPOOL_PINNED;
241	bp->flags |= MPOOL_INUSE;
242
243	/*
244	 * Add the page to the head of the hash chain and the tail
245	 * of the lru chain.
246	 */
247	head = &mp->hqh[HASHKEY(bp->pgno)];
248	TAILQ_INSERT_HEAD(head, bp, hq);
249	TAILQ_INSERT_TAIL(&mp->lqh, bp, q);
250
251	/* Run through the user's filter. */
252	if (mp->pgin != NULL)
253		(mp->pgin)(mp->pgcookie, bp->pgno, bp->page);
254
255	return (bp->page);
256}
257
258/*
259 * mpool_put
260 *	Return a page.
261 */
262int
263mpool_put(MPOOL *mp, void *page, u_int flags)
264{
265	BKT *bp;
266
267#ifdef STATISTICS
268	++mp->pageput;
269#endif
270	bp = (BKT *)((char *)page - sizeof(BKT));
271#ifdef DEBUG
272	if (!(bp->flags & MPOOL_PINNED)) {
273		(void)fprintf(stderr,
274		    "mpool_put: page %d not pinned\n", bp->pgno);
275		abort();
276	}
277#endif
278	bp->flags &= ~MPOOL_PINNED;
279	if (flags & MPOOL_DIRTY)
280		bp->flags |= flags & MPOOL_DIRTY;
281	return (RET_SUCCESS);
282}
283
284/*
285 * mpool_close
286 *	Close the buffer pool.
287 */
288int
289mpool_close(MPOOL *mp)
290{
291	BKT *bp;
292
293	/* Free up any space allocated to the lru pages. */
294	while ((bp = TAILQ_FIRST(&mp->lqh))) {
295		TAILQ_REMOVE(&mp->lqh, bp, q);
296		free(bp);
297	}
298
299	/* Free the MPOOL cookie. */
300	free(mp);
301	return (RET_SUCCESS);
302}
303
304/*
305 * mpool_sync
306 *	Sync the pool to disk.
307 */
308int
309mpool_sync(MPOOL *mp)
310{
311	BKT *bp;
312
313	/* Walk the lru chain, flushing any dirty pages to disk. */
314	TAILQ_FOREACH(bp, &mp->lqh, q)
315		if (bp->flags & MPOOL_DIRTY &&
316		    mpool_write(mp, bp) == RET_ERROR)
317			return (RET_ERROR);
318
319	/* Sync the file descriptor. */
320	return (fsync(mp->fd) ? RET_ERROR : RET_SUCCESS);
321}
322
323/*
324 * mpool_bkt
325 *	Get a page from the cache (or create one).
326 */
327static BKT *
328mpool_bkt(MPOOL *mp)
329{
330	struct _hqh *head;
331	BKT *bp;
332
333	/* If under the max cached, always create a new page. */
334	if (mp->curcache < mp->maxcache)
335		goto new;
336
337	/*
338	 * If the cache is max'd out, walk the lru list for a buffer we
339	 * can flush.  If we find one, write it (if necessary) and take it
340	 * off any lists.  If we don't find anything we grow the cache anyway.
341	 * The cache never shrinks.
342	 */
343	TAILQ_FOREACH(bp, &mp->lqh, q)
344		if (!(bp->flags & MPOOL_PINNED)) {
345			/* Flush if dirty. */
346			if (bp->flags & MPOOL_DIRTY &&
347			    mpool_write(mp, bp) == RET_ERROR)
348				return (NULL);
349#ifdef STATISTICS
350			++mp->pageflush;
351#endif
352			/* Remove from the hash and lru queues. */
353			head = &mp->hqh[HASHKEY(bp->pgno)];
354			TAILQ_REMOVE(head, bp, hq);
355			TAILQ_REMOVE(&mp->lqh, bp, q);
356#ifdef DEBUG
357			{ void *spage;
358				spage = bp->page;
359				memset(bp, 0xff, sizeof(BKT) + mp->pagesize);
360				bp->page = spage;
361			}
362#endif
363			bp->flags = 0;
364			return (bp);
365		}
366
367new:	if ((bp = (BKT *)malloc(sizeof(BKT) + mp->pagesize)) == NULL)
368		return (NULL);
369#ifdef STATISTICS
370	++mp->pagealloc;
371#endif
372	memset(bp, 0xff, sizeof(BKT) + mp->pagesize);
373	bp->page = (char *)bp + sizeof(BKT);
374	bp->flags = 0;
375	++mp->curcache;
376	return (bp);
377}
378
379/*
380 * mpool_write
381 *	Write a page to disk.
382 */
383static int
384mpool_write(MPOOL *mp, BKT *bp)
385{
386	off_t off;
387
388#ifdef STATISTICS
389	++mp->pagewrite;
390#endif
391
392	/* Run through the user's filter. */
393	if (mp->pgout)
394		(mp->pgout)(mp->pgcookie, bp->pgno, bp->page);
395
396	off = mp->pagesize * bp->pgno;
397	if (pwrite(mp->fd, bp->page, mp->pagesize, off) != mp->pagesize)
398		return (RET_ERROR);
399
400	/*
401	 * Re-run through the input filter since this page may soon be
402	 * accessed via the cache, and whatever the user's output filter
403	 * did may screw things up if we don't let the input filter
404	 * restore the in-core copy.
405	 */
406	if (mp->pgin)
407		(mp->pgin)(mp->pgcookie, bp->pgno, bp->page);
408
409	bp->flags &= ~MPOOL_DIRTY;
410	return (RET_SUCCESS);
411}
412
413/*
414 * mpool_look
415 *	Lookup a page in the cache.
416 */
417static BKT *
418mpool_look(MPOOL *mp, pgno_t pgno)
419{
420	struct _hqh *head;
421	BKT *bp;
422
423	head = &mp->hqh[HASHKEY(pgno)];
424	TAILQ_FOREACH(bp, head, hq)
425		if ((bp->pgno == pgno) &&
426			((bp->flags & MPOOL_INUSE) == MPOOL_INUSE)) {
427#ifdef STATISTICS
428			++mp->cachehit;
429#endif
430			return (bp);
431		}
432#ifdef STATISTICS
433	++mp->cachemiss;
434#endif
435	return (NULL);
436}
437
#ifdef STATISTICS
/*
 * mpool_stat
 *	Print out cache statistics.
 *
 *	Dumps the counters accumulated under STATISTICS to stderr,
 *	followed by the LRU list of cached page numbers, each suffixed
 *	with 'd' (dirty) and/or 'P' (pinned), ten per line.
 *
 *	NOTE(review): the %lu conversions assume the counter fields and
 *	pgno_t are unsigned long; %d at the page-number loop below assumes
 *	pgno fits an int.  Neither type is visible here -- confirm against
 *	mpool.h/db.h before relying on the output on LP64 platforms.
 */
void
mpool_stat(MPOOL *mp)
{
	BKT *bp;
	int cnt;		/* pages printed on the current line */
	char *sep;		/* separator emitted before each page number */

	(void)fprintf(stderr, "%lu pages in the file\n", mp->npages);
	(void)fprintf(stderr,
	    "page size %lu, cacheing %lu pages of %lu page max cache\n",
	    mp->pagesize, mp->curcache, mp->maxcache);
	(void)fprintf(stderr, "%lu page puts, %lu page gets, %lu page new\n",
	    mp->pageput, mp->pageget, mp->pagenew);
	(void)fprintf(stderr, "%lu page allocs, %lu page flushes\n",
	    mp->pagealloc, mp->pageflush);
	/* Avoid a divide-by-zero when no lookups have happened yet. */
	if (mp->cachehit + mp->cachemiss)
		(void)fprintf(stderr,
		    "%.0f%% cache hit rate (%lu hits, %lu misses)\n",
		    ((double)mp->cachehit / (mp->cachehit + mp->cachemiss))
		    * 100, mp->cachehit, mp->cachemiss);
	(void)fprintf(stderr, "%lu page reads, %lu page writes\n",
	    mp->pageread, mp->pagewrite);

	/* Walk the LRU list, printing page numbers ten to a line. */
	sep = "";
	cnt = 0;
	TAILQ_FOREACH(bp, &mp->lqh, q) {
		(void)fprintf(stderr, "%s%d", sep, bp->pgno);
		if (bp->flags & MPOOL_DIRTY)
			(void)fprintf(stderr, "d");
		if (bp->flags & MPOOL_PINNED)
			(void)fprintf(stderr, "P");
		if (++cnt == 10) {
			sep = "\n";
			cnt = 0;
		} else
			sep = ", ";

	}
	(void)fprintf(stderr, "\n");
}
#endif
484