/*	$NetBSD: tmpfs_mem.c,v 1.3 2011/05/19 03:21:23 rmind Exp $	*/

/*
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * tmpfs memory allocation routines.
 * Implements memory usage accounting and limiting.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tmpfs_mem.c,v 1.3 2011/05/19 03:21:23 rmind Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/namei.h>
#include <sys/pool.h>

#include <fs/tmpfs/tmpfs.h>

extern struct pool	tmpfs_dirent_pool;
extern struct pool	tmpfs_node_pool;

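/*
 * tmpfs_mntmem_init: initialize the per-mount memory accounting:
 * the accounting lock, the memory usage limit and the byte counter.
 */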
void
tmpfs_mntmem_init(struct tmpfs_mount *mp, uint64_t memlimit)
{

	mutex_init(&mp->tm_acc_lock, MUTEX_DEFAULT, IPL_NONE);
	mp->tm_mem_limit = memlimit;
	mp->tm_bytes_used = 0;
}

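/*
 * tmpfs_mntmem_destroy: destroy the per-mount memory accounting.
 * All accounted bytes must have been released by this point.
 */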
void
tmpfs_mntmem_destroy(struct tmpfs_mount *mp)
{

	KASSERT(mp->tm_bytes_used == 0);
	mutex_destroy(&mp->tm_acc_lock);
}

/*
 * tmpfs_mem_info: return the number of available memory pages.
 *
 * => If 'total' is true, return the _total_ amount of memory pages.
 * => Otherwise, return the amount of _free_ memory pages.
 *
 * Callers should subtract TMPFS_PAGES_RESERVED from the returned value
 * to avoid excessive memory usage.
 */
size_t
tmpfs_mem_info(bool total)
{
	size_t size = 0;

	/* XXX: unlocked */
	size += uvmexp.swpgavail;
	if (!total) {
		size -= uvmexp.swpgonly;
	}
	size += uvmexp.free;
	size += uvmexp.filepages;
	if (size > uvmexp.wired) {
		size -= uvmexp.wired;
	} else {
		size = 0;
	}
	return size;
}

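/*
 * tmpfs_bytes_max: return the maximum number of bytes this mount may use:
 * the lesser of the mount's memory limit and the bytes already used plus
 * the memory currently available (with TMPFS_PAGES_RESERVED withheld).
 */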
uint64_t
tmpfs_bytes_max(struct tmpfs_mount *mp)
{
	size_t freepages = tmpfs_mem_info(false);
	uint64_t avail_mem;

	if (freepages < TMPFS_PAGES_RESERVED) {
		freepages = 0;
	} else {
		freepages -= TMPFS_PAGES_RESERVED;
	}
	avail_mem = round_page(mp->tm_bytes_used) + (freepages << PAGE_SHIFT);
	return MIN(mp->tm_mem_limit, avail_mem);
}

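/*
 * tmpfs_pages_avail: return the number of pages still available to the mount.
 */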
size_t
tmpfs_pages_avail(struct tmpfs_mount *mp)
{

	return (tmpfs_bytes_max(mp) - mp->tm_bytes_used) >> PAGE_SHIFT;
}

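/*
 * tmpfs_mem_incr: account 'sz' more bytes as used by the mount.
 *
 * => Returns true on success, or false if the limit would be exceeded.
 */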
bool
tmpfs_mem_incr(struct tmpfs_mount *mp, size_t sz)
{
	uint64_t lim;

	mutex_enter(&mp->tm_acc_lock);
	lim = tmpfs_bytes_max(mp);
	if (mp->tm_bytes_used + sz >= lim) {
		mutex_exit(&mp->tm_acc_lock);
		return false;
	}
	mp->tm_bytes_used += sz;
	mutex_exit(&mp->tm_acc_lock);
	return true;
}

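/*
 * tmpfs_mem_decr: release the accounting of 'sz' bytes previously charged
 * with tmpfs_mem_incr().
 */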
void
tmpfs_mem_decr(struct tmpfs_mount *mp, size_t sz)
{

	mutex_enter(&mp->tm_acc_lock);
	KASSERT(mp->tm_bytes_used >= sz);
	mp->tm_bytes_used -= sz;
	mutex_exit(&mp->tm_acc_lock);
}

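/*
 * tmpfs_dirent_get: allocate a directory entry, charging its size to the
 * mount.  Returns NULL if the memory limit would be exceeded.
 */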
struct tmpfs_dirent *
tmpfs_dirent_get(struct tmpfs_mount *mp)
{

	if (!tmpfs_mem_incr(mp, sizeof(struct tmpfs_dirent))) {
		return NULL;
	}
	return pool_get(&tmpfs_dirent_pool, PR_WAITOK);
}

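/*
 * tmpfs_dirent_put: free a directory entry and release its accounting.
 */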
void
tmpfs_dirent_put(struct tmpfs_mount *mp, struct tmpfs_dirent *de)
{

	tmpfs_mem_decr(mp, sizeof(struct tmpfs_dirent));
	pool_put(&tmpfs_dirent_pool, de);
}

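/*
 * tmpfs_node_get: allocate a tmpfs node, enforcing both the node count
 * limit and the memory limit.  Returns NULL if either would be exceeded.
 */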
struct tmpfs_node *
tmpfs_node_get(struct tmpfs_mount *mp)
{

	if (atomic_inc_uint_nv(&mp->tm_nodes_cnt) >= mp->tm_nodes_max) {
		atomic_dec_uint(&mp->tm_nodes_cnt);
		return NULL;
	}
	if (!tmpfs_mem_incr(mp, sizeof(struct tmpfs_node))) {
		/* Roll back the node count taken above. */
		atomic_dec_uint(&mp->tm_nodes_cnt);
		return NULL;
	}
	return pool_get(&tmpfs_node_pool, PR_WAITOK);
}

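/*
 * tmpfs_node_put: free a tmpfs node, releasing its node count and memory
 * accounting.
 */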
void
tmpfs_node_put(struct tmpfs_mount *mp, struct tmpfs_node *tn)
{

	atomic_dec_uint(&mp->tm_nodes_cnt);
	tmpfs_mem_decr(mp, sizeof(struct tmpfs_node));
	pool_put(&tmpfs_node_pool, tn);
}

/*
 * Quantum size to round up the tmpfs names in order to reduce re-allocations.
 */

#define	TMPFS_NAME_QUANTUM	(32)

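/*
 * tmpfs_strname_alloc: allocate a buffer for a name of 'len' bytes, rounded
 * up to TMPFS_NAME_QUANTUM and charged to the mount.  Returns NULL if the
 * memory limit would be exceeded.
 */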
char *
tmpfs_strname_alloc(struct tmpfs_mount *mp, size_t len)
{
	const size_t sz = roundup2(len, TMPFS_NAME_QUANTUM);

	KASSERT(sz > 0 && sz <= 1024);
	if (!tmpfs_mem_incr(mp, sz)) {
		return NULL;
	}
	return kmem_alloc(sz, KM_SLEEP);
}

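/*
 * tmpfs_strname_free: free a name buffer allocated with tmpfs_strname_alloc()
 * and release its accounting.  'len' must match the length passed at
 * allocation time.
 */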
void
tmpfs_strname_free(struct tmpfs_mount *mp, char *str, size_t len)
{
	const size_t sz = roundup2(len, TMPFS_NAME_QUANTUM);

	KASSERT(sz > 0 && sz <= 1024);
	tmpfs_mem_decr(mp, sz);
	kmem_free(str, sz);
}

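/*
 * tmpfs_strname_neqlen: return true if the two component names are not
 * equal, i.e. they differ in length or in content.
 */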
bool
tmpfs_strname_neqlen(struct componentname *fcnp, struct componentname *tcnp)
{
	const size_t fln = fcnp->cn_namelen;
	const size_t tln = tcnp->cn_namelen;

	return (fln != tln) || memcmp(fcnp->cn_nameptr, tcnp->cn_nameptr, fln);
}