ffs_alloc.c (202125, deleted) → ffs_alloc.c (203763, added)
1/*-
2 * Copyright (c) 2002 Networks Associates Technology, Inc.
3 * All rights reserved.
4 *
5 * This software was developed for the FreeBSD Project by Marshall
6 * Kirk McKusick and Network Associates Laboratories, the Security
7 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
8 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
9 * research program
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * Copyright (c) 1982, 1986, 1989, 1993
33 * The Regents of the University of California. All rights reserved.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 4. Neither the name of the University nor the names of its contributors
44 * may be used to endorse or promote products derived from this software
45 * without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58 *
59 * @(#)ffs_alloc.c 8.18 (Berkeley) 5/26/95
60 */
61
62#include <sys/cdefs.h>
63__FBSDID("$FreeBSD: head/sys/ufs/ffs/ffs_alloc.c 202125 2010-01-11 22:42:06Z mckusick $");
63__FBSDID("$FreeBSD: head/sys/ufs/ffs/ffs_alloc.c 203763 2010-02-10 20:10:35Z mckusick $");
64
65#include "opt_quota.h"
66
67#include <sys/param.h>
68#include <sys/systm.h>
69#include <sys/bio.h>
70#include <sys/buf.h>
71#include <sys/conf.h>
72#include <sys/fcntl.h>
73#include <sys/file.h>
74#include <sys/filedesc.h>
75#include <sys/priv.h>
76#include <sys/proc.h>
77#include <sys/vnode.h>
78#include <sys/mount.h>
79#include <sys/kernel.h>
80#include <sys/syscallsubr.h>
81#include <sys/sysctl.h>
82#include <sys/syslog.h>
83
84#include <security/audit/audit.h>
85
86#include <ufs/ufs/dir.h>
87#include <ufs/ufs/extattr.h>
88#include <ufs/ufs/quota.h>
89#include <ufs/ufs/inode.h>
90#include <ufs/ufs/ufs_extern.h>
91#include <ufs/ufs/ufsmount.h>
92
93#include <ufs/ffs/fs.h>
94#include <ufs/ffs/ffs_extern.h>
95
96typedef ufs2_daddr_t allocfcn_t(struct inode *ip, int cg, ufs2_daddr_t bpref,
96typedef ufs2_daddr_t allocfcn_t(struct inode *ip, u_int cg, ufs2_daddr_t bpref,
97 int size);
98
99static ufs2_daddr_t ffs_alloccg(struct inode *, int, ufs2_daddr_t, int);
99static ufs2_daddr_t ffs_alloccg(struct inode *, u_int, ufs2_daddr_t, int);
100static ufs2_daddr_t
101 ffs_alloccgblk(struct inode *, struct buf *, ufs2_daddr_t);
102#ifdef INVARIANTS
103static int ffs_checkblk(struct inode *, ufs2_daddr_t, long);
104#endif
105static ufs2_daddr_t ffs_clusteralloc(struct inode *, int, ufs2_daddr_t, int);
105static ufs2_daddr_t ffs_clusteralloc(struct inode *, u_int, ufs2_daddr_t, int);
106static void ffs_clusteracct(struct ufsmount *, struct fs *, struct cg *,
107 ufs1_daddr_t, int);
108static ino_t ffs_dirpref(struct inode *);
109static ufs2_daddr_t ffs_fragextend(struct inode *, int, ufs2_daddr_t, int, int);
109static ufs2_daddr_t ffs_fragextend(struct inode *, u_int, ufs2_daddr_t,
110 int, int);
110static void ffs_fserr(struct fs *, ino_t, char *);
111static ufs2_daddr_t ffs_hashalloc
112 (struct inode *, int, ufs2_daddr_t, int, allocfcn_t *);
113static ufs2_daddr_t ffs_nodealloccg(struct inode *, int, ufs2_daddr_t, int);
113 (struct inode *, u_int, ufs2_daddr_t, int, allocfcn_t *);
114static ufs2_daddr_t ffs_nodealloccg(struct inode *, u_int, ufs2_daddr_t, int);
114static ufs1_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs2_daddr_t, int);
115static int ffs_reallocblks_ufs1(struct vop_reallocblks_args *);
116static int ffs_reallocblks_ufs2(struct vop_reallocblks_args *);
117
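/*
 * A standalone sketch, not part of ffs_alloc.c: the one change in the
 * declarations above is widening cylinder-group numbers from int to
 * u_int. A plausible reading (an assumption, not stated in this hunk)
 * is that group numbers are never negative and get compared against
 * unsigned counts, where a stray negative int silently converts to a
 * huge unsigned value:
 */
#include <stdio.h>

int
main(void)
{
	unsigned int ncg = 8;	/* hypothetical cylinder-group count */
	int cg = -1;		/* a stray negative group number */

	/* cg converts to unsigned in the comparison: -1 becomes UINT_MAX. */
	if (cg < ncg)
		printf("cg looks in range\n");
	else
		printf("cg = %d compares as a huge unsigned value\n", cg);
	return (0);
}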
118/*
119 * Allocate a block in the filesystem.
120 *
121 * The size of the requested block is given, which must be some
122 * multiple of fs_fsize and <= fs_bsize.
123 * A preference may be optionally specified. If a preference is given
124 * the following hierarchy is used to allocate a block:
125 * 1) allocate the requested block.
126 * 2) allocate a rotationally optimal block in the same cylinder.
127 * 3) allocate a block in the same cylinder group.
 128	 * 4) quadratically rehash into other cylinder groups, until an
129 * available block is located.
130 * If no block preference is given the following hierarchy is used
131 * to allocate a block:
132 * 1) allocate a block in the cylinder group that contains the
133 * inode for the file.
 134	 * 2) quadratically rehash into other cylinder groups, until an
135 * available block is located.
136 */
137int
138ffs_alloc(ip, lbn, bpref, size, flags, cred, bnp)
139 struct inode *ip;
140 ufs2_daddr_t lbn, bpref;
141 int size, flags;
142 struct ucred *cred;
143 ufs2_daddr_t *bnp;
144{
145 struct fs *fs;
146 struct ufsmount *ump;
147 ufs2_daddr_t bno;
148 int cg, reclaimed;
149 u_int cg, reclaimed;
149 static struct timeval lastfail;
150 static int curfail;
151 int64_t delta;
152#ifdef QUOTA
153 int error;
154#endif
155
156 *bnp = 0;
157 fs = ip->i_fs;
158 ump = ip->i_ump;
159 mtx_assert(UFS_MTX(ump), MA_OWNED);
160#ifdef INVARIANTS
161 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
162 printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
163 devtoname(ip->i_dev), (long)fs->fs_bsize, size,
164 fs->fs_fsmnt);
165 panic("ffs_alloc: bad size");
166 }
167 if (cred == NOCRED)
168 panic("ffs_alloc: missing credential");
169#endif /* INVARIANTS */
170 reclaimed = 0;
171retry:
172#ifdef QUOTA
173 UFS_UNLOCK(ump);
174 error = chkdq(ip, btodb(size), cred, 0);
175 if (error)
176 return (error);
177 UFS_LOCK(ump);
178#endif
179 if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
180 goto nospace;
181 if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
182 freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
183 goto nospace;
184 if (bpref >= fs->fs_size)
185 bpref = 0;
186 if (bpref == 0)
187 cg = ino_to_cg(fs, ip->i_number);
188 else
189 cg = dtog(fs, bpref);
190 bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
191 if (bno > 0) {
192 delta = btodb(size);
193 if (ip->i_flag & IN_SPACECOUNTED) {
194 UFS_LOCK(ump);
195 fs->fs_pendingblocks += delta;
196 UFS_UNLOCK(ump);
197 }
198 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
199 if (flags & IO_EXT)
200 ip->i_flag |= IN_CHANGE;
201 else
202 ip->i_flag |= IN_CHANGE | IN_UPDATE;
203 *bnp = bno;
204 return (0);
205 }
206nospace:
207#ifdef QUOTA
208 UFS_UNLOCK(ump);
209 /*
210 * Restore user's disk quota because allocation failed.
211 */
212 (void) chkdq(ip, -btodb(size), cred, FORCE);
213 UFS_LOCK(ump);
214#endif
215 if (fs->fs_pendingblocks > 0 && reclaimed == 0) {
216 reclaimed = 1;
217 softdep_request_cleanup(fs, ITOV(ip));
218 goto retry;
219 }
220 UFS_UNLOCK(ump);
221 if (ppsratecheck(&lastfail, &curfail, 1)) {
222 ffs_fserr(fs, ip->i_number, "filesystem full");
223 uprintf("\n%s: write failed, filesystem is full\n",
224 fs->fs_fsmnt);
225 }
226 return (ENOSPC);
227}
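/*
 * A standalone sketch, not part of ffs_alloc.c: the "quadratically
 * rehash" steps in the comment above are performed by ffs_hashalloc(),
 * whose body lies outside this hunk. In the historical FFS code the
 * probe order is: the preferred group, then groups at doubling offsets
 * from it, then a linear sweep of the remainder. Approximated in user
 * space (probe order only, no allocation):
 */
#include <stdio.h>

static void
probe_order(unsigned int ncg, unsigned int start)
{
	unsigned int cg, i;

	printf("%u", start);		/* 1: preferred cylinder group */
	cg = start;
	for (i = 1; i < ncg; i *= 2) {	/* 2: quadratic rehash */
		cg += i;
		if (cg >= ncg)
			cg -= ncg;
		printf(" %u", cg);
	}
	cg = (start + 1) % ncg;		/* 3: brute-force sweep */
	for (i = 2; i < ncg; i++) {
		printf(" %u", cg);
		if (++cg == ncg)
			cg = 0;
	}
	printf("\n");
}

int
main(void)
{
	probe_order(8, 3);	/* prints: 3 4 6 2 4 5 6 7 0 1 */
	return (0);
}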
228
229/*
230 * Reallocate a fragment to a bigger size
231 *
232 * The number and size of the old block is given, and a preference
233 * and new size is also specified. The allocator attempts to extend
234 * the original block. Failing that, the regular block allocator is
235 * invoked to get an appropriate block.
236 */
237int
238ffs_realloccg(ip, lbprev, bprev, bpref, osize, nsize, flags, cred, bpp)
239 struct inode *ip;
240 ufs2_daddr_t lbprev;
241 ufs2_daddr_t bprev;
242 ufs2_daddr_t bpref;
243 int osize, nsize, flags;
244 struct ucred *cred;
245 struct buf **bpp;
246{
247 struct vnode *vp;
248 struct fs *fs;
249 struct buf *bp;
250 struct ufsmount *ump;
251 int cg, request, error, reclaimed;
252 u_int cg, request, reclaimed;
253 int error;
252 ufs2_daddr_t bno;
253 static struct timeval lastfail;
254 static int curfail;
255 int64_t delta;
256
257 *bpp = 0;
258 vp = ITOV(ip);
259 fs = ip->i_fs;
260 bp = NULL;
261 ump = ip->i_ump;
262 mtx_assert(UFS_MTX(ump), MA_OWNED);
263#ifdef INVARIANTS
264 if (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
265 panic("ffs_realloccg: allocation on suspended filesystem");
266 if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
267 (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
268 printf(
269 "dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
270 devtoname(ip->i_dev), (long)fs->fs_bsize, osize,
271 nsize, fs->fs_fsmnt);
272 panic("ffs_realloccg: bad size");
273 }
274 if (cred == NOCRED)
275 panic("ffs_realloccg: missing credential");
276#endif /* INVARIANTS */
277 reclaimed = 0;
278retry:
279 if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
280 freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0) {
281 goto nospace;
282 }
283 if (bprev == 0) {
284 printf("dev = %s, bsize = %ld, bprev = %jd, fs = %s\n",
285 devtoname(ip->i_dev), (long)fs->fs_bsize, (intmax_t)bprev,
286 fs->fs_fsmnt);
287 panic("ffs_realloccg: bad bprev");
288 }
289 UFS_UNLOCK(ump);
290 /*
291 * Allocate the extra space in the buffer.
292 */
293 error = bread(vp, lbprev, osize, NOCRED, &bp);
294 if (error) {
295 brelse(bp);
296 return (error);
297 }
298
299 if (bp->b_blkno == bp->b_lblkno) {
300 if (lbprev >= NDADDR)
301 panic("ffs_realloccg: lbprev out of range");
302 bp->b_blkno = fsbtodb(fs, bprev);
303 }
304
305#ifdef QUOTA
306 error = chkdq(ip, btodb(nsize - osize), cred, 0);
307 if (error) {
308 brelse(bp);
309 return (error);
310 }
311#endif
312 /*
313 * Check for extension in the existing location.
314 */
315 cg = dtog(fs, bprev);
316 UFS_LOCK(ump);
317 bno = ffs_fragextend(ip, cg, bprev, osize, nsize);
318 if (bno) {
319 if (bp->b_blkno != fsbtodb(fs, bno))
320 panic("ffs_realloccg: bad blockno");
321 delta = btodb(nsize - osize);
322 if (ip->i_flag & IN_SPACECOUNTED) {
323 UFS_LOCK(ump);
324 fs->fs_pendingblocks += delta;
325 UFS_UNLOCK(ump);
326 }
327 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
328 if (flags & IO_EXT)
329 ip->i_flag |= IN_CHANGE;
330 else
331 ip->i_flag |= IN_CHANGE | IN_UPDATE;
332 allocbuf(bp, nsize);
333 bp->b_flags |= B_DONE;
334 bzero(bp->b_data + osize, nsize - osize);
335 if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
336 vfs_bio_set_valid(bp, osize, nsize - osize);
337 *bpp = bp;
338 return (0);
339 }
340 /*
341 * Allocate a new disk location.
342 */
343 if (bpref >= fs->fs_size)
344 bpref = 0;
345 switch ((int)fs->fs_optim) {
346 case FS_OPTSPACE:
347 /*
348 * Allocate an exact sized fragment. Although this makes
349 * best use of space, we will waste time relocating it if
350 * the file continues to grow. If the fragmentation is
351 * less than half of the minimum free reserve, we choose
352 * to begin optimizing for time.
353 */
354 request = nsize;
355 if (fs->fs_minfree <= 5 ||
356 fs->fs_cstotal.cs_nffree >
357 (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
358 break;
359 log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
360 fs->fs_fsmnt);
361 fs->fs_optim = FS_OPTTIME;
362 break;
363 case FS_OPTTIME:
364 /*
365 * At this point we have discovered a file that is trying to
366 * grow a small fragment to a larger fragment. To save time,
367 * we allocate a full sized block, then free the unused portion.
368 * If the file continues to grow, the `ffs_fragextend' call
369 * above will be able to grow it in place without further
370 * copying. If aberrant programs cause disk fragmentation to
371 * grow within 2% of the free reserve, we choose to begin
372 * optimizing for space.
373 */
374 request = fs->fs_bsize;
375 if (fs->fs_cstotal.cs_nffree <
376 (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
377 break;
378 log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
379 fs->fs_fsmnt);
380 fs->fs_optim = FS_OPTSPACE;
381 break;
382 default:
383 printf("dev = %s, optim = %ld, fs = %s\n",
384 devtoname(ip->i_dev), (long)fs->fs_optim, fs->fs_fsmnt);
385 panic("ffs_realloccg: bad optim");
386 /* NOTREACHED */
387 }
388 bno = ffs_hashalloc(ip, cg, bpref, request, ffs_alloccg);
389 if (bno > 0) {
390 bp->b_blkno = fsbtodb(fs, bno);
391 if (!DOINGSOFTDEP(vp))
392 ffs_blkfree(ump, fs, ip->i_devvp, bprev, (long)osize,
393 ip->i_number);
394 if (nsize < request)
395 ffs_blkfree(ump, fs, ip->i_devvp,
396 bno + numfrags(fs, nsize),
397 (long)(request - nsize), ip->i_number);
398 delta = btodb(nsize - osize);
399 if (ip->i_flag & IN_SPACECOUNTED) {
400 UFS_LOCK(ump);
401 fs->fs_pendingblocks += delta;
402 UFS_UNLOCK(ump);
403 }
404 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
405 if (flags & IO_EXT)
406 ip->i_flag |= IN_CHANGE;
407 else
408 ip->i_flag |= IN_CHANGE | IN_UPDATE;
409 allocbuf(bp, nsize);
410 bp->b_flags |= B_DONE;
411 bzero(bp->b_data + osize, nsize - osize);
412 if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
413 vfs_bio_set_valid(bp, osize, nsize - osize);
414 *bpp = bp;
415 return (0);
416 }
417#ifdef QUOTA
418 UFS_UNLOCK(ump);
419 /*
420 * Restore user's disk quota because allocation failed.
421 */
422 (void) chkdq(ip, -btodb(nsize - osize), cred, FORCE);
423 UFS_LOCK(ump);
424#endif
425nospace:
426 /*
427 * no space available
428 */
429 if (fs->fs_pendingblocks > 0 && reclaimed == 0) {
430 reclaimed = 1;
431 softdep_request_cleanup(fs, vp);
432 UFS_UNLOCK(ump);
433 if (bp)
434 brelse(bp);
435 UFS_LOCK(ump);
436 goto retry;
437 }
438 UFS_UNLOCK(ump);
439 if (bp)
440 brelse(bp);
441 if (ppsratecheck(&lastfail, &curfail, 1)) {
442 ffs_fserr(fs, ip->i_number, "filesystem full");
443 uprintf("\n%s: write failed, filesystem is full\n",
444 fs->fs_fsmnt);
445 }
446 return (ENOSPC);
447}
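/*
 * A standalone sketch, not part of ffs_alloc.c: the FS_OPTSPACE /
 * FS_OPTTIME switch in ffs_realloccg() above flips policy based on how
 * much fragmented free space remains relative to the minfree reserve
 * (the switch is skipped entirely when fs_minfree <= 5). The two
 * thresholds reduce to the expressions below; the field values are
 * illustrative, not from any real filesystem:
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int64_t dsize = 1000000;	/* fs_dsize: data fragments in fs */
	int minfree = 8;		/* fs_minfree: reserve, in percent */

	/* SPACE -> TIME once free fragments fall to half the reserve. */
	int64_t space_to_time = dsize * minfree / (2 * 100);
	/* TIME -> SPACE once free fragments grow within 2% of the reserve. */
	int64_t time_to_space = dsize * (minfree - 2) / 100;

	printf("SPACE->TIME when cs_nffree <= %jd\n", (intmax_t)space_to_time);
	printf("TIME->SPACE when cs_nffree >= %jd\n", (intmax_t)time_to_space);
	return (0);
}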
448
449/*
450 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
451 *
452 * The vnode and an array of buffer pointers for a range of sequential
453 * logical blocks to be made contiguous is given. The allocator attempts
454 * to find a range of sequential blocks starting as close as possible
455 * from the end of the allocation for the logical block immediately
456 * preceding the current range. If successful, the physical block numbers
457 * in the buffer pointers and in the inode are changed to reflect the new
458 * allocation. If unsuccessful, the allocation is left unchanged. The
459 * success in doing the reallocation is returned. Note that the error
460 * return is not reflected back to the user. Rather the previous block
461 * allocation will be used.
462 */
463
464SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");
465
466static int doasyncfree = 1;
467SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, "");
468
469static int doreallocblks = 1;
470SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");
471
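/*
 * A standalone sketch, not part of ffs_alloc.c: the SYSCTL_NODE and
 * SYSCTL_INT declarations above export the two knobs as
 * vfs.ffs.doasyncfree and vfs.ffs.doreallocblks. On a FreeBSD system
 * they can be read (or, via the last two arguments, written) with
 * sysctlbyname(3):
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int val;
	size_t len = sizeof(val);

	if (sysctlbyname("vfs.ffs.doreallocblks", &val, &len, NULL, 0) == 0)
		printf("vfs.ffs.doreallocblks = %d\n", val);
	else
		perror("sysctlbyname");
	return (0);
}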
472#ifdef DEBUG
473static volatile int prtrealloc = 0;
474#endif
475
476int
477ffs_reallocblks(ap)
478 struct vop_reallocblks_args /* {
479 struct vnode *a_vp;
480 struct cluster_save *a_buflist;
481 } */ *ap;
482{
483
484 if (doreallocblks == 0)
485 return (ENOSPC);
486 if (VTOI(ap->a_vp)->i_ump->um_fstype == UFS1)
487 return (ffs_reallocblks_ufs1(ap));
488 return (ffs_reallocblks_ufs2(ap));
489}
490
491static int
492ffs_reallocblks_ufs1(ap)
493 struct vop_reallocblks_args /* {
494 struct vnode *a_vp;
495 struct cluster_save *a_buflist;
496 } */ *ap;
497{
498 struct fs *fs;
499 struct inode *ip;
500 struct vnode *vp;
501 struct buf *sbp, *ebp;
502 ufs1_daddr_t *bap, *sbap, *ebap = 0;
503 struct cluster_save *buflist;
504 struct ufsmount *ump;
505 ufs_lbn_t start_lbn, end_lbn;
506 ufs1_daddr_t soff, newblk, blkno;
507 ufs2_daddr_t pref;
508 struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
509 int i, len, start_lvl, end_lvl, ssize;
510
511 vp = ap->a_vp;
512 ip = VTOI(vp);
513 fs = ip->i_fs;
514 ump = ip->i_ump;
515 if (fs->fs_contigsumsize <= 0)
516 return (ENOSPC);
517 buflist = ap->a_buflist;
518 len = buflist->bs_nchildren;
519 start_lbn = buflist->bs_children[0]->b_lblkno;
520 end_lbn = start_lbn + len - 1;
521#ifdef INVARIANTS
522 for (i = 0; i < len; i++)
523 if (!ffs_checkblk(ip,
524 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
525 panic("ffs_reallocblks: unallocated block 1");
526 for (i = 1; i < len; i++)
527 if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
528 panic("ffs_reallocblks: non-logical cluster");
529 blkno = buflist->bs_children[0]->b_blkno;
530 ssize = fsbtodb(fs, fs->fs_frag);
531 for (i = 1; i < len - 1; i++)
532 if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
533 panic("ffs_reallocblks: non-physical cluster %d", i);
534#endif
535 /*
536 * If the latest allocation is in a new cylinder group, assume that
537 * the filesystem has decided to move and do not force it back to
538 * the previous cylinder group.
539 */
540 if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
541 dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
542 return (ENOSPC);
543 if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
544 ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
545 return (ENOSPC);
546 /*
547 * Get the starting offset and block map for the first block.
548 */
549 if (start_lvl == 0) {
550 sbap = &ip->i_din1->di_db[0];
551 soff = start_lbn;
552 } else {
553 idp = &start_ap[start_lvl - 1];
554 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
555 brelse(sbp);
556 return (ENOSPC);
557 }
558 sbap = (ufs1_daddr_t *)sbp->b_data;
559 soff = idp->in_off;
560 }
561 /*
562 * If the block range spans two block maps, get the second map.
563 */
564 if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
565 ssize = len;
566 } else {
567#ifdef INVARIANTS
568 if (start_lvl > 0 &&
569 start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
570 panic("ffs_reallocblk: start == end");
571#endif
572 ssize = len - (idp->in_off + 1);
573 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
574 goto fail;
575 ebap = (ufs1_daddr_t *)ebp->b_data;
576 }
577 /*
578 * Find the preferred location for the cluster.
579 */
580 UFS_LOCK(ump);
581 pref = ffs_blkpref_ufs1(ip, start_lbn, soff, sbap);
582 /*
583 * Search the block map looking for an allocation of the desired size.
584 */
585 if ((newblk = ffs_hashalloc(ip, dtog(fs, pref), pref,
586 len, ffs_clusteralloc)) == 0) {
587 UFS_UNLOCK(ump);
588 goto fail;
589 }
590 /*
591 * We have found a new contiguous block.
592 *
593 * First we have to replace the old block pointers with the new
594 * block pointers in the inode and indirect blocks associated
595 * with the file.
596 */
597#ifdef DEBUG
598 if (prtrealloc)
599 printf("realloc: ino %d, lbns %jd-%jd\n\told:", ip->i_number,
600 (intmax_t)start_lbn, (intmax_t)end_lbn);
601#endif
602 blkno = newblk;
603 for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
604 if (i == ssize) {
605 bap = ebap;
606 soff = -i;
607 }
608#ifdef INVARIANTS
609 if (!ffs_checkblk(ip,
610 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
611 panic("ffs_reallocblks: unallocated block 2");
612 if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
613 panic("ffs_reallocblks: alloc mismatch");
614#endif
615#ifdef DEBUG
616 if (prtrealloc)
617 printf(" %d,", *bap);
618#endif
619 if (DOINGSOFTDEP(vp)) {
620 if (sbap == &ip->i_din1->di_db[0] && i < ssize)
621 softdep_setup_allocdirect(ip, start_lbn + i,
622 blkno, *bap, fs->fs_bsize, fs->fs_bsize,
623 buflist->bs_children[i]);
624 else
625 softdep_setup_allocindir_page(ip, start_lbn + i,
626 i < ssize ? sbp : ebp, soff + i, blkno,
627 *bap, buflist->bs_children[i]);
628 }
629 *bap++ = blkno;
630 }
631 /*
632 * Next we must write out the modified inode and indirect blocks.
633 * For strict correctness, the writes should be synchronous since
 634	 * the old block values may have been written to disk. In practice
635 * they are almost never written, but if we are concerned about
636 * strict correctness, the `doasyncfree' flag should be set to zero.
637 *
638 * The test on `doasyncfree' should be changed to test a flag
639 * that shows whether the associated buffers and inodes have
640 * been written. The flag should be set when the cluster is
641 * started and cleared whenever the buffer or inode is flushed.
642 * We can then check below to see if it is set, and do the
643 * synchronous write only when it has been cleared.
644 */
645 if (sbap != &ip->i_din1->di_db[0]) {
646 if (doasyncfree)
647 bdwrite(sbp);
648 else
649 bwrite(sbp);
650 } else {
651 ip->i_flag |= IN_CHANGE | IN_UPDATE;
652 if (!doasyncfree)
653 ffs_update(vp, 1);
654 }
655 if (ssize < len) {
656 if (doasyncfree)
657 bdwrite(ebp);
658 else
659 bwrite(ebp);
660 }
661 /*
662 * Last, free the old blocks and assign the new blocks to the buffers.
663 */
664#ifdef DEBUG
665 if (prtrealloc)
666 printf("\n\tnew:");
667#endif
668 for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
669 if (!DOINGSOFTDEP(vp))
670 ffs_blkfree(ump, fs, ip->i_devvp,
671 dbtofsb(fs, buflist->bs_children[i]->b_blkno),
672 fs->fs_bsize, ip->i_number);
673 buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
674#ifdef INVARIANTS
675 if (!ffs_checkblk(ip,
676 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
677 panic("ffs_reallocblks: unallocated block 3");
678#endif
679#ifdef DEBUG
680 if (prtrealloc)
681 printf(" %d,", blkno);
682#endif
683 }
684#ifdef DEBUG
685 if (prtrealloc) {
686 prtrealloc--;
687 printf("\n");
688 }
689#endif
690 return (0);
691
692fail:
693 if (ssize < len)
694 brelse(ebp);
695 if (sbap != &ip->i_din1->di_db[0])
696 brelse(sbp);
697 return (ENOSPC);
698}
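/*
 * A standalone sketch, not part of ffs_alloc.c: the pointer-rewrite
 * loop in ffs_reallocblks_ufs1() above walks len block pointers that
 * may span two maps: the first ssize entries live in sbap, the rest in
 * ebap. The assignment soff = -i keeps soff + i valid as an offset
 * into the second map. The same walk, stripped of filesystem state
 * (the real loop steps blkno by fs_frag rather than by 1):
 */
#include <stdio.h>

int
main(void)
{
	int sbap[4] = { 10, 11, 12, 13 };	/* first map, e.g. di_db[] */
	int ebap[2] = { 20, 21 };		/* second map (indirect block) */
	int *bap = sbap;
	int len = 6, ssize = 4, soff = 0;
	int i, blkno = 100;	/* start of the newly allocated run */

	for (i = 0; i < len; i++, blkno++) {
		if (i == ssize) {	/* crossed into the second map */
			bap = ebap;
			soff = -i;	/* so soff + i restarts at 0 */
		}
		printf("slot %d (map offset %d): %d -> %d\n",
		    i, soff + i, *bap, blkno);
		*bap++ = blkno;
	}
	return (0);
}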
699
700static int
701ffs_reallocblks_ufs2(ap)
702 struct vop_reallocblks_args /* {
703 struct vnode *a_vp;
704 struct cluster_save *a_buflist;
705 } */ *ap;
706{
707 struct fs *fs;
708 struct inode *ip;
709 struct vnode *vp;
710 struct buf *sbp, *ebp;
711 ufs2_daddr_t *bap, *sbap, *ebap = 0;
712 struct cluster_save *buflist;
713 struct ufsmount *ump;
714 ufs_lbn_t start_lbn, end_lbn;
715 ufs2_daddr_t soff, newblk, blkno, pref;
716 struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
717 int i, len, start_lvl, end_lvl, ssize;
718
719 vp = ap->a_vp;
720 ip = VTOI(vp);
721 fs = ip->i_fs;
722 ump = ip->i_ump;
723 if (fs->fs_contigsumsize <= 0)
724 return (ENOSPC);
725 buflist = ap->a_buflist;
726 len = buflist->bs_nchildren;
727 start_lbn = buflist->bs_children[0]->b_lblkno;
728 end_lbn = start_lbn + len - 1;
729#ifdef INVARIANTS
730 for (i = 0; i < len; i++)
731 if (!ffs_checkblk(ip,
732 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
733 panic("ffs_reallocblks: unallocated block 1");
734 for (i = 1; i < len; i++)
735 if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
736 panic("ffs_reallocblks: non-logical cluster");
737 blkno = buflist->bs_children[0]->b_blkno;
738 ssize = fsbtodb(fs, fs->fs_frag);
739 for (i = 1; i < len - 1; i++)
740 if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
741 panic("ffs_reallocblks: non-physical cluster %d", i);
742#endif
743 /*
744 * If the latest allocation is in a new cylinder group, assume that
745 * the filesystem has decided to move and do not force it back to
746 * the previous cylinder group.
747 */
748 if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
749 dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
750 return (ENOSPC);
751 if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
752 ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
753 return (ENOSPC);
754 /*
755 * Get the starting offset and block map for the first block.
756 */
757 if (start_lvl == 0) {
758 sbap = &ip->i_din2->di_db[0];
759 soff = start_lbn;
760 } else {
761 idp = &start_ap[start_lvl - 1];
762 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
763 brelse(sbp);
764 return (ENOSPC);
765 }
766 sbap = (ufs2_daddr_t *)sbp->b_data;
767 soff = idp->in_off;
768 }
769 /*
770 * If the block range spans two block maps, get the second map.
771 */
772 if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
773 ssize = len;
774 } else {
775#ifdef INVARIANTS
776 if (start_lvl > 0 &&
777 start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
778 panic("ffs_reallocblk: start == end");
779#endif
780 ssize = len - (idp->in_off + 1);
781 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
782 goto fail;
783 ebap = (ufs2_daddr_t *)ebp->b_data;
784 }
785 /*
786 * Find the preferred location for the cluster.
787 */
788 UFS_LOCK(ump);
789 pref = ffs_blkpref_ufs2(ip, start_lbn, soff, sbap);
790 /*
791 * Search the block map looking for an allocation of the desired size.
792 */
793 if ((newblk = ffs_hashalloc(ip, dtog(fs, pref), pref,
794 len, ffs_clusteralloc)) == 0) {
795 UFS_UNLOCK(ump);
796 goto fail;
797 }
798 /*
799 * We have found a new contiguous block.
800 *
801 * First we have to replace the old block pointers with the new
802 * block pointers in the inode and indirect blocks associated
803 * with the file.
804 */
805#ifdef DEBUG
806 if (prtrealloc)
807 printf("realloc: ino %d, lbns %jd-%jd\n\told:", ip->i_number,
808 (intmax_t)start_lbn, (intmax_t)end_lbn);
809#endif
810 blkno = newblk;
811 for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
812 if (i == ssize) {
813 bap = ebap;
814 soff = -i;
815 }
816#ifdef INVARIANTS
817 if (!ffs_checkblk(ip,
818 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
819 panic("ffs_reallocblks: unallocated block 2");
820 if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
821 panic("ffs_reallocblks: alloc mismatch");
822#endif
823#ifdef DEBUG
824 if (prtrealloc)
825 printf(" %jd,", (intmax_t)*bap);
826#endif
827 if (DOINGSOFTDEP(vp)) {
828 if (sbap == &ip->i_din2->di_db[0] && i < ssize)
829 softdep_setup_allocdirect(ip, start_lbn + i,
830 blkno, *bap, fs->fs_bsize, fs->fs_bsize,
831 buflist->bs_children[i]);
832 else
833 softdep_setup_allocindir_page(ip, start_lbn + i,
834 i < ssize ? sbp : ebp, soff + i, blkno,
835 *bap, buflist->bs_children[i]);
836 }
837 *bap++ = blkno;
838 }
839 /*
840 * Next we must write out the modified inode and indirect blocks.
841 * For strict correctness, the writes should be synchronous since
 842	 * the old block values may have been written to disk. In practice
843 * they are almost never written, but if we are concerned about
844 * strict correctness, the `doasyncfree' flag should be set to zero.
845 *
846 * The test on `doasyncfree' should be changed to test a flag
847 * that shows whether the associated buffers and inodes have
848 * been written. The flag should be set when the cluster is
849 * started and cleared whenever the buffer or inode is flushed.
850 * We can then check below to see if it is set, and do the
851 * synchronous write only when it has been cleared.
852 */
853 if (sbap != &ip->i_din2->di_db[0]) {
854 if (doasyncfree)
855 bdwrite(sbp);
856 else
857 bwrite(sbp);
858 } else {
859 ip->i_flag |= IN_CHANGE | IN_UPDATE;
860 if (!doasyncfree)
861 ffs_update(vp, 1);
862 }
863 if (ssize < len) {
864 if (doasyncfree)
865 bdwrite(ebp);
866 else
867 bwrite(ebp);
868 }
869 /*
870 * Last, free the old blocks and assign the new blocks to the buffers.
871 */
872#ifdef DEBUG
873 if (prtrealloc)
874 printf("\n\tnew:");
875#endif
876 for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
877 if (!DOINGSOFTDEP(vp))
878 ffs_blkfree(ump, fs, ip->i_devvp,
879 dbtofsb(fs, buflist->bs_children[i]->b_blkno),
880 fs->fs_bsize, ip->i_number);
881 buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
882#ifdef INVARIANTS
883 if (!ffs_checkblk(ip,
884 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
885 panic("ffs_reallocblks: unallocated block 3");
886#endif
887#ifdef DEBUG
888 if (prtrealloc)
889 printf(" %jd,", (intmax_t)blkno);
890#endif
891 }
892#ifdef DEBUG
893 if (prtrealloc) {
894 prtrealloc--;
895 printf("\n");
896 }
897#endif
898 return (0);
899
900fail:
901 if (ssize < len)
902 brelse(ebp);
903 if (sbap != &ip->i_din2->di_db[0])
904 brelse(sbp);
905 return (ENOSPC);
906}
907
908/*
909 * Allocate an inode in the filesystem.
910 *
911 * If allocating a directory, use ffs_dirpref to select the inode.
912 * If allocating in a directory, the following hierarchy is followed:
913 * 1) allocate the preferred inode.
914 * 2) allocate an inode in the same cylinder group.
 915	 * 3) quadratically rehash into other cylinder groups, until an
916 * available inode is located.
917 * If no inode preference is given the following hierarchy is used
918 * to allocate an inode:
919 * 1) allocate an inode in cylinder group 0.
 920	 * 2) quadratically rehash into other cylinder groups, until an
921 * available inode is located.
922 */
923int
924ffs_valloc(pvp, mode, cred, vpp)
925 struct vnode *pvp;
926 int mode;
927 struct ucred *cred;
928 struct vnode **vpp;
929{
930 struct inode *pip;
931 struct fs *fs;
932 struct inode *ip;
933 struct timespec ts;
934 struct ufsmount *ump;
935 ino_t ino, ipref;
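/*
 * A standalone sketch, not part of ffs_alloc.c (the hunk ends partway
 * into ffs_valloc()): the "same cylinder group" preferences described
 * in the comment above rest on simple arithmetic. ino_to_cg() in fs.h
 * maps an inode number to its group by dividing by inodes-per-group;
 * a simplified stand-in taking fs_ipg directly:
 */
#include <stdio.h>

#define	ino_to_cg(ipg, ino)	((ino) / (ipg))	/* cf. ino_to_cg() in fs.h */

int
main(void)
{
	unsigned int ipg = 32768;	/* hypothetical fs_ipg */
	unsigned int ino = 100000;

	printf("inode %u lives in cylinder group %u\n",
	    ino, ino_to_cg(ipg, ino));
	return (0);
}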
254 ufs2_daddr_t bno;
255 static struct timeval lastfail;
256 static int curfail;
257 int64_t delta;
258
259 *bpp = 0;
260 vp = ITOV(ip);
261 fs = ip->i_fs;
262 bp = NULL;
263 ump = ip->i_ump;
264 mtx_assert(UFS_MTX(ump), MA_OWNED);
265#ifdef INVARIANTS
266 if (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
267 panic("ffs_realloccg: allocation on suspended filesystem");
268 if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
269 (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
270 printf(
271 "dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
272 devtoname(ip->i_dev), (long)fs->fs_bsize, osize,
273 nsize, fs->fs_fsmnt);
274 panic("ffs_realloccg: bad size");
275 }
276 if (cred == NOCRED)
277 panic("ffs_realloccg: missing credential");
278#endif /* INVARIANTS */
279 reclaimed = 0;
280retry:
281 if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
282 freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0) {
283 goto nospace;
284 }
285 if (bprev == 0) {
286 printf("dev = %s, bsize = %ld, bprev = %jd, fs = %s\n",
287 devtoname(ip->i_dev), (long)fs->fs_bsize, (intmax_t)bprev,
288 fs->fs_fsmnt);
289 panic("ffs_realloccg: bad bprev");
290 }
291 UFS_UNLOCK(ump);
292 /*
293 * Allocate the extra space in the buffer.
294 */
295 error = bread(vp, lbprev, osize, NOCRED, &bp);
296 if (error) {
297 brelse(bp);
298 return (error);
299 }
300
301 if (bp->b_blkno == bp->b_lblkno) {
302 if (lbprev >= NDADDR)
303 panic("ffs_realloccg: lbprev out of range");
304 bp->b_blkno = fsbtodb(fs, bprev);
305 }
306
307#ifdef QUOTA
308 error = chkdq(ip, btodb(nsize - osize), cred, 0);
309 if (error) {
310 brelse(bp);
311 return (error);
312 }
313#endif
314 /*
315 * Check for extension in the existing location.
316 */
317 cg = dtog(fs, bprev);
318 UFS_LOCK(ump);
319 bno = ffs_fragextend(ip, cg, bprev, osize, nsize);
320 if (bno) {
321 if (bp->b_blkno != fsbtodb(fs, bno))
322 panic("ffs_realloccg: bad blockno");
323 delta = btodb(nsize - osize);
324 if (ip->i_flag & IN_SPACECOUNTED) {
325 UFS_LOCK(ump);
326 fs->fs_pendingblocks += delta;
327 UFS_UNLOCK(ump);
328 }
329 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
330 if (flags & IO_EXT)
331 ip->i_flag |= IN_CHANGE;
332 else
333 ip->i_flag |= IN_CHANGE | IN_UPDATE;
334 allocbuf(bp, nsize);
335 bp->b_flags |= B_DONE;
336 bzero(bp->b_data + osize, nsize - osize);
337 if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
338 vfs_bio_set_valid(bp, osize, nsize - osize);
339 *bpp = bp;
340 return (0);
341 }
342 /*
343 * Allocate a new disk location.
344 */
345 if (bpref >= fs->fs_size)
346 bpref = 0;
347 switch ((int)fs->fs_optim) {
348 case FS_OPTSPACE:
349 /*
350 * Allocate an exact sized fragment. Although this makes
351 * best use of space, we will waste time relocating it if
352 * the file continues to grow. If the fragmentation is
353 * less than half of the minimum free reserve, we choose
354 * to begin optimizing for time.
355 */
356 request = nsize;
357 if (fs->fs_minfree <= 5 ||
358 fs->fs_cstotal.cs_nffree >
359 (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
360 break;
361 log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
362 fs->fs_fsmnt);
363 fs->fs_optim = FS_OPTTIME;
364 break;
365 case FS_OPTTIME:
366 /*
367 * At this point we have discovered a file that is trying to
368 * grow a small fragment to a larger fragment. To save time,
369 * we allocate a full sized block, then free the unused portion.
370 * If the file continues to grow, the `ffs_fragextend' call
371 * above will be able to grow it in place without further
372 * copying. If aberrant programs cause disk fragmentation to
373 * grow within 2% of the free reserve, we choose to begin
374 * optimizing for space.
375 */
376 request = fs->fs_bsize;
377 if (fs->fs_cstotal.cs_nffree <
378 (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
379 break;
380 log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
381 fs->fs_fsmnt);
382 fs->fs_optim = FS_OPTSPACE;
383 break;
384 default:
385 printf("dev = %s, optim = %ld, fs = %s\n",
386 devtoname(ip->i_dev), (long)fs->fs_optim, fs->fs_fsmnt);
387 panic("ffs_realloccg: bad optim");
388 /* NOTREACHED */
389 }
390 bno = ffs_hashalloc(ip, cg, bpref, request, ffs_alloccg);
391 if (bno > 0) {
392 bp->b_blkno = fsbtodb(fs, bno);
393 if (!DOINGSOFTDEP(vp))
394 ffs_blkfree(ump, fs, ip->i_devvp, bprev, (long)osize,
395 ip->i_number);
396 if (nsize < request)
397 ffs_blkfree(ump, fs, ip->i_devvp,
398 bno + numfrags(fs, nsize),
399 (long)(request - nsize), ip->i_number);
400 delta = btodb(nsize - osize);
401 if (ip->i_flag & IN_SPACECOUNTED) {
402 UFS_LOCK(ump);
403 fs->fs_pendingblocks += delta;
404 UFS_UNLOCK(ump);
405 }
406 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
407 if (flags & IO_EXT)
408 ip->i_flag |= IN_CHANGE;
409 else
410 ip->i_flag |= IN_CHANGE | IN_UPDATE;
411 allocbuf(bp, nsize);
412 bp->b_flags |= B_DONE;
413 bzero(bp->b_data + osize, nsize - osize);
414 if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
415 vfs_bio_set_valid(bp, osize, nsize - osize);
416 *bpp = bp;
417 return (0);
418 }
419#ifdef QUOTA
420 UFS_UNLOCK(ump);
421 /*
422 * Restore user's disk quota because allocation failed.
423 */
424 (void) chkdq(ip, -btodb(nsize - osize), cred, FORCE);
425 UFS_LOCK(ump);
426#endif
427nospace:
428 /*
429 * no space available
430 */
431 if (fs->fs_pendingblocks > 0 && reclaimed == 0) {
432 reclaimed = 1;
433 softdep_request_cleanup(fs, vp);
434 UFS_UNLOCK(ump);
435 if (bp)
436 brelse(bp);
437 UFS_LOCK(ump);
438 goto retry;
439 }
440 UFS_UNLOCK(ump);
441 if (bp)
442 brelse(bp);
443 if (ppsratecheck(&lastfail, &curfail, 1)) {
444 ffs_fserr(fs, ip->i_number, "filesystem full");
445 uprintf("\n%s: write failed, filesystem is full\n",
446 fs->fs_fsmnt);
447 }
448 return (ENOSPC);
449}
450
451/*
452 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
453 *
454 * The vnode and an array of buffer pointers for a range of sequential
455 * logical blocks to be made contiguous is given. The allocator attempts
456 * to find a range of sequential blocks starting as close as possible
457 * from the end of the allocation for the logical block immediately
458 * preceding the current range. If successful, the physical block numbers
459 * in the buffer pointers and in the inode are changed to reflect the new
460 * allocation. If unsuccessful, the allocation is left unchanged. The
461 * success in doing the reallocation is returned. Note that the error
462 * return is not reflected back to the user. Rather the previous block
463 * allocation will be used.
464 */
465
466SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");
467
468static int doasyncfree = 1;
469SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, "");
470
471static int doreallocblks = 1;
472SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");
473
474#ifdef DEBUG
475static volatile int prtrealloc = 0;
476#endif
477
478int
479ffs_reallocblks(ap)
480 struct vop_reallocblks_args /* {
481 struct vnode *a_vp;
482 struct cluster_save *a_buflist;
483 } */ *ap;
484{
485
486 if (doreallocblks == 0)
487 return (ENOSPC);
488 if (VTOI(ap->a_vp)->i_ump->um_fstype == UFS1)
489 return (ffs_reallocblks_ufs1(ap));
490 return (ffs_reallocblks_ufs2(ap));
491}
492
493static int
494ffs_reallocblks_ufs1(ap)
495 struct vop_reallocblks_args /* {
496 struct vnode *a_vp;
497 struct cluster_save *a_buflist;
498 } */ *ap;
499{
500 struct fs *fs;
501 struct inode *ip;
502 struct vnode *vp;
503 struct buf *sbp, *ebp;
504 ufs1_daddr_t *bap, *sbap, *ebap = 0;
505 struct cluster_save *buflist;
506 struct ufsmount *ump;
507 ufs_lbn_t start_lbn, end_lbn;
508 ufs1_daddr_t soff, newblk, blkno;
509 ufs2_daddr_t pref;
510 struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
511 int i, len, start_lvl, end_lvl, ssize;
512
513 vp = ap->a_vp;
514 ip = VTOI(vp);
515 fs = ip->i_fs;
516 ump = ip->i_ump;
517 if (fs->fs_contigsumsize <= 0)
518 return (ENOSPC);
519 buflist = ap->a_buflist;
520 len = buflist->bs_nchildren;
521 start_lbn = buflist->bs_children[0]->b_lblkno;
522 end_lbn = start_lbn + len - 1;
523#ifdef INVARIANTS
524 for (i = 0; i < len; i++)
525 if (!ffs_checkblk(ip,
526 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
527 panic("ffs_reallocblks: unallocated block 1");
528 for (i = 1; i < len; i++)
529 if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
530 panic("ffs_reallocblks: non-logical cluster");
531 blkno = buflist->bs_children[0]->b_blkno;
532 ssize = fsbtodb(fs, fs->fs_frag);
533 for (i = 1; i < len - 1; i++)
534 if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
535 panic("ffs_reallocblks: non-physical cluster %d", i);
536#endif
537 /*
538 * If the latest allocation is in a new cylinder group, assume that
539 * the filesystem has decided to move and do not force it back to
540 * the previous cylinder group.
541 */
542 if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
543 dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
544 return (ENOSPC);
545 if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
546 ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
547 return (ENOSPC);
548 /*
549 * Get the starting offset and block map for the first block.
550 */
551 if (start_lvl == 0) {
552 sbap = &ip->i_din1->di_db[0];
553 soff = start_lbn;
554 } else {
555 idp = &start_ap[start_lvl - 1];
556 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
557 brelse(sbp);
558 return (ENOSPC);
559 }
560 sbap = (ufs1_daddr_t *)sbp->b_data;
561 soff = idp->in_off;
562 }
563 /*
564 * If the block range spans two block maps, get the second map.
565 */
566 if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
567 ssize = len;
568 } else {
569#ifdef INVARIANTS
570 if (start_lvl > 0 &&
571 start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
572 panic("ffs_reallocblk: start == end");
573#endif
574 ssize = len - (idp->in_off + 1);
575 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
576 goto fail;
577 ebap = (ufs1_daddr_t *)ebp->b_data;
578 }
579 /*
580 * Find the preferred location for the cluster.
581 */
582 UFS_LOCK(ump);
583 pref = ffs_blkpref_ufs1(ip, start_lbn, soff, sbap);
584 /*
585 * Search the block map looking for an allocation of the desired size.
586 */
587 if ((newblk = ffs_hashalloc(ip, dtog(fs, pref), pref,
588 len, ffs_clusteralloc)) == 0) {
589 UFS_UNLOCK(ump);
590 goto fail;
591 }
592 /*
593 * We have found a new contiguous block.
594 *
595 * First we have to replace the old block pointers with the new
596 * block pointers in the inode and indirect blocks associated
597 * with the file.
598 */
599#ifdef DEBUG
600 if (prtrealloc)
601 printf("realloc: ino %d, lbns %jd-%jd\n\told:", ip->i_number,
602 (intmax_t)start_lbn, (intmax_t)end_lbn);
603#endif
604 blkno = newblk;
605 for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
606 if (i == ssize) {
607 bap = ebap;
608 soff = -i;
609 }
610#ifdef INVARIANTS
611 if (!ffs_checkblk(ip,
612 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
613 panic("ffs_reallocblks: unallocated block 2");
614 if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
615 panic("ffs_reallocblks: alloc mismatch");
616#endif
617#ifdef DEBUG
618 if (prtrealloc)
619 printf(" %d,", *bap);
620#endif
621 if (DOINGSOFTDEP(vp)) {
622 if (sbap == &ip->i_din1->di_db[0] && i < ssize)
623 softdep_setup_allocdirect(ip, start_lbn + i,
624 blkno, *bap, fs->fs_bsize, fs->fs_bsize,
625 buflist->bs_children[i]);
626 else
627 softdep_setup_allocindir_page(ip, start_lbn + i,
628 i < ssize ? sbp : ebp, soff + i, blkno,
629 *bap, buflist->bs_children[i]);
630 }
631 *bap++ = blkno;
632 }
633 /*
634 * Next we must write out the modified inode and indirect blocks.
635 * For strict correctness, the writes should be synchronous since
636 * the old block values may have been written to disk. In practise
637 * they are almost never written, but if we are concerned about
638 * strict correctness, the `doasyncfree' flag should be set to zero.
639 *
640 * The test on `doasyncfree' should be changed to test a flag
641 * that shows whether the associated buffers and inodes have
642 * been written. The flag should be set when the cluster is
643 * started and cleared whenever the buffer or inode is flushed.
644 * We can then check below to see if it is set, and do the
645 * synchronous write only when it has been cleared.
646 */
647 if (sbap != &ip->i_din1->di_db[0]) {
648 if (doasyncfree)
649 bdwrite(sbp);
650 else
651 bwrite(sbp);
652 } else {
653 ip->i_flag |= IN_CHANGE | IN_UPDATE;
654 if (!doasyncfree)
655 ffs_update(vp, 1);
656 }
657 if (ssize < len) {
658 if (doasyncfree)
659 bdwrite(ebp);
660 else
661 bwrite(ebp);
662 }
663 /*
664 * Last, free the old blocks and assign the new blocks to the buffers.
665 */
666#ifdef DEBUG
667 if (prtrealloc)
668 printf("\n\tnew:");
669#endif
670 for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
671 if (!DOINGSOFTDEP(vp))
672 ffs_blkfree(ump, fs, ip->i_devvp,
673 dbtofsb(fs, buflist->bs_children[i]->b_blkno),
674 fs->fs_bsize, ip->i_number);
675 buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
676#ifdef INVARIANTS
677 if (!ffs_checkblk(ip,
678 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
679 panic("ffs_reallocblks: unallocated block 3");
680#endif
681#ifdef DEBUG
682 if (prtrealloc)
683 printf(" %d,", blkno);
684#endif
685 }
686#ifdef DEBUG
687 if (prtrealloc) {
688 prtrealloc--;
689 printf("\n");
690 }
691#endif
692 return (0);
693
694fail:
695 if (ssize < len)
696 brelse(ebp);
697 if (sbap != &ip->i_din1->di_db[0])
698 brelse(sbp);
699 return (ENOSPC);
700}
701
702static int
703ffs_reallocblks_ufs2(ap)
704 struct vop_reallocblks_args /* {
705 struct vnode *a_vp;
706 struct cluster_save *a_buflist;
707 } */ *ap;
708{
709 struct fs *fs;
710 struct inode *ip;
711 struct vnode *vp;
712 struct buf *sbp, *ebp;
713 ufs2_daddr_t *bap, *sbap, *ebap = 0;
714 struct cluster_save *buflist;
715 struct ufsmount *ump;
716 ufs_lbn_t start_lbn, end_lbn;
717 ufs2_daddr_t soff, newblk, blkno, pref;
718 struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
719 int i, len, start_lvl, end_lvl, ssize;
720
721 vp = ap->a_vp;
722 ip = VTOI(vp);
723 fs = ip->i_fs;
724 ump = ip->i_ump;
725 if (fs->fs_contigsumsize <= 0)
726 return (ENOSPC);
727 buflist = ap->a_buflist;
728 len = buflist->bs_nchildren;
729 start_lbn = buflist->bs_children[0]->b_lblkno;
730 end_lbn = start_lbn + len - 1;
731#ifdef INVARIANTS
732 for (i = 0; i < len; i++)
733 if (!ffs_checkblk(ip,
734 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
735 panic("ffs_reallocblks: unallocated block 1");
736 for (i = 1; i < len; i++)
737 if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
738 panic("ffs_reallocblks: non-logical cluster");
739 blkno = buflist->bs_children[0]->b_blkno;
740 ssize = fsbtodb(fs, fs->fs_frag);
741 for (i = 1; i < len - 1; i++)
742 if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
743 panic("ffs_reallocblks: non-physical cluster %d", i);
744#endif
745 /*
746 * If the latest allocation is in a new cylinder group, assume that
747 * the filesystem has decided to move and do not force it back to
748 * the previous cylinder group.
749 */
750 if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
751 dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
752 return (ENOSPC);
753 if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
754 ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
755 return (ENOSPC);
756 /*
757 * Get the starting offset and block map for the first block.
758 */
759 if (start_lvl == 0) {
760 sbap = &ip->i_din2->di_db[0];
761 soff = start_lbn;
762 } else {
763 idp = &start_ap[start_lvl - 1];
764 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
765 brelse(sbp);
766 return (ENOSPC);
767 }
768 sbap = (ufs2_daddr_t *)sbp->b_data;
769 soff = idp->in_off;
770 }
771 /*
772 * If the block range spans two block maps, get the second map.
773 */
774 if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
775 ssize = len;
776 } else {
777#ifdef INVARIANTS
778 if (start_lvl > 0 &&
779 start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
780 panic("ffs_reallocblk: start == end");
781#endif
782 ssize = len - (idp->in_off + 1);
783 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
784 goto fail;
785 ebap = (ufs2_daddr_t *)ebp->b_data;
786 }
787 /*
788 * Find the preferred location for the cluster.
789 */
790 UFS_LOCK(ump);
791 pref = ffs_blkpref_ufs2(ip, start_lbn, soff, sbap);
792 /*
793 * Search the block map looking for an allocation of the desired size.
794 */
795 if ((newblk = ffs_hashalloc(ip, dtog(fs, pref), pref,
796 len, ffs_clusteralloc)) == 0) {
797 UFS_UNLOCK(ump);
798 goto fail;
799 }
800 /*
801 * We have found a new contiguous block.
802 *
803 * First we have to replace the old block pointers with the new
804 * block pointers in the inode and indirect blocks associated
805 * with the file.
806 */
807#ifdef DEBUG
808 if (prtrealloc)
809 printf("realloc: ino %d, lbns %jd-%jd\n\told:", ip->i_number,
810 (intmax_t)start_lbn, (intmax_t)end_lbn);
811#endif
812 blkno = newblk;
813 for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
814 if (i == ssize) {
815 bap = ebap;
816 soff = -i;
817 }
818#ifdef INVARIANTS
819 if (!ffs_checkblk(ip,
820 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
821 panic("ffs_reallocblks: unallocated block 2");
822 if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
823 panic("ffs_reallocblks: alloc mismatch");
824#endif
825#ifdef DEBUG
826 if (prtrealloc)
827 printf(" %jd,", (intmax_t)*bap);
828#endif
829 if (DOINGSOFTDEP(vp)) {
830 if (sbap == &ip->i_din2->di_db[0] && i < ssize)
831 softdep_setup_allocdirect(ip, start_lbn + i,
832 blkno, *bap, fs->fs_bsize, fs->fs_bsize,
833 buflist->bs_children[i]);
834 else
835 softdep_setup_allocindir_page(ip, start_lbn + i,
836 i < ssize ? sbp : ebp, soff + i, blkno,
837 *bap, buflist->bs_children[i]);
838 }
839 *bap++ = blkno;
840 }
841 /*
842 * Next we must write out the modified inode and indirect blocks.
843 * For strict correctness, the writes should be synchronous since
844 * the old block values may have been written to disk. In practice
845 * they are almost never written, but if we are concerned about
846 * strict correctness, the `doasyncfree' flag should be set to zero.
847 *
848 * The test on `doasyncfree' should be changed to test a flag
849 * that shows whether the associated buffers and inodes have
850 * been written. The flag should be set when the cluster is
851 * started and cleared whenever the buffer or inode is flushed.
852 * We can then check below to see if it is set, and do the
853 * synchronous write only when it has been cleared.
854 */
855 if (sbap != &ip->i_din2->di_db[0]) {
856 if (doasyncfree)
857 bdwrite(sbp);
858 else
859 bwrite(sbp);
860 } else {
861 ip->i_flag |= IN_CHANGE | IN_UPDATE;
862 if (!doasyncfree)
863 ffs_update(vp, 1);
864 }
865 if (ssize < len) {
866 if (doasyncfree)
867 bdwrite(ebp);
868 else
869 bwrite(ebp);
870 }
871 /*
872 * Last, free the old blocks and assign the new blocks to the buffers.
873 */
874#ifdef DEBUG
875 if (prtrealloc)
876 printf("\n\tnew:");
877#endif
878 for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
879 if (!DOINGSOFTDEP(vp))
880 ffs_blkfree(ump, fs, ip->i_devvp,
881 dbtofsb(fs, buflist->bs_children[i]->b_blkno),
882 fs->fs_bsize, ip->i_number);
883 buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
884#ifdef INVARIANTS
885 if (!ffs_checkblk(ip,
886 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
887 panic("ffs_reallocblks: unallocated block 3");
888#endif
889#ifdef DEBUG
890 if (prtrealloc)
891 printf(" %jd,", (intmax_t)blkno);
892#endif
893 }
894#ifdef DEBUG
895 if (prtrealloc) {
896 prtrealloc--;
897 printf("\n");
898 }
899#endif
900 return (0);
901
902fail:
903 if (ssize < len)
904 brelse(ebp);
905 if (sbap != &ip->i_din2->di_db[0])
906 brelse(sbp);
907 return (ENOSPC);
908}
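
/*
 * Illustrative sketch (not part of the original source): once
 * ffs_hashalloc() above returns a contiguous run starting at `newblk',
 * the reallocation loop rewrites each logical block's pointer to
 * successive fs_frag-sized steps of that run.  The remapping
 * arithmetic in isolation, over a caller-supplied pointer array;
 * compiled out so it cannot affect the build:
 */
#if 0
static void
remap_cluster_sketch(int64_t *bap, int len, int64_t newblk, int frag)
{
	int i;

	/* Pointer i of the cluster moves to frag i * frag of the run. */
	for (i = 0; i < len; i++)
		bap[i] = newblk + (int64_t)i * frag;
}
#endif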
909
910/*
911 * Allocate an inode in the filesystem.
912 *
913 * If allocating a directory, use ffs_dirpref to select the inode.
914 * If allocating in a directory, the following hierarchy is followed:
915 * 1) allocate the preferred inode.
916 * 2) allocate an inode in the same cylinder group.
917 * 3) quadratically rehash into other cylinder groups, until an
918 * available inode is located.
919 * If no inode preference is given the following hierarchy is used
920 * to allocate an inode:
921 * 1) allocate an inode in cylinder group 0.
922 * 2) quadratically rehash into other cylinder groups, until an
923 * available inode is located.
924 */
925int
926ffs_valloc(pvp, mode, cred, vpp)
927 struct vnode *pvp;
928 int mode;
929 struct ucred *cred;
930 struct vnode **vpp;
931{
932 struct inode *pip;
933 struct fs *fs;
934 struct inode *ip;
935 struct timespec ts;
936 struct ufsmount *ump;
937 ino_t ino, ipref;
938 u_int cg;
939 int error, error1;
940 static struct timeval lastfail;
941 static int curfail;
942
943 *vpp = NULL;
944 pip = VTOI(pvp);
945 fs = pip->i_fs;
946 ump = pip->i_ump;
947
948 UFS_LOCK(ump);
949 if (fs->fs_cstotal.cs_nifree == 0)
950 goto noinodes;
951
952 if ((mode & IFMT) == IFDIR)
953 ipref = ffs_dirpref(pip);
954 else
955 ipref = pip->i_number;
956 if (ipref >= fs->fs_ncg * fs->fs_ipg)
957 ipref = 0;
958 cg = ino_to_cg(fs, ipref);
959 /*
960 * Track the number of directories created one after another
961 * in the same cg without intervening file creations.
962 */
963 if ((mode & IFMT) == IFDIR) {
964 if (fs->fs_contigdirs[cg] < 255)
965 fs->fs_contigdirs[cg]++;
966 } else {
967 if (fs->fs_contigdirs[cg] > 0)
968 fs->fs_contigdirs[cg]--;
969 }
970 ino = (ino_t)ffs_hashalloc(pip, cg, ipref, mode,
971 (allocfcn_t *)ffs_nodealloccg);
972 if (ino == 0)
973 goto noinodes;
974 error = ffs_vget(pvp->v_mount, ino, LK_EXCLUSIVE, vpp);
975 if (error) {
976 error1 = ffs_vgetf(pvp->v_mount, ino, LK_EXCLUSIVE, vpp,
977 FFSV_FORCEINSMQ);
978 ffs_vfree(pvp, ino, mode);
979 if (error1 == 0) {
980 ip = VTOI(*vpp);
981 if (ip->i_mode)
982 goto dup_alloc;
983 ip->i_flag |= IN_MODIFIED;
984 vput(*vpp);
985 }
986 return (error);
987 }
988 ip = VTOI(*vpp);
989 if (ip->i_mode) {
990dup_alloc:
991 printf("mode = 0%o, inum = %lu, fs = %s\n",
992 ip->i_mode, (u_long)ip->i_number, fs->fs_fsmnt);
993 panic("ffs_valloc: dup alloc");
994 }
995 if (DIP(ip, i_blocks) && (fs->fs_flags & FS_UNCLEAN) == 0) { /* XXX */
996 printf("free inode %s/%lu had %ld blocks\n",
997 fs->fs_fsmnt, (u_long)ino, (long)DIP(ip, i_blocks));
998 DIP_SET(ip, i_blocks, 0);
999 }
1000 ip->i_flags = 0;
1001 DIP_SET(ip, i_flags, 0);
1002 /*
1003 * Set up a new generation number for this inode.
1004 */
1005 if (ip->i_gen == 0 || ++ip->i_gen == 0)
1006 ip->i_gen = arc4random() / 2 + 1;
1007 DIP_SET(ip, i_gen, ip->i_gen);
1008 if (fs->fs_magic == FS_UFS2_MAGIC) {
1009 vfs_timestamp(&ts);
1010 ip->i_din2->di_birthtime = ts.tv_sec;
1011 ip->i_din2->di_birthnsec = ts.tv_nsec;
1012 }
1013 ip->i_flag = 0;
1014 vnode_destroy_vobject(*vpp);
1015 (*vpp)->v_type = VNON;
1016 if (fs->fs_magic == FS_UFS2_MAGIC)
1017 (*vpp)->v_op = &ffs_vnodeops2;
1018 else
1019 (*vpp)->v_op = &ffs_vnodeops1;
1020 return (0);
1021noinodes:
1022 UFS_UNLOCK(ump);
1023 if (ppsratecheck(&lastfail, &curfail, 1)) {
1024 ffs_fserr(fs, pip->i_number, "out of inodes");
1025 uprintf("\n%s: create/symlink failed, no inodes free\n",
1026 fs->fs_fsmnt);
1027 }
1028 return (ENOSPC);
1029}
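
/*
 * Illustrative sketch (not part of the original source): the
 * fs_contigdirs[] update above is a per-cg saturating counter; each
 * directory created bumps it toward 255 and each regular file decays
 * it toward 0, and ffs_dirpref() later skips a cg whose counter has
 * reached maxcontigdirs.  The update rule in isolation; compiled out:
 */
#if 0
static void
contigdirs_update_sketch(u_char *contigdirs, u_int cg, int is_dir)
{

	if (is_dir) {
		if (contigdirs[cg] < 255)	/* saturate at 255 */
			contigdirs[cg]++;
	} else if (contigdirs[cg] > 0)		/* decay toward 0 */
		contigdirs[cg]--;
}
#endif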
1030
1031/*
1032 * Find a cylinder group to place a directory.
1033 *
1034 * The policy implemented by this algorithm is to allocate a
1035 * directory inode in the same cylinder group as its parent
1036 * directory, but also to reserve space for its files' inodes
1037 * and data. Restrict the number of directories which may be
1038 * allocated one after another in the same cylinder group
1039 * without intervening allocation of files.
1040 *
1041 * If we allocate a first-level directory, then force allocation
1042 * in another cylinder group.
1043 */
1044static ino_t
1045ffs_dirpref(pip)
1046 struct inode *pip;
1047{
1048 struct fs *fs;
1049 u_int cg, prefcg, dirsize, cgsize;
1050 u_int avgifree, avgbfree, avgndir, curdirsize;
1051 u_int minifree, minbfree, maxndir;
1052 u_int mincg, minndir;
1053 u_int maxcontigdirs;
1054
1055 mtx_assert(UFS_MTX(pip->i_ump), MA_OWNED);
1056 fs = pip->i_fs;
1057
1058 avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
1059 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
1060 avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;
1061
1062 /*
1063 * Force allocation in another cg if creating a first-level dir.
1064 */
1065 ASSERT_VOP_LOCKED(ITOV(pip), "ffs_dirpref");
1066 if (ITOV(pip)->v_vflag & VV_ROOT) {
1067 prefcg = arc4random() % fs->fs_ncg;
1068 mincg = prefcg;
1069 minndir = fs->fs_ipg;
1070 for (cg = prefcg; cg < fs->fs_ncg; cg++)
1071 if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
1072 fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
1073 fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1074 mincg = cg;
1075 minndir = fs->fs_cs(fs, cg).cs_ndir;
1076 }
1077 for (cg = 0; cg < prefcg; cg++)
1078 if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
1079 fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
1080 fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1081 mincg = cg;
1082 minndir = fs->fs_cs(fs, cg).cs_ndir;
1083 }
1084 return ((ino_t)(fs->fs_ipg * mincg));
1085 }
1086
1087 /*
1088 * Compute the various limits used for
1089 * optimal allocation of a directory inode.
1090 */
1091 maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
1092 minifree = avgifree - avgifree / 4;
1093 if (minifree < 1)
1094 minifree = 1;
1095 minbfree = avgbfree - avgbfree / 4;
1096 if (minbfree < 1)
1097 minbfree = 1;
1098 cgsize = fs->fs_fsize * fs->fs_fpg;
1099 dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
1100 curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
1101 if (dirsize < curdirsize)
1102 dirsize = curdirsize;
1103 if (dirsize <= 0)
1104 maxcontigdirs = 0; /* dirsize overflowed */
1105 else
1106 maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255);
1107 if (fs->fs_avgfpdir > 0)
1108 maxcontigdirs = min(maxcontigdirs,
1109 fs->fs_ipg / fs->fs_avgfpdir);
1110 if (maxcontigdirs == 0)
1111 maxcontigdirs = 1;
1112
1113 /*
1114 * Limit number of dirs in one cg and reserve space for
1115 * regular files, but only if we have no deficit in
1116 * inodes or space.
1117 */
1118 prefcg = ino_to_cg(fs, pip->i_number);
1119 for (cg = prefcg; cg < fs->fs_ncg; cg++)
1120 if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
1121 fs->fs_cs(fs, cg).cs_nifree >= minifree &&
1122 fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
1123 if (fs->fs_contigdirs[cg] < maxcontigdirs)
1124 return ((ino_t)(fs->fs_ipg * cg));
1125 }
1126 for (cg = 0; cg < prefcg; cg++)
1127 if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
1128 fs->fs_cs(fs, cg).cs_nifree >= minifree &&
1129 fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
1130 if (fs->fs_contigdirs[cg] < maxcontigdirs)
1131 return ((ino_t)(fs->fs_ipg * cg));
1132 }
1133 /*
1134 * This is a backstop when we have a deficit in space.
1135 */
1136 for (cg = prefcg; cg < fs->fs_ncg; cg++)
1137 if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
1138 return ((ino_t)(fs->fs_ipg * cg));
1139 for (cg = 0; cg < prefcg; cg++)
1140 if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
1141 break;
1142 return ((ino_t)(fs->fs_ipg * cg));
1143}
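
/*
 * Illustrative sketch (not part of the original source): the selection
 * loops above are a wrap-around scan from the parent's cylinder group
 * that accepts the first group passing every threshold.  The scan
 * shape in isolation, over a hypothetical per-cg statistics array;
 * compiled out:
 */
#if 0
struct cgstat_sketch {
	u_int	ndir;		/* directories in this cg */
	u_int	nifree;		/* free inodes in this cg */
	u_int	nbfree;		/* free blocks in this cg */
};

static int
dirpref_scan_sketch(struct cgstat_sketch *st, u_int ncg, u_int prefcg,
    u_int maxndir, u_int minifree, u_int minbfree)
{
	u_int i, cg;

	for (i = 0; i < ncg; i++) {
		cg = (prefcg + i) % ncg;	/* wrap past the last cg */
		if (st[cg].ndir < maxndir &&
		    st[cg].nifree >= minifree &&
		    st[cg].nbfree >= minbfree)
			return (cg);
	}
	return (-1);		/* caller falls back to the avgifree pass */
}
#endif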
1144
1145/*
1146 * Select the desired position for the next block in a file. The file is
1147 * logically divided into sections. The first section is composed of the
1148 * direct blocks. Each additional section contains fs_maxbpg blocks.
1149 *
1150 * If no blocks have been allocated in the first section, the policy is to
1151 * request a block in the same cylinder group as the inode that describes
1152 * the file. If no blocks have been allocated in any other section, the
1153 * policy is to place the section in a cylinder group with a greater than
1154 * average number of free blocks. An appropriate cylinder group is found
1155 * by using a rotor that sweeps the cylinder groups. When a new group of
1156 * blocks is needed, the sweep begins in the cylinder group following the
1157 * cylinder group from which the previous allocation was made. The sweep
1158 * continues until a cylinder group with greater than the average number
1159 * of free blocks is found. If the allocation is for the first block in an
1160 * indirect block, the information on the previous allocation is unavailable;
1161 * here a best guess is made based upon the logical block number being
1162 * allocated.
1163 *
1164 * If a section is already partially allocated, the policy is to
1165 * contiguously allocate fs_maxcontig blocks. The end of one of these
1166 * contiguous blocks and the beginning of the next is laid out
1167 * contiguously if possible.
1168 */
1169ufs2_daddr_t
1170ffs_blkpref_ufs1(ip, lbn, indx, bap)
1171 struct inode *ip;
1172 ufs_lbn_t lbn;
1173 int indx;
1174 ufs1_daddr_t *bap;
1175{
1176 struct fs *fs;
1177 u_int cg;
1178 u_int avgbfree, startcg;
1179
1180 mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
1181 fs = ip->i_fs;
1182 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
1183 if (lbn < NDADDR + NINDIR(fs)) {
1184 cg = ino_to_cg(fs, ip->i_number);
1185 return (cgbase(fs, cg) + fs->fs_frag);
1186 }
1187 /*
1188 * Find a cylinder with greater than average number of
1189 * unused data blocks.
1190 */
1191 if (indx == 0 || bap[indx - 1] == 0)
1192 startcg =
1193 ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
1194 else
1195 startcg = dtog(fs, bap[indx - 1]) + 1;
1196 startcg %= fs->fs_ncg;
1197 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
1198 for (cg = startcg; cg < fs->fs_ncg; cg++)
1199 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1200 fs->fs_cgrotor = cg;
1201 return (cgbase(fs, cg) + fs->fs_frag);
1202 }
1203 for (cg = 0; cg <= startcg; cg++)
1204 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1205 fs->fs_cgrotor = cg;
1206 return (cgbase(fs, cg) + fs->fs_frag);
1207 }
1208 return (0);
1209 }
1210 /*
1211 * We just always try to lay things out contiguously.
1212 */
1213 return (bap[indx - 1] + fs->fs_frag);
1214}
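
/*
 * Illustrative sketch (not part of the original source): when a new
 * section begins, the code above sweeps forward from `startcg',
 * wrapping at fs_ncg, for the first group holding at least the
 * average number of free blocks.  The sweep in isolation, over a
 * hypothetical per-cg free-block array; compiled out:
 */
#if 0
static int
rotor_sweep_sketch(u_int *nbfree, u_int ncg, u_int startcg, u_int avgbfree)
{
	u_int cg;

	for (cg = startcg; cg < ncg; cg++)
		if (nbfree[cg] >= avgbfree)
			return (cg);
	for (cg = 0; cg <= startcg; cg++)	/* wrap around to the start */
		if (nbfree[cg] >= avgbfree)
			return (cg);
	return (-1);
}
#endif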
1215
1216/*
1217 * Same as above, but for UFS2
1218 */
1219ufs2_daddr_t
1220ffs_blkpref_ufs2(ip, lbn, indx, bap)
1221 struct inode *ip;
1222 ufs_lbn_t lbn;
1223 int indx;
1224 ufs2_daddr_t *bap;
1225{
1226 struct fs *fs;
1227 u_int cg;
1228 u_int avgbfree, startcg;
1229
1230 mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
1231 fs = ip->i_fs;
1232 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
1233 if (lbn < NDADDR + NINDIR(fs)) {
1234 cg = ino_to_cg(fs, ip->i_number);
1235 return (cgbase(fs, cg) + fs->fs_frag);
1236 }
1237 /*
1238 * Find a cylinder with greater than average number of
1239 * unused data blocks.
1240 */
1241 if (indx == 0 || bap[indx - 1] == 0)
1242 startcg =
1243 ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
1244 else
1245 startcg = dtog(fs, bap[indx - 1]) + 1;
1246 startcg %= fs->fs_ncg;
1247 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
1248 for (cg = startcg; cg < fs->fs_ncg; cg++)
1249 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1250 fs->fs_cgrotor = cg;
1251 return (cgbase(fs, cg) + fs->fs_frag);
1252 }
1253 for (cg = 0; cg <= startcg; cg++)
1254 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1255 fs->fs_cgrotor = cg;
1256 return (cgbase(fs, cg) + fs->fs_frag);
1257 }
1258 return (0);
1259 }
1260 /*
1261 * We just always try to lay things out contiguously.
1262 */
1263 return (bap[indx - 1] + fs->fs_frag);
1264}
1265
1266/*
1267 * Implement the cylinder overflow algorithm.
1268 *
1269 * The policy implemented by this algorithm is:
1270 * 1) allocate the block in its requested cylinder group.
1270 * 2) quadratically rehash on the cylinder group number.
1272 * 3) brute force search for a free block.
1273 *
1274 * Must be called with the UFS lock held. Will release the lock on success
1275 * and return with it held on failure.
1276 */
1277/*VARARGS5*/
1278static ufs2_daddr_t
1279ffs_hashalloc(ip, cg, pref, size, allocator)
1280 struct inode *ip;
1281 u_int cg;
1282 ufs2_daddr_t pref;
1283 int size; /* size for data blocks, mode for inodes */
1284 allocfcn_t *allocator;
1285{
1286 struct fs *fs;
1287 ufs2_daddr_t result;
1288 u_int i, icg = cg;
1289
1290 mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
1291#ifdef INVARIANTS
1292 if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
1293 panic("ffs_hashalloc: allocation on suspended filesystem");
1294#endif
1295 fs = ip->i_fs;
1296 /*
1297 * 1: preferred cylinder group
1298 */
1299 result = (*allocator)(ip, cg, pref, size);
1300 if (result)
1301 return (result);
1302 /*
1303 * 2: quadratic rehash
1304 */
1305 for (i = 1; i < fs->fs_ncg; i *= 2) {
1306 cg += i;
1307 if (cg >= fs->fs_ncg)
1308 cg -= fs->fs_ncg;
1309 result = (*allocator)(ip, cg, 0, size);
1310 if (result)
1311 return (result);
1312 }
1313 /*
1314 * 3: brute force search
1315 * Note that we start at i == 2, since 0 was checked initially,
1316 * and 1 is always checked in the quadratic rehash.
1317 */
1318 cg = (icg + 2) % fs->fs_ncg;
1319 for (i = 2; i < fs->fs_ncg; i++) {
1320 result = (*allocator)(ip, cg, 0, size);
1321 if (result)
1322 return (result);
1323 cg++;
1324 if (cg == fs->fs_ncg)
1325 cg = 0;
1326 }
1327 return (0);
1328}
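
/*
 * Illustrative sketch (not part of the original source): the function
 * above probes the preferred group, then groups at offsets 1, 1+2,
 * 1+2+4, ... (mod fs_ncg), and finally scans linearly from two groups
 * past the original.  The probe order in isolation; compiled out:
 */
#if 0
static u_int
hashalloc_probes_sketch(u_int icg, u_int ncg, u_int *out, u_int max)
{
	u_int i, cg, n;

	n = 0;
	cg = icg;
	if (n < max)
		out[n++] = cg;			/* 1: preferred group */
	for (i = 1; i < ncg; i *= 2) {		/* 2: quadratic rehash */
		cg = (cg + i) % ncg;
		if (n < max)
			out[n++] = cg;
	}
	cg = (icg + 2) % ncg;			/* 3: brute force scan */
	for (i = 2; i < ncg; i++) {
		if (n < max)
			out[n++] = cg;
		cg = (cg + 1) % ncg;
	}
	return (n);
}
#endif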
1329
1330/*
1331 * Determine whether a fragment can be extended.
1332 *
1333 * Check to see if the necessary fragments are available, and
1334 * if they are, allocate them.
1335 */
1336static ufs2_daddr_t
1337ffs_fragextend(ip, cg, bprev, osize, nsize)
1338 struct inode *ip;
1339 u_int cg;
1340 ufs2_daddr_t bprev;
1341 int osize, nsize;
1342{
1343 struct fs *fs;
1344 struct cg *cgp;
1345 struct buf *bp;
1346 struct ufsmount *ump;
1347 int nffree;
1348 long bno;
1349 int frags, bbase;
1350 int i, error;
1351 u_int8_t *blksfree;
1352
1353 ump = ip->i_ump;
1354 fs = ip->i_fs;
1355 if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
1356 return (0);
1357 frags = numfrags(fs, nsize);
1358 bbase = fragnum(fs, bprev);
1359 if (bbase > fragnum(fs, (bprev + frags - 1))) {
1360 /* cannot extend across a block boundary */
1361 return (0);
1362 }
1363 UFS_UNLOCK(ump);
1364 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
1365 (int)fs->fs_cgsize, NOCRED, &bp);
1366 if (error)
1367 goto fail;
1368 cgp = (struct cg *)bp->b_data;
1369 if (!cg_chkmagic(cgp))
1370 goto fail;
1371 bp->b_xflags |= BX_BKGRDWRITE;
1372 cgp->cg_old_time = cgp->cg_time = time_second;
1373 bno = dtogd(fs, bprev);
1374 blksfree = cg_blksfree(cgp);
1375 for (i = numfrags(fs, osize); i < frags; i++)
1376 if (isclr(blksfree, bno + i))
1377 goto fail;
1378 /*
1379 * the current fragment can be extended
1380 * deduct the count on fragment being extended into
1381 * increase the count on the remaining fragment (if any)
1382 * allocate the extended piece
1383 */
1384 for (i = frags; i < fs->fs_frag - bbase; i++)
1385 if (isclr(blksfree, bno + i))
1386 break;
1387 cgp->cg_frsum[i - numfrags(fs, osize)]--;
1388 if (i != frags)
1389 cgp->cg_frsum[i - frags]++;
1390 for (i = numfrags(fs, osize), nffree = 0; i < frags; i++) {
1391 clrbit(blksfree, bno + i);
1392 cgp->cg_cs.cs_nffree--;
1393 nffree++;
1394 }
1395 UFS_LOCK(ump);
1396 fs->fs_cstotal.cs_nffree -= nffree;
1397 fs->fs_cs(fs, cg).cs_nffree -= nffree;
1398 fs->fs_fmod = 1;
1399 ACTIVECLEAR(fs, cg);
1400 UFS_UNLOCK(ump);
1401 if (DOINGSOFTDEP(ITOV(ip)))
1402 softdep_setup_blkmapdep(bp, UFSTOVFS(ump), bprev);
1403 bdwrite(bp);
1404 return (bprev);
1405
1406fail:
1407 brelse(bp);
1408 UFS_LOCK(ump);
1409 return (0);
1410
1411}
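
/*
 * Illustrative sketch (not part of the original source): an in-place
 * extension is possible only when the grown run stays inside one
 * fs_frag-sized block and every additional frag is still free.  The
 * feasibility test in isolation, assuming a hypothetical isfree()
 * predicate over the cg free-frag bitmap; compiled out:
 */
#if 0
static int
fragextend_ok_sketch(int (*isfree)(long), long bno, int bbase,
    int ofrags, int nfrags, int frag)
{
	int i;

	if (bbase + nfrags > frag)	/* would cross a block boundary */
		return (0);
	for (i = ofrags; i < nfrags; i++)
		if (!isfree(bno + i))	/* a needed frag is taken */
			return (0);
	return (1);
}
#endif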
1412
1413/*
1414 * Determine whether a block can be allocated.
1415 *
1416 * Check to see if a block of the appropriate size is available,
1417 * and if it is, allocate it.
1418 */
1419static ufs2_daddr_t
1420ffs_alloccg(ip, cg, bpref, size)
1421 struct inode *ip;
1422 u_int cg;
1423 ufs2_daddr_t bpref;
1424 int size;
1425{
1426 struct fs *fs;
1427 struct cg *cgp;
1428 struct buf *bp;
1429 struct ufsmount *ump;
1430 ufs1_daddr_t bno;
1431 ufs2_daddr_t blkno;
1432 int i, allocsiz, error, frags;
1433 u_int8_t *blksfree;
1434
1435 ump = ip->i_ump;
1436 fs = ip->i_fs;
1437 if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
1438 return (0);
1439 UFS_UNLOCK(ump);
1440 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
1441 (int)fs->fs_cgsize, NOCRED, &bp);
1442 if (error)
1443 goto fail;
1444 cgp = (struct cg *)bp->b_data;
1445 if (!cg_chkmagic(cgp) ||
1446 (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize))
1447 goto fail;
1448 bp->b_xflags |= BX_BKGRDWRITE;
1449 cgp->cg_old_time = cgp->cg_time = time_second;
1450 if (size == fs->fs_bsize) {
1451 UFS_LOCK(ump);
1452 blkno = ffs_alloccgblk(ip, bp, bpref);
1453 ACTIVECLEAR(fs, cg);
1454 UFS_UNLOCK(ump);
1455 bdwrite(bp);
1456 return (blkno);
1457 }
1458 /*
1459 * check to see if any fragments are already available
1460 * allocsiz is the size which will be allocated, hacking
1461 * it down to a smaller size if necessary
1462 */
1463 blksfree = cg_blksfree(cgp);
1464 frags = numfrags(fs, size);
1465 for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
1466 if (cgp->cg_frsum[allocsiz] != 0)
1467 break;
1468 if (allocsiz == fs->fs_frag) {
1469 /*
1470 * no fragments were available, so a block will be
1471 * allocated, and hacked up
1472 */
1473 if (cgp->cg_cs.cs_nbfree == 0)
1474 goto fail;
1475 UFS_LOCK(ump);
1476 blkno = ffs_alloccgblk(ip, bp, bpref);
1477 bno = dtogd(fs, blkno);
1478 for (i = frags; i < fs->fs_frag; i++)
1479 setbit(blksfree, bno + i);
1480 i = fs->fs_frag - frags;
1481 cgp->cg_cs.cs_nffree += i;
1482 fs->fs_cstotal.cs_nffree += i;
1483 fs->fs_cs(fs, cg).cs_nffree += i;
1484 fs->fs_fmod = 1;
1485 cgp->cg_frsum[i]++;
1486 ACTIVECLEAR(fs, cg);
1487 UFS_UNLOCK(ump);
1488 bdwrite(bp);
1489 return (blkno);
1490 }
1491 bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
1492 if (bno < 0)
1493 goto fail;
1494 for (i = 0; i < frags; i++)
1495 clrbit(blksfree, bno + i);
1496 cgp->cg_cs.cs_nffree -= frags;
1497 cgp->cg_frsum[allocsiz]--;
1498 if (frags != allocsiz)
1499 cgp->cg_frsum[allocsiz - frags]++;
1500 UFS_LOCK(ump);
1501 fs->fs_cstotal.cs_nffree -= frags;
1502 fs->fs_cs(fs, cg).cs_nffree -= frags;
1503 fs->fs_fmod = 1;
1504 blkno = cgbase(fs, cg) + bno;
1505 ACTIVECLEAR(fs, cg);
1506 UFS_UNLOCK(ump);
1507 if (DOINGSOFTDEP(ITOV(ip)))
1508 softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno);
1509 bdwrite(bp);
1510 return (blkno);
1511
1512fail:
1513 brelse(bp);
1514 UFS_LOCK(ump);
1515 return (0);
1516}
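
/*
 * Illustrative sketch (not part of the original source): cg_frsum[i]
 * counts the free runs of exactly i frags, so the search above takes
 * the smallest run size that satisfies the request; carving `frags'
 * frags out of an `allocsiz' run leaves an (allocsiz - frags) run
 * behind.  The summary bookkeeping in isolation; compiled out:
 */
#if 0
static int
frsum_alloc_sketch(int32_t *frsum, int nfrag, int frags)
{
	int allocsiz;

	for (allocsiz = frags; allocsiz < nfrag; allocsiz++)
		if (frsum[allocsiz] != 0)
			break;
	if (allocsiz == nfrag)
		return (-1);		/* no run: break up a full block */
	frsum[allocsiz]--;		/* consume the run... */
	if (frags != allocsiz)
		frsum[allocsiz - frags]++; /* ...and record the leftover */
	return (allocsiz);
}
#endif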
1517
1518/*
1519 * Allocate a block in a cylinder group.
1520 *
1521 * This algorithm implements the following policy:
1522 * 1) allocate the requested block.
1523 * 2) allocate a rotationally optimal block in the same cylinder.
1524 * 3) allocate the next available block on the block rotor for the
1525 * specified cylinder group.
1526 * Note that this routine only allocates fs_bsize blocks; these
1527 * blocks may be fragmented by the routine that allocates them.
1528 */
1529static ufs2_daddr_t
1530ffs_alloccgblk(ip, bp, bpref)
1531 struct inode *ip;
1532 struct buf *bp;
1533 ufs2_daddr_t bpref;
1534{
1535 struct fs *fs;
1536 struct cg *cgp;
1537 struct ufsmount *ump;
1538 ufs1_daddr_t bno;
1539 ufs2_daddr_t blkno;
1540 u_int8_t *blksfree;
1541
1542 fs = ip->i_fs;
1543 ump = ip->i_ump;
1544 mtx_assert(UFS_MTX(ump), MA_OWNED);
1545 cgp = (struct cg *)bp->b_data;
1546 blksfree = cg_blksfree(cgp);
1547 if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
1548 bpref = cgp->cg_rotor;
1549 } else {
1550 bpref = blknum(fs, bpref);
1551 bno = dtogd(fs, bpref);
1552 /*
1553 * if the requested block is available, use it
1554 */
1555 if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
1556 goto gotit;
1557 }
1558 /*
1559 * Take the next available block in this cylinder group.
1560 */
1561 bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
1562 if (bno < 0)
1563 return (0);
1564 cgp->cg_rotor = bno;
1565gotit:
1566 blkno = fragstoblks(fs, bno);
1567 ffs_clrblock(fs, blksfree, (long)blkno);
1568 ffs_clusteracct(ump, fs, cgp, blkno, -1);
1569 cgp->cg_cs.cs_nbfree--;
1570 fs->fs_cstotal.cs_nbfree--;
1571 fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
1572 fs->fs_fmod = 1;
1573 blkno = cgbase(fs, cgp->cg_cgx) + bno;
1574 /* XXX Fixme. */
1575 UFS_UNLOCK(ump);
1576 if (DOINGSOFTDEP(ITOV(ip)))
1577 softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno);
1578 UFS_LOCK(ump);
1579 return (blkno);
1580}
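
/*
 * Illustrative sketch (not part of the original source): the policy
 * above reduces to "take the preferred block when it is wholly free,
 * else resume from the per-cg rotor".  In isolation, assuming a
 * hypothetical isblock() predicate; compiled out:
 */
#if 0
static long
alloccgblk_pick_sketch(int (*isblock)(long), long pref, long rotor)
{

	if (pref != 0 && isblock(pref))
		return (pref);		/* honor the preference */
	return (rotor);			/* else next-fit from the rotor */
}
#endif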
1581
1582/*
1583 * Determine whether a cluster can be allocated.
1584 *
1585 * We do not currently check for optimal rotational layout if there
1586 * are multiple choices in the same cylinder group. Instead we just
1587 * take the first one that we find following bpref.
1588 */
1589static ufs2_daddr_t
1590ffs_clusteralloc(ip, cg, bpref, len)
1591 struct inode *ip;
1592 u_int cg;
1593 ufs2_daddr_t bpref;
1594 int len;
1595{
1596 struct fs *fs;
1597 struct cg *cgp;
1598 struct buf *bp;
1599 struct ufsmount *ump;
1600 int i, run, bit, map, got;
1601 ufs2_daddr_t bno;
1602 u_char *mapp;
1603 int32_t *lp;
1604 u_int8_t *blksfree;
1605
1606 fs = ip->i_fs;
1607 ump = ip->i_ump;
1608 if (fs->fs_maxcluster[cg] < len)
1609 return (0);
1610 UFS_UNLOCK(ump);
1611 if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
1612 NOCRED, &bp))
1613 goto fail_lock;
1614 cgp = (struct cg *)bp->b_data;
1615 if (!cg_chkmagic(cgp))
1616 goto fail_lock;
1617 bp->b_xflags |= BX_BKGRDWRITE;
1618 /*
1619 * Check to see if a cluster of the needed size (or bigger) is
1620 * available in this cylinder group.
1621 */
1622 lp = &cg_clustersum(cgp)[len];
1623 for (i = len; i <= fs->fs_contigsumsize; i++)
1624 if (*lp++ > 0)
1625 break;
1626 if (i > fs->fs_contigsumsize) {
1627 /*
1628 * This is the first time looking for a cluster in this
1629 * cylinder group. Update the cluster summary information
1630 * to reflect the true maximum sized cluster so that
1631 * future cluster allocation requests can avoid reading
1632 * the cylinder group map only to find no clusters.
1633 */
1634 lp = &cg_clustersum(cgp)[len - 1];
1635 for (i = len - 1; i > 0; i--)
1636 if (*lp-- > 0)
1637 break;
1638 UFS_LOCK(ump);
1639 fs->fs_maxcluster[cg] = i;
1640 goto fail;
1641 }
1642 /*
1643 * Search the cluster map to find a big enough cluster.
1644 * We take the first one that we find, even if it is larger
1645 * than we need as we prefer to get one close to the previous
1646 * block allocation. We do not search before the current
1647 * preference point as we do not want to allocate a block
1648 * that is allocated before the previous one (as we will
1649 * then have to wait for another pass of the elevator
1650 * algorithm before it will be read). We prefer to fail and
1651 * be recalled to try an allocation in the next cylinder group.
1652 */
1653 if (dtog(fs, bpref) != cg)
1654 bpref = 0;
1655 else
1656 bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref)));
1657 mapp = &cg_clustersfree(cgp)[bpref / NBBY];
1658 map = *mapp++;
1659 bit = 1 << (bpref % NBBY);
1660 for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
1661 if ((map & bit) == 0) {
1662 run = 0;
1663 } else {
1664 run++;
1665 if (run == len)
1666 break;
1667 }
1668 if ((got & (NBBY - 1)) != (NBBY - 1)) {
1669 bit <<= 1;
1670 } else {
1671 map = *mapp++;
1672 bit = 1;
1673 }
1674 }
1675 if (got >= cgp->cg_nclusterblks)
1676 goto fail_lock;
1677 /*
1678 * Allocate the cluster that we have found.
1679 */
1680 blksfree = cg_blksfree(cgp);
1681 for (i = 1; i <= len; i++)
1682 if (!ffs_isblock(fs, blksfree, got - run + i))
1683 panic("ffs_clusteralloc: map mismatch");
1684 bno = cgbase(fs, cg) + blkstofrags(fs, got - run + 1);
1685 if (dtog(fs, bno) != cg)
1686 panic("ffs_clusteralloc: allocated out of group");
1687 len = blkstofrags(fs, len);
1688 UFS_LOCK(ump);
1689 for (i = 0; i < len; i += fs->fs_frag)
1690 if (ffs_alloccgblk(ip, bp, bno + i) != bno + i)
1691 panic("ffs_clusteralloc: lost block");
1692 ACTIVECLEAR(fs, cg);
1693 UFS_UNLOCK(ump);
1694 bdwrite(bp);
1695 return (bno);
1696
1697fail_lock:
1698 UFS_LOCK(ump);
1699fail:
1700 brelse(bp);
1701 return (0);
1702}
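
/*
 * Illustrative sketch (not part of the original source): the search
 * above walks the free-cluster bitmap one bit per block, restarting
 * its run count at every allocated block and stopping at the first
 * run of `len' free blocks.  The scan in isolation, over a plain
 * bitmap; compiled out:
 */
#if 0
static int
cluster_scan_sketch(u_char *map, int nblks, int start, int len)
{
	u_char *mapp, m;
	int got, run, bit;

	mapp = &map[start / NBBY];
	m = *mapp++;
	bit = 1 << (start % NBBY);
	for (run = 0, got = start; got < nblks; got++) {
		if ((m & bit) == 0)
			run = 0;		/* allocated: restart run */
		else if (++run == len)
			return (got - run + 1);	/* first block of run */
		if ((got & (NBBY - 1)) != (NBBY - 1))
			bit <<= 1;		/* next bit, same byte */
		else {
			m = *mapp++;		/* advance to next byte */
			bit = 1;
		}
	}
	return (-1);		/* no run: caller tries the next cg */
}
#endif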
1703
1704/*
1705 * Determine whether an inode can be allocated.
1706 *
1707 * Check to see if an inode is available, and if it is,
1708 * allocate it using the following policy:
1709 * 1) allocate the requested inode.
1710 * 2) allocate the next available inode after the requested
1711 * inode in the specified cylinder group.
1712 */
1713static ufs2_daddr_t
1714ffs_nodealloccg(ip, cg, ipref, mode)
1715 struct inode *ip;
1716 u_int cg;
1717 ufs2_daddr_t ipref;
1718 int mode;
1719{
1720 struct fs *fs;
1721 struct cg *cgp;
1722 struct buf *bp, *ibp;
1723 struct ufsmount *ump;
1724 u_int8_t *inosused;
1725 struct ufs2_dinode *dp2;
1726 int error, start, len, loc, map, i;
1727
1728 fs = ip->i_fs;
1729 ump = ip->i_ump;
1730 if (fs->fs_cs(fs, cg).cs_nifree == 0)
1731 return (0);
1732 UFS_UNLOCK(ump);
1733 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
1734 (int)fs->fs_cgsize, NOCRED, &bp);
1735 if (error) {
1736 brelse(bp);
1737 UFS_LOCK(ump);
1738 return (0);
1739 }
1740 cgp = (struct cg *)bp->b_data;
1741 if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
1742 brelse(bp);
1743 UFS_LOCK(ump);
1744 return (0);
1745 }
1746 bp->b_xflags |= BX_BKGRDWRITE;
1747 cgp->cg_old_time = cgp->cg_time = time_second;
1748 inosused = cg_inosused(cgp);
1749 if (ipref) {
1750 ipref %= fs->fs_ipg;
1751 if (isclr(inosused, ipref))
1752 goto gotit;
1753 }
1754 start = cgp->cg_irotor / NBBY;
1755 len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
1756 loc = skpc(0xff, len, &inosused[start]);
1757 if (loc == 0) {
1758 len = start + 1;
1759 start = 0;
1760 loc = skpc(0xff, len, &inosused[0]);
1761 if (loc == 0) {
1762 printf("cg = %d, irotor = %ld, fs = %s\n",
1763 cg, (long)cgp->cg_irotor, fs->fs_fsmnt);
1764 panic("ffs_nodealloccg: map corrupted");
1765 /* NOTREACHED */
1766 }
1767 }
1768 i = start + len - loc;
1769 map = inosused[i];
1770 ipref = i * NBBY;
1771 for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
1772 if ((map & i) == 0) {
1773 cgp->cg_irotor = ipref;
1774 goto gotit;
1775 }
1776 }
1777 printf("fs = %s\n", fs->fs_fsmnt);
1778 panic("ffs_nodealloccg: block not in map");
1779 /* NOTREACHED */
1780gotit:
1781 /*
1782 * Check to see if we need to initialize more inodes.
1783 */
1784 ibp = NULL;
1785 if (fs->fs_magic == FS_UFS2_MAGIC &&
1786 ipref + INOPB(fs) > cgp->cg_initediblk &&
1787 cgp->cg_initediblk < cgp->cg_niblk) {
1788 ibp = getblk(ip->i_devvp, fsbtodb(fs,
1789 ino_to_fsba(fs, cg * fs->fs_ipg + cgp->cg_initediblk)),
1790 (int)fs->fs_bsize, 0, 0, 0);
1791 bzero(ibp->b_data, (int)fs->fs_bsize);
1792 dp2 = (struct ufs2_dinode *)(ibp->b_data);
1793 for (i = 0; i < INOPB(fs); i++) {
1794 dp2->di_gen = arc4random() / 2 + 1;
1795 dp2++;
1796 }
1797 cgp->cg_initediblk += INOPB(fs);
1798 }
1799 UFS_LOCK(ump);
1800 ACTIVECLEAR(fs, cg);
1801 setbit(inosused, ipref);
1802 cgp->cg_cs.cs_nifree--;
1803 fs->fs_cstotal.cs_nifree--;
1804 fs->fs_cs(fs, cg).cs_nifree--;
1805 fs->fs_fmod = 1;
1806 if ((mode & IFMT) == IFDIR) {
1807 cgp->cg_cs.cs_ndir++;
1808 fs->fs_cstotal.cs_ndir++;
1809 fs->fs_cs(fs, cg).cs_ndir++;
1810 }
1811 UFS_UNLOCK(ump);
1812 if (DOINGSOFTDEP(ITOV(ip)))
1813 softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref);
1814 bdwrite(bp);
1815 if (ibp != NULL)
1816 bawrite(ibp);
1817 return ((ino_t)(cg * fs->fs_ipg + ipref));
1818}
1819
1820/*
1821 * check that no fragment of a block is marked free in the map
1822 */
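/*
 * A set bit in cg_blksfree() marks a free fragment, and a full block
 * spans fs_frag consecutive bits.  Worked example with fs_frag == 4:
 * two blocks share each map byte, so block h lives in byte cp[h >> 1]
 * at the nibble selected by (h & 0x1) << 2; masking with 0x0f shifted
 * to that nibble isolates the block's four fragment bits, and the block
 * reads as wholly allocated only when all of them are clear.  The other
 * cases scale the same arithmetic to 8, 2, or 1 fragments per block.
 */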
1823static int
1824ffs_isfreeblock(struct fs *fs, u_char *cp, ufs1_daddr_t h)
1825{
1826
1827 switch ((int)fs->fs_frag) {
1828 case 8:
1829 return (cp[h] == 0);
1830 case 4:
1831 return ((cp[h >> 1] & (0x0f << ((h & 0x1) << 2))) == 0);
1832 case 2:
1833 return ((cp[h >> 2] & (0x03 << ((h & 0x3) << 1))) == 0);
1834 case 1:
1835 return ((cp[h >> 3] & (0x01 << (h & 0x7))) == 0);
1836 default:
1837 panic("ffs_isfreeblock");
1838 }
1839 return (0);
1840}
1841
1842/*
1843 * Free a block or fragment.
1844 *
1845 * The specified block or fragment is placed back in the
1846 * free map. If a fragment is deallocated, a possible
1847 * block reassembly is checked.
1848 */
1849void
1850ffs_blkfree(ump, fs, devvp, bno, size, inum)
1851 struct ufsmount *ump;
1852 struct fs *fs;
1853 struct vnode *devvp;
1854 ufs2_daddr_t bno;
1855 long size;
1856 ino_t inum;
1857{
1858 struct cg *cgp;
1859 struct buf *bp;
1860 ufs1_daddr_t fragno, cgbno;
1861 ufs2_daddr_t cgblkno;
1862 int i, blk, frags, bbase;
1863 u_int cg;
1864 u_int8_t *blksfree;
1865 struct cdev *dev;
1866
1867 cg = dtog(fs, bno);
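	/*
	 * devvp determines the addressing scheme: a snapshot file (VREG)
	 * is addressed in logical filesystem blocks, a real disk device
	 * in device blocks.  On a copy-on-write device the block is
	 * first offered to ffs_snapblkfree(); a nonzero return means a
	 * snapshot claimed the block and it must not be freed here.
	 */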
1868 if (devvp->v_type == VREG) {
1869 /* devvp is a snapshot */
1870 dev = VTOI(devvp)->i_devvp->v_rdev;
1871 cgblkno = fragstoblks(fs, cgtod(fs, cg));
1872 } else {
1873 /* devvp is a normal disk device */
1874 dev = devvp->v_rdev;
1875 cgblkno = fsbtodb(fs, cgtod(fs, cg));
1876 ASSERT_VOP_LOCKED(devvp, "ffs_blkfree");
1877 if ((devvp->v_vflag & VV_COPYONWRITE) &&
1878 ffs_snapblkfree(fs, devvp, bno, size, inum))
1879 return;
1880 }
1881#ifdef INVARIANTS
1882 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
1883 fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
1884 printf("dev=%s, bno = %jd, bsize = %ld, size = %ld, fs = %s\n",
1885 devtoname(dev), (intmax_t)bno, (long)fs->fs_bsize,
1886 size, fs->fs_fsmnt);
1887 panic("ffs_blkfree: bad size");
1888 }
1889#endif
1890 if ((u_int)bno >= fs->fs_size) {
1891 printf("bad block %jd, ino %lu\n", (intmax_t)bno,
1892 (u_long)inum);
1893 ffs_fserr(fs, inum, "bad block");
1894 return;
1895 }
1896 if (bread(devvp, cgblkno, (int)fs->fs_cgsize, NOCRED, &bp)) {
1897 brelse(bp);
1898 return;
1899 }
1900 cgp = (struct cg *)bp->b_data;
1901 if (!cg_chkmagic(cgp)) {
1902 brelse(bp);
1903 return;
1904 }
1905 bp->b_xflags |= BX_BKGRDWRITE;
1906 cgp->cg_old_time = cgp->cg_time = time_second;
1907 cgbno = dtogd(fs, bno);
1908 blksfree = cg_blksfree(cgp);
1909 UFS_LOCK(ump);
1910 if (size == fs->fs_bsize) {
1911 fragno = fragstoblks(fs, cgbno);
1912 if (!ffs_isfreeblock(fs, blksfree, fragno)) {
1913 if (devvp->v_type == VREG) {
1914 UFS_UNLOCK(ump);
1915 /* devvp is a snapshot */
1916 brelse(bp);
1917 return;
1918 }
1919 printf("dev = %s, block = %jd, fs = %s\n",
1920 devtoname(dev), (intmax_t)bno, fs->fs_fsmnt);
1921 panic("ffs_blkfree: freeing free block");
1922 }
1923 ffs_setblock(fs, blksfree, fragno);
1924 ffs_clusteracct(ump, fs, cgp, fragno, 1);
1925 cgp->cg_cs.cs_nbfree++;
1926 fs->fs_cstotal.cs_nbfree++;
1927 fs->fs_cs(fs, cg).cs_nbfree++;
1928 } else {
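		/*
		 * Fragment case: cg_frsum[] counts, for each run length,
		 * how many free-fragment runs of that length exist in the
		 * group.  The steps below remove the block's old runs from
		 * the tally, mark the freed fragments in the map, and then
		 * re-add the resulting runs, keeping the summary consistent
		 * with the bitmap.
		 */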
1929 bbase = cgbno - fragnum(fs, cgbno);
1930 /*
1931 * decrement the counts associated with the old frags
1932 */
1933 blk = blkmap(fs, blksfree, bbase);
1934 ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
1935 /*
1936 * deallocate the fragment
1937 */
1938 frags = numfrags(fs, size);
1939 for (i = 0; i < frags; i++) {
1940 if (isset(blksfree, cgbno + i)) {
1941 printf("dev = %s, block = %jd, fs = %s\n",
1942 devtoname(dev), (intmax_t)(bno + i),
1943 fs->fs_fsmnt);
1944 panic("ffs_blkfree: freeing free frag");
1945 }
1946 setbit(blksfree, cgbno + i);
1947 }
1948 cgp->cg_cs.cs_nffree += i;
1949 fs->fs_cstotal.cs_nffree += i;
1950 fs->fs_cs(fs, cg).cs_nffree += i;
1951 /*
1952 * add back in counts associated with the new frags
1953 */
1954 blk = blkmap(fs, blksfree, bbase);
1955 ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
1956 /*
1957 * if a complete block has been reassembled, account for it
1958 */
1959 fragno = fragstoblks(fs, bbase);
1960 if (ffs_isblock(fs, blksfree, fragno)) {
1961 cgp->cg_cs.cs_nffree -= fs->fs_frag;
1962 fs->fs_cstotal.cs_nffree -= fs->fs_frag;
1963 fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
1964 ffs_clusteracct(ump, fs, cgp, fragno, 1);
1965 cgp->cg_cs.cs_nbfree++;
1966 fs->fs_cstotal.cs_nbfree++;
1967 fs->fs_cs(fs, cg).cs_nbfree++;
1968 }
1969 }
1970 fs->fs_fmod = 1;
1971 ACTIVECLEAR(fs, cg);
1972 UFS_UNLOCK(ump);
1973 bdwrite(bp);
1974}
1975
1976#ifdef INVARIANTS
1977/*
1978 * Verify allocation of a block or fragment. Returns true if block or
1979 * fragment is allocated, false if it is free.
1980 */
1981static int
1982ffs_checkblk(ip, bno, size)
1983 struct inode *ip;
1984 ufs2_daddr_t bno;
1985 long size;
1986{
1987 struct fs *fs;
1988 struct cg *cgp;
1989 struct buf *bp;
1990 ufs1_daddr_t cgbno;
1991 int i, error, frags, free;
1992 u_int8_t *blksfree;
1993
1994 fs = ip->i_fs;
1995 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
1996 printf("bsize = %ld, size = %ld, fs = %s\n",
1997 (long)fs->fs_bsize, size, fs->fs_fsmnt);
1998 panic("ffs_checkblk: bad size");
1999 }
2000 if ((u_int)bno >= fs->fs_size)
2001 panic("ffs_checkblk: bad block %jd", (intmax_t)bno);
2002 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, dtog(fs, bno))),
2003 (int)fs->fs_cgsize, NOCRED, &bp);
2004 if (error)
2005 panic("ffs_checkblk: cg bread failed");
2006 cgp = (struct cg *)bp->b_data;
2007 if (!cg_chkmagic(cgp))
2008 panic("ffs_checkblk: cg magic mismatch");
2009 bp->b_xflags |= BX_BKGRDWRITE;
2010 blksfree = cg_blksfree(cgp);
2011 cgbno = dtogd(fs, bno);
2012 if (size == fs->fs_bsize) {
2013 free = ffs_isblock(fs, blksfree, fragstoblks(fs, cgbno));
2014 } else {
2015 frags = numfrags(fs, size);
2016 for (free = 0, i = 0; i < frags; i++)
2017 if (isset(blksfree, cgbno + i))
2018 free++;
2019 if (free != 0 && free != frags)
2020 panic("ffs_checkblk: partially free fragment");
2021 }
2022 brelse(bp);
2023 return (!free);
2024}
2025#endif /* INVARIANTS */
2026
2027/*
2028 * Free an inode.
2029 */
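/*
 * With soft updates the free is only recorded here and performed later
 * by the soft dependency code; otherwise the inode map is updated
 * synchronously through ffs_freefile() below.
 */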
2030int
2031ffs_vfree(pvp, ino, mode)
2032 struct vnode *pvp;
2033 ino_t ino;
2034 int mode;
2035{
2036 struct inode *ip;
2037
2038 if (DOINGSOFTDEP(pvp)) {
2039 softdep_freefile(pvp, ino, mode);
2040 return (0);
2041 }
2042 ip = VTOI(pvp);
2043 return (ffs_freefile(ip->i_ump, ip->i_fs, ip->i_devvp, ino, mode));
2044}
2045
2046/*
2047 * Do the actual free operation.
2048 * The specified inode is placed back in the free map.
2049 */
2050int
2051ffs_freefile(ump, fs, devvp, ino, mode)
2052 struct ufsmount *ump;
2053 struct fs *fs;
2054 struct vnode *devvp;
2055 ino_t ino;
2056 int mode;
2057{
2058 struct cg *cgp;
2059 struct buf *bp;
2060 ufs2_daddr_t cgbno;
2061 int error;
2062 u_int cg;
2063 u_int8_t *inosused;
2064 struct cdev *dev;
2065
2066 cg = ino_to_cg(fs, ino);
2067 if (devvp->v_type == VREG) {
2068 /* devvp is a snapshot */
2069 dev = VTOI(devvp)->i_devvp->v_rdev;
2070 cgbno = fragstoblks(fs, cgtod(fs, cg));
2071 } else {
2072 /* devvp is a normal disk device */
2073 dev = devvp->v_rdev;
2074 cgbno = fsbtodb(fs, cgtod(fs, cg));
2075 }
2076 if (ino >= fs->fs_ipg * fs->fs_ncg)
2077 panic("ffs_freefile: range: dev = %s, ino = %lu, fs = %s",
2078 devtoname(dev), (u_long)ino, fs->fs_fsmnt);
2079 if ((error = bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp))) {
2080 brelse(bp);
2081 return (error);
2082 }
2083 cgp = (struct cg *)bp->b_data;
2084 if (!cg_chkmagic(cgp)) {
2085 brelse(bp);
2086 return (0);
2087 }
2088 bp->b_xflags |= BX_BKGRDWRITE;
2089 cgp->cg_old_time = cgp->cg_time = time_second;
2090 inosused = cg_inosused(cgp);
2091 ino %= fs->fs_ipg;
2092 if (isclr(inosused, ino)) {
2093 printf("dev = %s, ino = %u, fs = %s\n", devtoname(dev),
2094 ino + cg * fs->fs_ipg, fs->fs_fsmnt);
2095 if (fs->fs_ronly == 0)
2096 panic("ffs_freefile: freeing free inode");
2097 }
2098 clrbit(inosused, ino);
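	/* Pull the rotor back so the next allocation scan finds this slot. */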
2099 if (ino < cgp->cg_irotor)
2100 cgp->cg_irotor = ino;
2101 cgp->cg_cs.cs_nifree++;
2102 UFS_LOCK(ump);
2103 fs->fs_cstotal.cs_nifree++;
2104 fs->fs_cs(fs, cg).cs_nifree++;
2105 if ((mode & IFMT) == IFDIR) {
2106 cgp->cg_cs.cs_ndir--;
2107 fs->fs_cstotal.cs_ndir--;
2108 fs->fs_cs(fs, cg).cs_ndir--;
2109 }
2110 fs->fs_fmod = 1;
2111 ACTIVECLEAR(fs, cg);
2112 UFS_UNLOCK(ump);
2113 bdwrite(bp);
2114 return (0);
2115}
2116
2117/*
2118 * Check to see if a file is free.
2119 */
2120int
2121ffs_checkfreefile(fs, devvp, ino)
2122 struct fs *fs;
2123 struct vnode *devvp;
2124 ino_t ino;
2125{
2126 struct cg *cgp;
2127 struct buf *bp;
2128 ufs2_daddr_t cgbno;
2129 int ret;
2130 u_int cg;
2131 u_int8_t *inosused;
2132
2133 cg = ino_to_cg(fs, ino);
2134 if (devvp->v_type == VREG) {
2135 /* devvp is a snapshot */
2136 cgbno = fragstoblks(fs, cgtod(fs, cg));
2137 } else {
2138 /* devvp is a normal disk device */
2139 cgbno = fsbtodb(fs, cgtod(fs, cg));
2140 }
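	/*
	 * An inode that is out of range, or whose cylinder group cannot
	 * be read or validated, is reported as free (1).
	 */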
2141 if (ino >= fs->fs_ipg * fs->fs_ncg)
2142 return (1);
2143 if (bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp)) {
2144 brelse(bp);
2145 return (1);
2146 }
2147 cgp = (struct cg *)bp->b_data;
2148 if (!cg_chkmagic(cgp)) {
2149 brelse(bp);
2150 return (1);
2151 }
2152 inosused = cg_inosused(cgp);
2153 ino %= fs->fs_ipg;
2154 ret = isclr(inosused, ino);
2155 brelse(bp);
2156 return (ret);
2157}
2158
2159/*
2160 * Find a block of the specified size in the specified cylinder group.
2161 *
2162 * It is a panic if a request is made to find a block when none
2163 * are available.
2164 */
2165static ufs1_daddr_t
2166ffs_mapsearch(fs, cgp, bpref, allocsiz)
2167 struct fs *fs;
2168 struct cg *cgp;
2169 ufs2_daddr_t bpref;
2170 int allocsiz;
2171{
2172 ufs1_daddr_t bno;
2173 int start, len, loc, i;
2174 int blk, field, subfield, pos;
2175 u_int8_t *blksfree;
2176
2177 /*
2178 * find the fragment by searching through the free block
2179 * map for an appropriate bit pattern
2180 */
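	/*
	 * scanc() applies fragtbl[fs_frag] as a per-byte lookup table:
	 * each table entry summarizes which free-fragment run sizes occur
	 * in that byte value, and the mask built from allocsiz selects
	 * bytes that can satisfy the request.  Like skpc(), scanc()
	 * returns the count of bytes remaining at the first match, or 0,
	 * so (start + len - loc) * NBBY below recovers the bit offset.
	 */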
2181 if (bpref)
2182 start = dtogd(fs, bpref) / NBBY;
2183 else
2184 start = cgp->cg_frotor / NBBY;
2185 blksfree = cg_blksfree(cgp);
2186 len = howmany(fs->fs_fpg, NBBY) - start;
2187 loc = scanc((u_int)len, (u_char *)&blksfree[start],
2188 fragtbl[fs->fs_frag],
2189 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2190 if (loc == 0) {
2191 len = start + 1;
2192 start = 0;
2193 loc = scanc((u_int)len, (u_char *)&blksfree[0],
2194 fragtbl[fs->fs_frag],
2195 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2196 if (loc == 0) {
2197 printf("start = %d, len = %d, fs = %s\n",
2198 start, len, fs->fs_fsmnt);
2199 panic("ffs_alloccg: map corrupted");
2200 /* NOTREACHED */
2201 }
2202 }
2203 bno = (start + len - loc) * NBBY;
2204 cgp->cg_frotor = bno;
2205 /*
2206 * found the byte in the map
2207 * sift through the bits to find the selected frag
2208 */
2209 for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
2210 blk = blkmap(fs, blksfree, bno);
2211 blk <<= 1;
2212 field = around[allocsiz];
2213 subfield = inside[allocsiz];
2214 for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
2215 if ((blk & field) == subfield)
2216 return (bno + pos);
2217 field <<= 1;
2218 subfield <<= 1;
2219 }
2220 }
2221 printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
2222 panic("ffs_alloccg: block not in map");
2223 return (-1);
2224}
2225
2226/*
2227 * Update the cluster map because of an allocation or free.
2228 *
2229 * Cnt == 1 means free; cnt == -1 means allocating.
2230 */
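/*
 * The summary counts free clusters by run length: freeing a block that
 * abuts "back" free blocks behind it and "forw" ahead of it creates one
 * run of back + forw + 1 (capped at fs_contigsumsize) and removes the
 * two shorter runs, so the merged length's counter is incremented and
 * the counters for the old back and forw runs, when present, are
 * decremented; with cnt == -1 an allocation applies the inverse.
 * fs_maxcluster[] caches the longest run still available in each group.
 */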
2231void
2232ffs_clusteracct(ump, fs, cgp, blkno, cnt)
2233 struct ufsmount *ump;
2234 struct fs *fs;
2235 struct cg *cgp;
2236 ufs1_daddr_t blkno;
2237 int cnt;
2238{
2239 int32_t *sump;
2240 int32_t *lp;
2241 u_char *freemapp, *mapp;
2242 int i, start, end, forw, back, map, bit;
2243
2244 mtx_assert(UFS_MTX(ump), MA_OWNED);
2245
2246 if (fs->fs_contigsumsize <= 0)
2247 return;
2248 freemapp = cg_clustersfree(cgp);
2249 sump = cg_clustersum(cgp);
2250 /*
2251 * Allocate or clear the actual block.
2252 */
2253 if (cnt > 0)
2254 setbit(freemapp, blkno);
2255 else
2256 clrbit(freemapp, blkno);
2257 /*
2258 * Find the size of the cluster going forward.
2259 */
2260 start = blkno + 1;
2261 end = start + fs->fs_contigsumsize;
2262 if (end >= cgp->cg_nclusterblks)
2263 end = cgp->cg_nclusterblks;
2264 mapp = &freemapp[start / NBBY];
2265 map = *mapp++;
2266 bit = 1 << (start % NBBY);
2267 for (i = start; i < end; i++) {
2268 if ((map & bit) == 0)
2269 break;
2270 if ((i & (NBBY - 1)) != (NBBY - 1)) {
2271 bit <<= 1;
2272 } else {
2273 map = *mapp++;
2274 bit = 1;
2275 }
2276 }
2277 forw = i - start;
2278 /*
2279 * Find the size of the cluster going backward.
2280 */
2281 start = blkno - 1;
2282 end = start - fs->fs_contigsumsize;
2283 if (end < 0)
2284 end = -1;
2285 mapp = &freemapp[start / NBBY];
2286 map = *mapp--;
2287 bit = 1 << (start % NBBY);
2288 for (i = start; i > end; i--) {
2289 if ((map & bit) == 0)
2290 break;
2291 if ((i & (NBBY - 1)) != 0) {
2292 bit >>= 1;
2293 } else {
2294 map = *mapp--;
2295 bit = 1 << (NBBY - 1);
2296 }
2297 }
2298 back = start - i;
2299 /*
2300 * Account for old cluster and the possibly new forward and
2301 * back clusters.
2302 */
2303 i = back + forw + 1;
2304 if (i > fs->fs_contigsumsize)
2305 i = fs->fs_contigsumsize;
2306 sump[i] += cnt;
2307 if (back > 0)
2308 sump[back] -= cnt;
2309 if (forw > 0)
2310 sump[forw] -= cnt;
2311 /*
2312 * Update cluster summary information.
2313 */
2314 lp = &sump[fs->fs_contigsumsize];
2315 for (i = fs->fs_contigsumsize; i > 0; i--)
2316 if (*lp-- > 0)
2317 break;
2318 fs->fs_maxcluster[cgp->cg_cgx] = i;
2319}
2320
2321/*
2322 * Fserr prints the name of a filesystem with an error diagnostic.
2323 *
2324 * The form of the error message is:
2325 * fs: error message
2326 */
2327static void
2328ffs_fserr(fs, inum, cp)
2329 struct fs *fs;
2330 ino_t inum;
2331 char *cp;
2332{
2333 struct thread *td = curthread; /* XXX */
2334 struct proc *p = td->td_proc;
2335
2336 log(LOG_ERR, "pid %d (%s), uid %d inumber %d on %s: %s\n",
2337 p->p_pid, p->p_comm, td->td_ucred->cr_uid, inum, fs->fs_fsmnt, cp);
2338}
2339
2340/*
2341 * This function provides the capability for the fsck program to
2342 * update an active filesystem. Fourteen operations are provided:
2343 *
2344 * adjrefcnt(inode, amt) - adjusts the reference count on the
2345 * specified inode by the specified amount. Under normal
2346 * operation the count should always go down. Decrementing
2347 * the count to zero will cause the inode to be freed.
2348 * adjblkcnt(inode, amt) - adjust the number of blocks used by
2349 *	the specified amount.
2350 * adjndir, adjnbfree, adjnifree, adjnffree, adjnumclusters(amt) -
2351 * adjust the superblock summary.
2352 * freedirs(inode, count) - directory inodes [inode..inode + count - 1]
2353 * are marked as free. Inodes should never have to be marked
2354 * as in use.
2355 * freefiles(inode, count) - file inodes [inode..inode + count - 1]
2356 * are marked as free. Inodes should never have to be marked
2357 * as in use.
2358 * freeblks(blockno, size) - blocks [blockno..blockno + size - 1]
2359 * are marked as free. Blocks should never have to be marked
2360 * as in use.
2361 * setflags(flags, set/clear) - the fs_flags field has the specified
2362 * flags set (second parameter +1) or cleared (second parameter -1).
2363 * setcwd(dirinode) - set the current directory to dirinode in the
2364 * filesystem associated with the snapshot.
2365 * setdotdot(oldvalue, newvalue) - Verify that the inode number for ".."
2366 * in the current directory is oldvalue then change it to newvalue.
2367 * unlink(nameptr, oldvalue) - Verify that the inode number associated
2368 * with nameptr in the current directory is oldvalue then unlink it.
2369 */
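/*
 * Sketch of how fsck(8) can drive these handlers from userland; the
 * field names follow the cmd.* uses below, but see struct fsck_cmd in
 * <ufs/ffs/fs.h> for the authoritative layout:
 *
 *	struct fsck_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.version = FFS_CMD_VERSION;
 *	cmd.handle = fd;	(descriptor open on a file in the fs)
 *	cmd.value = ino;	(inode whose link count is adjusted)
 *	cmd.size = -1;		(signed delta to apply)
 *	if (sysctlbyname("vfs.ffs.adjrefcnt", NULL, NULL, &cmd,
 *	    sizeof(cmd)) == -1)
 *		err(1, "adjrefcnt");
 */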
2370
2371static int sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS);
2372
2373SYSCTL_PROC(_vfs_ffs, FFS_ADJ_REFCNT, adjrefcnt, CTLFLAG_WR|CTLTYPE_STRUCT,
2374 0, 0, sysctl_ffs_fsck, "S,fsck", "Adjust Inode Reference Count");
2375
2376static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_BLKCNT, adjblkcnt, CTLFLAG_WR,
2377 sysctl_ffs_fsck, "Adjust Inode Used Blocks Count");
2378
2379static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NDIR, adjndir, CTLFLAG_WR,
2380 sysctl_ffs_fsck, "Adjust number of directories");
2381
2382static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NBFREE, adjnbfree, CTLFLAG_WR,
2383 sysctl_ffs_fsck, "Adjust number of free blocks");
2384
2385static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NIFREE, adjnifree, CTLFLAG_WR,
2386 sysctl_ffs_fsck, "Adjust number of free inodes");
2387
2388static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NFFREE, adjnffree, CTLFLAG_WR,
2389 sysctl_ffs_fsck, "Adjust number of free frags");
2390
2391static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NUMCLUSTERS, adjnumclusters, CTLFLAG_WR,
2392 sysctl_ffs_fsck, "Adjust number of free clusters");
2393
2394static SYSCTL_NODE(_vfs_ffs, FFS_DIR_FREE, freedirs, CTLFLAG_WR,
2395 sysctl_ffs_fsck, "Free Range of Directory Inodes");
2396
2397static SYSCTL_NODE(_vfs_ffs, FFS_FILE_FREE, freefiles, CTLFLAG_WR,
2398 sysctl_ffs_fsck, "Free Range of File Inodes");
2399
2400static SYSCTL_NODE(_vfs_ffs, FFS_BLK_FREE, freeblks, CTLFLAG_WR,
2401 sysctl_ffs_fsck, "Free Range of Blocks");
2402
2403static SYSCTL_NODE(_vfs_ffs, FFS_SET_FLAGS, setflags, CTLFLAG_WR,
2404 sysctl_ffs_fsck, "Change Filesystem Flags");
2405
2406static SYSCTL_NODE(_vfs_ffs, FFS_SET_CWD, setcwd, CTLFLAG_WR,
2407 sysctl_ffs_fsck, "Set Current Working Directory");
2408
2409static SYSCTL_NODE(_vfs_ffs, FFS_SET_DOTDOT, setdotdot, CTLFLAG_WR,
2410 sysctl_ffs_fsck, "Change Value of .. Entry");
2411
2412static SYSCTL_NODE(_vfs_ffs, FFS_UNLINK, unlink, CTLFLAG_WR,
2413 sysctl_ffs_fsck, "Unlink a Duplicate Name");
2414
2415#ifdef DEBUG
2416static int fsckcmds = 0;
2417SYSCTL_INT(_debug, OID_AUTO, fsckcmds, CTLFLAG_RW, &fsckcmds, 0, "");
2418#endif /* DEBUG */
2419
2420static int
2421sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS)
2422{
2423 struct thread *td = curthread;
2424 struct fsck_cmd cmd;
2425 struct ufsmount *ump;
2426 struct vnode *vp, *vpold, *dvp, *fdvp;
2427 struct inode *ip, *dp;
2428 struct mount *mp;
2429 struct fs *fs;
2430 ufs2_daddr_t blkno;
2431 long blkcnt, blksize;
2432 struct filedesc *fdp;
2433 struct file *fp;
2434 int vfslocked, filetype, error;
2435
2436 if (req->newlen > sizeof cmd)
2437 return (EBADRPC);
2438 if ((error = SYSCTL_IN(req, &cmd, sizeof cmd)) != 0)
2439 return (error);
2440 if (cmd.version != FFS_CMD_VERSION)
2441 return (ERPCMISMATCH);
2442 if ((error = getvnode(curproc->p_fd, cmd.handle, &fp)) != 0)
2443 return (error);
2444 vp = fp->f_data;
2445 if (vp->v_type != VREG && vp->v_type != VDIR) {
2446 fdrop(fp, td);
2447 return (EINVAL);
2448 }
2449 vn_start_write(vp, &mp, V_WAIT);
2450 if (mp == 0 || strncmp(mp->mnt_stat.f_fstypename, "ufs", MFSNAMELEN)) {
2451 vn_finished_write(mp);
2452 fdrop(fp, td);
2453 return (EINVAL);
2454 }
2455 if (mp->mnt_flag & MNT_RDONLY) {
2456 vn_finished_write(mp);
2457 fdrop(fp, td);
2458 return (EROFS);
2459 }
2460 ump = VFSTOUFS(mp);
2461 fs = ump->um_fs;
2462 filetype = IFREG;
2463
2464 switch (oidp->oid_number) {
2465
2466 case FFS_SET_FLAGS:
2467#ifdef DEBUG
2468 if (fsckcmds)
2469 printf("%s: %s flags\n", mp->mnt_stat.f_mntonname,
2470 cmd.size > 0 ? "set" : "clear");
2471#endif /* DEBUG */
2472 if (cmd.size > 0)
2473 fs->fs_flags |= (long)cmd.value;
2474 else
2475 fs->fs_flags &= ~(long)cmd.value;
2476 break;
2477
2478 case FFS_ADJ_REFCNT:
2479#ifdef DEBUG
2480 if (fsckcmds) {
2481 printf("%s: adjust inode %jd count by %jd\n",
2482 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2483 (intmax_t)cmd.size);
2484 }
2485#endif /* DEBUG */
2486 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
2487 break;
2488 ip = VTOI(vp);
2489 ip->i_nlink += cmd.size;
2490 DIP_SET(ip, i_nlink, ip->i_nlink);
2491 ip->i_effnlink += cmd.size;
2492 ip->i_flag |= IN_CHANGE;
2493 if (DOINGSOFTDEP(vp))
2494 softdep_change_linkcnt(ip);
2495 vput(vp);
2496 break;
2497
2498 case FFS_ADJ_BLKCNT:
2499#ifdef DEBUG
2500 if (fsckcmds) {
2501 printf("%s: adjust inode %jd block count by %jd\n",
2502 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2503 (intmax_t)cmd.size);
2504 }
2505#endif /* DEBUG */
2506 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
2507 break;
2508 ip = VTOI(vp);
2509 if (ip->i_flag & IN_SPACECOUNTED) {
2510 UFS_LOCK(ump);
2511 fs->fs_pendingblocks += cmd.size;
2512 UFS_UNLOCK(ump);
2513 }
2514 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + cmd.size);
2515 ip->i_flag |= IN_CHANGE;
2516 vput(vp);
2517 break;
2518
2519 case FFS_DIR_FREE:
2520 filetype = IFDIR;
2521 /* fall through */
2522
2523 case FFS_FILE_FREE:
2524#ifdef DEBUG
2525 if (fsckcmds) {
2526 if (cmd.size == 1)
2527 printf("%s: free %s inode %d\n",
2528 mp->mnt_stat.f_mntonname,
2529 filetype == IFDIR ? "directory" : "file",
2530 (ino_t)cmd.value);
2531 else
2532 printf("%s: free %s inodes %d-%d\n",
2533 mp->mnt_stat.f_mntonname,
2534 filetype == IFDIR ? "directory" : "file",
2535 (ino_t)cmd.value,
2536 (ino_t)(cmd.value + cmd.size - 1));
2537 }
2538#endif /* DEBUG */
2539 while (cmd.size > 0) {
2540 if ((error = ffs_freefile(ump, fs, ump->um_devvp,
2541 cmd.value, filetype)))
2542 break;
2543 cmd.size -= 1;
2544 cmd.value += 1;
2545 }
2546 break;
2547
2548 case FFS_BLK_FREE:
2549#ifdef DEBUG
2550 if (fsckcmds) {
2551 if (cmd.size == 1)
2552 printf("%s: free block %jd\n",
2553 mp->mnt_stat.f_mntonname,
2554 (intmax_t)cmd.value);
2555 else
2556 printf("%s: free blocks %jd-%jd\n",
2557 mp->mnt_stat.f_mntonname,
2558 (intmax_t)cmd.value,
2559 (intmax_t)cmd.value + cmd.size - 1);
2560 }
2561#endif /* DEBUG */
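		/*
		 * Free the range in block-aligned chunks: the first chunk
		 * is trimmed so that every later call starts on a block
		 * boundary, letting ffs_blkfree() reassemble full blocks.
		 */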
2562 blkno = cmd.value;
2563 blkcnt = cmd.size;
2564 blksize = fs->fs_frag - (blkno % fs->fs_frag);
2565 while (blkcnt > 0) {
2566 if (blksize > blkcnt)
2567 blksize = blkcnt;
2568 ffs_blkfree(ump, fs, ump->um_devvp, blkno,
2569 blksize * fs->fs_fsize, ROOTINO);
2570 blkno += blksize;
2571 blkcnt -= blksize;
2572 blksize = fs->fs_frag;
2573 }
2574 break;
2575
2576 /*
2577 * Adjust superblock summaries. fsck(8) is expected to
2578 * submit deltas when necessary.
2579 */
2580 case FFS_ADJ_NDIR:
2581#ifdef DEBUG
2582 if (fsckcmds) {
2583 printf("%s: adjust number of directories by %jd\n",
2584 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2585 }
2586#endif /* DEBUG */
2587 fs->fs_cstotal.cs_ndir += cmd.value;
2588 break;
2589
2590 case FFS_ADJ_NBFREE:
2591#ifdef DEBUG
2592 if (fsckcmds) {
2593 printf("%s: adjust number of free blocks by %+jd\n",
2594 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2595 }
2596#endif /* DEBUG */
2597 fs->fs_cstotal.cs_nbfree += cmd.value;
2598 break;
2599
2600 case FFS_ADJ_NIFREE:
2601#ifdef DEBUG
2602 if (fsckcmds) {
2603 printf("%s: adjust number of free inodes by %+jd\n",
2604 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2605 }
2606#endif /* DEBUG */
2607 fs->fs_cstotal.cs_nifree += cmd.value;
2608 break;
2609
2610 case FFS_ADJ_NFFREE:
2611#ifdef DEBUG
2612 if (fsckcmds) {
2613 printf("%s: adjust number of free frags by %+jd\n",
2614 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2615 }
2616#endif /* DEBUG */
2617 fs->fs_cstotal.cs_nffree += cmd.value;
2618 break;
2619
2620 case FFS_ADJ_NUMCLUSTERS:
2621#ifdef DEBUG
2622 if (fsckcmds) {
2623 printf("%s: adjust number of free clusters by %+jd\n",
2624 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2625 }
2626#endif /* DEBUG */
2627 fs->fs_cstotal.cs_numclusters += cmd.value;
2628 break;
2629
2630 case FFS_SET_CWD:
2631#ifdef DEBUG
2632 if (fsckcmds) {
2633 printf("%s: set current directory to inode %jd\n",
2634 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2635 }
2636#endif /* DEBUG */
2637 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_SHARED, &vp)))
2638 break;
2639 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
2640 AUDIT_ARG_VNODE1(vp);
2641 if ((error = change_dir(vp, td)) != 0) {
2642 vput(vp);
2643 VFS_UNLOCK_GIANT(vfslocked);
2644 break;
2645 }
2646 VOP_UNLOCK(vp, 0);
2647 VFS_UNLOCK_GIANT(vfslocked);
2648 fdp = td->td_proc->p_fd;
2649 FILEDESC_XLOCK(fdp);
2650 vpold = fdp->fd_cdir;
2651 fdp->fd_cdir = vp;
2652 FILEDESC_XUNLOCK(fdp);
2653 vfslocked = VFS_LOCK_GIANT(vpold->v_mount);
2654 vrele(vpold);
2655 VFS_UNLOCK_GIANT(vfslocked);
2656 break;
2657
2658 case FFS_SET_DOTDOT:
2659#ifdef DEBUG
2660 if (fsckcmds) {
2661 printf("%s: change .. in cwd from %jd to %jd\n",
2662 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2663 (intmax_t)cmd.size);
2664 }
2665#endif /* DEBUG */
2666 /*
2667 * First we have to get and lock the parent directory
2668 * to which ".." points.
2669 */
2670 error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &fdvp);
2671 if (error)
2672 break;
2673 /*
2674 * Now we get and lock the child directory containing "..".
2675 */
2676 FILEDESC_SLOCK(td->td_proc->p_fd);
2677 dvp = td->td_proc->p_fd->fd_cdir;
2678 FILEDESC_SUNLOCK(td->td_proc->p_fd);
2679 if ((error = vget(dvp, LK_EXCLUSIVE, td)) != 0) {
2680 vput(fdvp);
2681 break;
2682 }
2683 dp = VTOI(dvp);
2684 dp->i_offset = 12; /* XXX mastertemplate.dot_reclen */
2685 error = ufs_dirrewrite(dp, VTOI(fdvp), (ino_t)cmd.size,
2686 DT_DIR, 0);
2687 cache_purge(fdvp);
2688 cache_purge(dvp);
2689 vput(dvp);
2690 vput(fdvp);
2691 break;
2692
2693 case FFS_UNLINK:
2694#ifdef DEBUG
2695 if (fsckcmds) {
2696 char buf[32];
2697
2698 if (copyinstr((char *)(intptr_t)cmd.value, buf,32,NULL))
2699 strncpy(buf, "Name_too_long", 32);
2700 printf("%s: unlink %s (inode %jd)\n",
2701 mp->mnt_stat.f_mntonname, buf, (intmax_t)cmd.size);
2702 }
2703#endif /* DEBUG */
2704 /*
2705 * kern_unlinkat will do its own start/finish writes and
2706 * they do not nest, so drop ours here. Setting mp == NULL
2707 * indicates that vn_finished_write is not needed down below.
2708 */
2709 vn_finished_write(mp);
2710 mp = NULL;
2711 error = kern_unlinkat(td, AT_FDCWD, (char *)(intptr_t)cmd.value,
2712 UIO_USERSPACE, (ino_t)cmd.size);
2713 break;
2714
2715 default:
2716#ifdef DEBUG
2717 if (fsckcmds) {
2718 printf("Invalid request %d from fsck\n",
2719 oidp->oid_number);
2720 }
2721#endif /* DEBUG */
2722 error = EINVAL;
2723 break;
2724
2725 }
2726 fdrop(fp, td);
2727 vn_finished_write(mp);
2728 return (error);
2729}