ext2_inode.c (revision 125781 → 125962)
1/*
2 * modified for Lites 1.1
3 *
4 * Aug 1995, Godmar Back (gback@cs.utah.edu)
5 * University of Utah, Department of Computer Science
6 */
7/*
8 * Copyright (c) 1982, 1986, 1989, 1993
9 * The Regents of the University of California. All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the University of
22 * California, Berkeley and its contributors.
23 * 4. Neither the name of the University nor the names of its contributors
24 * may be used to endorse or promote products derived from this software
25 * without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * @(#)ffs_inode.c 8.5 (Berkeley) 12/30/93
40 * $FreeBSD: head/sys/gnu/fs/ext2fs/ext2_inode.c 125962 2004-02-18 14:08:25Z tjr $
41 */
42
43#include <sys/param.h>
44#include <sys/systm.h>
45#include <sys/mount.h>
46#include <sys/bio.h>
47#include <sys/buf.h>
48#include <sys/vnode.h>
49#include <sys/malloc.h>
50
51#include <vm/vm.h>
52#include <vm/vm_extern.h>
53
54#include <gnu/ext2fs/inode.h>
55#include <gnu/ext2fs/ext2_mount.h>
56#include <gnu/ext2fs/ext2_fs.h>
57#include <gnu/ext2fs/ext2_fs_sb.h>
58#include <gnu/ext2fs/fs.h>
59#include <gnu/ext2fs/ext2_extern.h>
60
61static int ext2_indirtrunc(struct inode *, int32_t, int32_t, int32_t, int,
62 long *);
63
64/*
65 * Update the access, modified, and inode change times as specified by the
66 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively. Write the inode
67 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
68 * the timestamp update). The IN_LAZYMOD flag is set to force a write
69 * later if not now. If we write now, then clear both IN_MODIFIED and
70 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is
71 * set, then wait for the write to complete.
72 */
73int
74ext2_update(vp, waitfor)
75 struct vnode *vp;
76 int waitfor;
77{
78 struct ext2_sb_info *fs;
79 struct buf *bp;
80 struct inode *ip;
81 int error;
82
83 ext2_itimes(vp);
84 ip = VTOI(vp);
85 if ((ip->i_flag & IN_MODIFIED) == 0)
86 return (0);
87 ip->i_flag &= ~(IN_LAZYMOD | IN_MODIFIED);
88 if (vp->v_mount->mnt_flag & MNT_RDONLY)
89 return (0);
90 fs = ip->i_e2fs;
91 if ((error = bread(ip->i_devvp,
92 fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
93 (int)fs->s_blocksize, NOCRED, &bp)) != 0) {
94 brelse(bp);
95 return (error);
96 }
97 ext2_i2ei(ip, (struct ext2_inode *)((char *)bp->b_data +
98 EXT2_INODE_SIZE * ino_to_fsbo(fs, ip->i_number)));
99 if (waitfor && (vp->v_mount->mnt_flag & MNT_ASYNC) == 0)
100 return (bwrite(bp));
101 else {
102 bdwrite(bp);
103 return (0);
104 }
105}
106
107#define SINGLE 0 /* index of single indirect block */
108#define DOUBLE 1 /* index of double indirect block */
109#define TRIPLE 2 /* index of triple indirect block */
110/*
111 * Truncate the inode oip to at most length size, freeing the
112 * disk blocks.
113 */
114int
115ext2_truncate(vp, length, flags, cred, td)
116 struct vnode *vp;
117 off_t length;
118 int flags;
119 struct ucred *cred;
120 struct thread *td;
121{
122 struct vnode *ovp = vp;
123 int32_t lastblock;
124 struct inode *oip;
125 int32_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
126 int32_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
127 struct ext2_sb_info *fs;
128 struct buf *bp;
129 int offset, size, level;
130 long count, nblocks, blocksreleased = 0;
131 int aflags, error, i, allerror;
132 off_t osize;
133/*
134printf("ext2_truncate called %d to %d\n", VTOI(ovp)->i_number, length);
135*/ /*
136 * negative file sizes will totally break the code below and
137 * are not meaningful anyway.
138 */
139 if (length < 0)
140 return EFBIG;
141
142 oip = VTOI(ovp);
143 if (ovp->v_type == VLNK &&
144 oip->i_size < ovp->v_mount->mnt_maxsymlinklen) {
145#if DIAGNOSTIC
146 if (length != 0)
147 panic("ext2_truncate: partial truncate of symlink");
148#endif
149 bzero((char *)&oip->i_shortlink, (u_int)oip->i_size);
150 oip->i_size = 0;
151 oip->i_flag |= IN_CHANGE | IN_UPDATE;
152 return (ext2_update(ovp, 1));
153 }
154 if (oip->i_size == length) {
155 oip->i_flag |= IN_CHANGE | IN_UPDATE;
156 return (ext2_update(ovp, 0));
157 }
158 fs = oip->i_e2fs;
159 osize = oip->i_size;
160 ext2_discard_prealloc(oip);
161 /*
162 * Lengthen the size of the file. We must ensure that the
163 * last byte of the file is allocated. Since the smallest
164 * value of osize is 0, length will be at least 1.
165 */
166 if (osize < length) {
167 /*
168 * XXX Refuse to extend files past 2GB on old format
169 * filesystems or ones that don't already have the
170 * large file flag set in the superblock.
171 */
172 if (osize < 0x80000000 && length >= 0x80000000 &&
173 (oip->i_e2fs->s_es->s_rev_level == EXT2_GOOD_OLD_REV ||
174 (oip->i_e2fs->s_es->s_feature_ro_compat &
175 EXT2_FEATURE_RO_COMPAT_LARGE_FILE) == 0))
176 return (EFBIG);
177 offset = blkoff(fs, length - 1);
178 lbn = lblkno(fs, length - 1);
179 aflags = B_CLRBUF;
180 if (flags & IO_SYNC)
181 aflags |= B_SYNC;
182 vnode_pager_setsize(ovp, length);
183 if ((error = ext2_balloc(oip, lbn, offset + 1, cred, &bp,
184 aflags)) != 0)
185 return (error);
186 oip->i_size = length;
187 if (aflags & IO_SYNC)
188 bwrite(bp);
189 else
190 bawrite(bp);
191 oip->i_flag |= IN_CHANGE | IN_UPDATE;
192 return (ext2_update(ovp, 1));
193 }
194 /*
195 * Shorten the size of the file. If the file is not being
196 * truncated to a block boundary, the contents of the
197 * partial block following the end of the file must be
198 * zero'ed in case it ever becomes accessible again because
199 * of subsequent file growth.
200 */
201 /* I don't understand the comment above */
202 offset = blkoff(fs, length);
203 if (offset == 0) {
204 oip->i_size = length;
205 } else {
206 lbn = lblkno(fs, length);
207 aflags = B_CLRBUF;
208 if (flags & IO_SYNC)
209 aflags |= B_SYNC;
210 if ((error = ext2_balloc(oip, lbn, offset, cred, &bp,
211 aflags)) != 0)
212 return (error);
213 oip->i_size = length;
214 size = blksize(fs, oip, lbn);
215 bzero((char *)bp->b_data + offset, (u_int)(size - offset));
216 allocbuf(bp, size);
217 if (aflags & IO_SYNC)
218 bwrite(bp);
219 else
220 bawrite(bp);
221 }
222 /*
223 * Calculate index into inode's block list of
224 * last direct and indirect blocks (if any)
225 * which we want to keep. Lastblock is -1 when
226 * the file is truncated to 0.
227 */
228 lastblock = lblkno(fs, length + fs->s_blocksize - 1) - 1;
229 lastiblock[SINGLE] = lastblock - NDADDR;
230 lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
231 lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
232 nblocks = btodb(fs->s_blocksize);
233 /*
234 * Update file and block pointers on disk before we start freeing
235 * blocks. If we crash before free'ing blocks below, the blocks
236 * will be returned to the free list. lastiblock values are also
237 * normalized to -1 for calls to ext2_indirtrunc below.
238 */
239 bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks);
240 for (level = TRIPLE; level >= SINGLE; level--)
241 if (lastiblock[level] < 0) {
242 oip->i_ib[level] = 0;
243 lastiblock[level] = -1;
244 }
245 for (i = NDADDR - 1; i > lastblock; i--)
246 oip->i_db[i] = 0;
247 oip->i_flag |= IN_CHANGE | IN_UPDATE;
248 allerror = ext2_update(ovp, 1);
249
250 /*
251 * Having written the new inode to disk, save its new configuration
252 * and put back the old block pointers long enough to process them.
253 * Note that we save the new block configuration so we can check it
254 * when we are done.
255 */
256 bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks);
257 bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks);
258 oip->i_size = osize;
259 error = vtruncbuf(ovp, cred, td, length, (int)fs->s_blocksize);
260 if (error && (allerror == 0))
261 allerror = error;
262
263 /*
264 * Indirect blocks first.
265 */
266 indir_lbn[SINGLE] = -NDADDR;
267 indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
268 indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
269 for (level = TRIPLE; level >= SINGLE; level--) {
270 bn = oip->i_ib[level];
271 if (bn != 0) {
272 error = ext2_indirtrunc(oip, indir_lbn[level],
273 fsbtodb(fs, bn), lastiblock[level], level, &count);
274 if (error)
275 allerror = error;
276 blocksreleased += count;
277 if (lastiblock[level] < 0) {
278 oip->i_ib[level] = 0;
279 ext2_blkfree(oip, bn, fs->s_frag_size);
280 blocksreleased += nblocks;
281 }
282 }
283 if (lastiblock[level] >= 0)
284 goto done;
285 }
286
287 /*
288 * All whole direct blocks or frags.
289 */
290 for (i = NDADDR - 1; i > lastblock; i--) {
291 long bsize;
292
293 bn = oip->i_db[i];
294 if (bn == 0)
295 continue;
296 oip->i_db[i] = 0;
297 bsize = blksize(fs, oip, i);
298 ext2_blkfree(oip, bn, bsize);
299 blocksreleased += btodb(bsize);
300 }
301 if (lastblock < 0)
302 goto done;
303
304 /*
305 * Finally, look for a change in size of the
306 * last direct block; release any frags.
307 */
308 bn = oip->i_db[lastblock];
309 if (bn != 0) {
310 long oldspace, newspace;
311
312 /*
313 * Calculate amount of space we're giving
314 * back as old block size minus new block size.
315 */
316 oldspace = blksize(fs, oip, lastblock);
317 oip->i_size = length;
318 newspace = blksize(fs, oip, lastblock);
319 if (newspace == 0)
320 panic("itrunc: newspace");
321 if (oldspace - newspace > 0) {
322 /*
323 * Block number of space to be free'd is
324 * the old block # plus the number of frags
325 * required for the storage we're keeping.
326 */
327 bn += numfrags(fs, newspace);
328 ext2_blkfree(oip, bn, oldspace - newspace);
329 blocksreleased += btodb(oldspace - newspace);
330 }
331 }
332done:
333#if DIAGNOSTIC
334 for (level = SINGLE; level <= TRIPLE; level++)
335 if (newblks[NDADDR + level] != oip->i_ib[level])
336 panic("itrunc1");
337 for (i = 0; i < NDADDR; i++)
338 if (newblks[i] != oip->i_db[i])
339 panic("itrunc2");
340 VI_LOCK(ovp);
341 if (length == 0 && (!TAILQ_EMPTY(&ovp->v_dirtyblkhd) ||
342 !TAILQ_EMPTY(&ovp->v_cleanblkhd)))
343 panic("itrunc3");
344 VI_UNLOCK(ovp);
345#endif /* DIAGNOSTIC */
346 /*
347 * Put back the real size.
348 */
349 oip->i_size = length;
350 oip->i_blocks -= blocksreleased;
351 if (oip->i_blocks < 0) /* sanity */
352 oip->i_blocks = 0;
353 oip->i_flag |= IN_CHANGE;
354 vnode_pager_setsize(ovp, length);
355 return (allerror);
356}
357
358/*
359 * Release blocks associated with the inode ip and stored in the indirect
360 * block bn. Blocks are free'd in LIFO order up to (but not including)
361 * lastbn. If level is greater than SINGLE, the block is an indirect block
362 * and recursive calls to indirtrunc must be used to cleanse other indirect
363 * blocks.
364 *
365 * NB: triple indirect blocks are untested.
366 */
367
368static int
369ext2_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
370 struct inode *ip;
371 int32_t lbn, lastbn;
372 int32_t dbn;
373 int level;
374 long *countp;
375{
376 struct buf *bp;
377 struct ext2_sb_info *fs = ip->i_e2fs;
378 struct vnode *vp;
379 int32_t *bap, *copy, nb, nlbn, last;
380 long blkcount, factor;
381 int i, nblocks, blocksreleased = 0;
382 int error = 0, allerror = 0;
383
384 /*
385 * Calculate index in current block of last
386 * block to be kept. -1 indicates the entire
387 * block so we need not calculate the index.
388 */
389 factor = 1;
390 for (i = SINGLE; i < level; i++)
391 factor *= NINDIR(fs);
392 last = lastbn;
393 if (lastbn > 0)
394 last /= factor;
395 nblocks = btodb(fs->s_blocksize);
396 /*
397 * Get buffer of block pointers, zero those entries corresponding
398 * to blocks to be free'd, and update on disk copy first. Since
399 * double(triple) indirect blocks are free'd before single(double)
400 * indirect blocks, calls to bmap on them will fail. However, we already have
401 * the on disk address, so we have to set the b_blkno field
402 * explicitly instead of letting bread do everything for us.
403 */
404 vp = ITOV(ip);
405 bp = getblk(vp, lbn, (int)fs->s_blocksize, 0, 0, 0);
406 if (bp->b_flags & (B_DONE | B_DELWRI)) {
407 } else {
408 bp->b_iocmd = BIO_READ;
409 if (bp->b_bcount > bp->b_bufsize)
410 panic("ext2_indirtrunc: bad buffer size");
411 bp->b_blkno = dbn;
412 vfs_busy_pages(bp, 0);
413 bp->b_iooffset = dbtob(bp->b_blkno);
414 VOP_STRATEGY(vp, bp);
415 error = bufwait(bp);
416 }
417 if (error) {
418 brelse(bp);
419 *countp = 0;
420 return (error);
421 }
422
423 bap = (int32_t *)bp->b_data;
424 MALLOC(copy, int32_t *, fs->s_blocksize, M_TEMP, M_WAITOK);
425 bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->s_blocksize);
426 bzero((caddr_t)&bap[last + 1],
427 (u_int)(NINDIR(fs) - (last + 1)) * sizeof (int32_t));
428 if (last == -1)
429 bp->b_flags |= B_INVAL;
430 error = bwrite(bp);
431 if (error)
432 allerror = error;
433 bap = copy;
434
435 /*
436 * Recursively free totally unused blocks.
437 */
438 for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
439 i--, nlbn += factor) {
440 nb = bap[i];
441 if (nb == 0)
442 continue;
443 if (level > SINGLE) {
444 if ((error = ext2_indirtrunc(ip, nlbn,
445 fsbtodb(fs, nb), (int32_t)-1, level - 1, &blkcount)) != 0)
446 allerror = error;
447 blocksreleased += blkcount;
448 }
449 ext2_blkfree(ip, nb, fs->s_blocksize);
450 blocksreleased += nblocks;
451 }
452
453 /*
454 * Recursively free last partial block.
455 */
456 if (level > SINGLE && lastbn >= 0) {
457 last = lastbn % factor;
458 nb = bap[i];
459 if (nb != 0) {
460 if ((error = ext2_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
461 last, level - 1, &blkcount)) != 0)
462 allerror = error;
463 blocksreleased += blkcount;
464 }
465 }
466 FREE(copy, M_TEMP);
467 *countp = blocksreleased;
468 return (allerror);
469}
470
471/*
472 * discard preallocated blocks
473 */
474int
475ext2_inactive(ap)
476 struct vop_inactive_args /* {
477 struct vnode *a_vp;
478 struct thread *a_td;
479 } */ *ap;
480{
481 struct vnode *vp = ap->a_vp;
482 struct inode *ip = VTOI(vp);
483 struct thread *td = ap->a_td;
484 int mode, error = 0;
485
486 ext2_discard_prealloc(ip);
487 if (prtactive && vrefcnt(vp) != 0)
488 vprint("ext2_inactive: pushing active", vp);
489
490 /*
491 * Ignore inodes related to stale file handles.
492 */
493 if (ip->i_mode == 0)
494 goto out;
495 if (ip->i_nlink <= 0) {
496 (void) vn_write_suspend_wait(vp, NULL, V_WAIT);
497 error = ext2_truncate(vp, (off_t)0, 0, NOCRED, td);
498 ip->i_rdev = 0;
499 mode = ip->i_mode;
500 ip->i_mode = 0;
501 ip->i_flag |= IN_CHANGE | IN_UPDATE;
502 ext2_vfree(vp, ip->i_number, mode);
503 }
504 if (ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) {
505 if ((ip->i_flag & (IN_CHANGE | IN_UPDATE | IN_MODIFIED)) == 0 &&
506 vn_write_suspend_wait(vp, NULL, V_NOWAIT)) {
507 ip->i_flag &= ~IN_ACCESS;
508 } else {
509 (void) vn_write_suspend_wait(vp, NULL, V_WAIT);
510 ext2_update(vp, 0);
511 }
512 }
513out:
514 VOP_UNLOCK(vp, 0, td);
515 /*
516 * If we are done with the inode, reclaim it
517 * so that it can be reused immediately.
518 */
519 if (ip->i_mode == 0)
520 vrecycle(vp, NULL, td);
521 return (error);
522}
523
524/*
525 * Reclaim an inode so that it can be used for other purposes.
526 */
527int
528ext2_reclaim(ap)
529 struct vop_reclaim_args /* {
530 struct vnode *a_vp;
531 struct thread *a_td;
532 } */ *ap;
533{
534 struct inode *ip;
535 struct vnode *vp = ap->a_vp;
536
537 if (prtactive && vrefcnt(vp) != 0)
538 vprint("ufs_reclaim: pushing active", vp);
539 ip = VTOI(vp);
540 if (ip->i_flag & IN_LAZYMOD) {
541 ip->i_flag |= IN_MODIFIED;
542 ext2_update(vp, 0);
543 }
544 /*
545 * Remove the inode from its hash chain.
546 */
547 ext2_ihashrem(ip);
548 /*
549 * Purge old data structures associated with the inode.
550 */
551 if (ip->i_devvp) {
552 vrele(ip->i_devvp);
553 ip->i_devvp = 0;
554 }
555 FREE(vp->v_data, M_EXT2NODE);
556 vp->v_data = 0;
557 return (0);
558}
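
A minimal sketch of the guard introduced at lines 167-176 of revision 125962, restated as a standalone predicate so the condition can be read in isolation. The struct and function names below are invented for illustration; only the superblock fields they stand in for (s_es->s_rev_level and s_es->s_feature_ro_compat) come from the source above, and the constant values used are the standard ext2 definitions, redefined locally only to keep the sketch self-contained.

#include <stdint.h>
#include <stdbool.h>

#define EXT2_GOOD_OLD_REV			0	/* revision-0 ("old format") filesystem */
#define EXT2_FEATURE_RO_COMPAT_LARGE_FILE	0x0002	/* files > 2GB are representable */

/* Hypothetical stand-in for the two superblock fields the kernel consults. */
struct ext2_sb_fields {
	uint32_t rev_level;		/* mirrors s_es->s_rev_level */
	uint32_t feature_ro_compat;	/* mirrors s_es->s_feature_ro_compat */
};

/*
 * Returns true when growing a file from osize to length would cross the
 * 2GB boundary on a filesystem that cannot represent large files, i.e. a
 * revision-0 filesystem or one without the LARGE_FILE ro-compat feature.
 */
bool
ext2_would_exceed_2gb(const struct ext2_sb_fields *sb, int64_t osize,
    int64_t length)
{
	return (osize < 0x80000000LL && length >= 0x80000000LL &&
	    (sb->rev_level == EXT2_GOOD_OLD_REV ||
	    (sb->feature_ro_compat & EXT2_FEATURE_RO_COMPAT_LARGE_FILE) == 0));
}

In ext2_truncate() the same test runs inline against oip->i_e2fs->s_es, before the ext2_balloc() call that would actually extend the file, so an oversized extension fails with EFBIG without allocating any blocks.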