/* ffs_alloc.c revision 38907 */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 251541Srgrimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 261541Srgrimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 271541Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 281541Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 291541Srgrimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 301541Srgrimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 311541Srgrimes * SUCH DAMAGE. 321541Srgrimes * 3322521Sdyson * @(#)ffs_alloc.c 8.18 (Berkeley) 5/26/95 3438907Sbde * $Id: ffs_alloc.c,v 1.52 1998/09/05 14:13:12 phk Exp $ 351541Srgrimes */ 361541Srgrimes 3713260Swollman#include "opt_quota.h" 3813260Swollman 391541Srgrimes#include <sys/param.h> 401541Srgrimes#include <sys/systm.h> 411541Srgrimes#include <sys/buf.h> 421541Srgrimes#include <sys/proc.h> 431541Srgrimes#include <sys/vnode.h> 441541Srgrimes#include <sys/mount.h> 4538408Sbde#ifdef notyet 4612911Sphk#include <sys/sysctl.h> 4738408Sbde#endif 481541Srgrimes#include <sys/syslog.h> 491541Srgrimes 501541Srgrimes#include <ufs/ufs/quota.h> 511541Srgrimes#include <ufs/ufs/inode.h> 5230474Sphk#include <ufs/ufs/ufsmount.h> 531541Srgrimes 541541Srgrimes#include <ufs/ffs/fs.h> 551541Srgrimes#include <ufs/ffs/ffs_extern.h> 561541Srgrimes 5722521Sdysontypedef ufs_daddr_t allocfcn_t __P((struct inode *ip, int cg, ufs_daddr_t bpref, 5822521Sdyson int size)); 5912590Sbde 6022521Sdysonstatic ufs_daddr_t ffs_alloccg __P((struct inode *, int, ufs_daddr_t, int)); 6134266Sjulianstatic ufs_daddr_t 6234266Sjulian ffs_alloccgblk __P((struct inode *, struct buf *, ufs_daddr_t)); 6331352Sbde#ifdef DIAGNOSTIC 6431352Sbdestatic int ffs_checkblk __P((struct inode *, ufs_daddr_t, long)); 6531352Sbde#endif 6622521Sdysonstatic void ffs_clusteracct __P((struct fs *, struct cg *, ufs_daddr_t, 6722521Sdyson 
int)); 6831351Sbde#ifdef notyet 6931351Sbdestatic ufs_daddr_t ffs_clusteralloc __P((struct inode *, int, ufs_daddr_t, 7031351Sbde int)); 7131351Sbde#endif 721541Srgrimesstatic ino_t ffs_dirpref __P((struct fs *)); 7322521Sdysonstatic ufs_daddr_t ffs_fragextend __P((struct inode *, int, long, int, int)); 741541Srgrimesstatic void ffs_fserr __P((struct fs *, u_int, char *)); 751541Srgrimesstatic u_long ffs_hashalloc 7612590Sbde __P((struct inode *, int, long, int, allocfcn_t *)); 7722521Sdysonstatic ino_t ffs_nodealloccg __P((struct inode *, int, ufs_daddr_t, int)); 7822521Sdysonstatic ufs_daddr_t ffs_mapsearch __P((struct fs *, struct cg *, ufs_daddr_t, 7922521Sdyson int)); 801541Srgrimes 811541Srgrimes/* 821541Srgrimes * Allocate a block in the file system. 838876Srgrimes * 841541Srgrimes * The size of the requested block is given, which must be some 851541Srgrimes * multiple of fs_fsize and <= fs_bsize. 861541Srgrimes * A preference may be optionally specified. If a preference is given 871541Srgrimes * the following hierarchy is used to allocate a block: 881541Srgrimes * 1) allocate the requested block. 891541Srgrimes * 2) allocate a rotationally optimal block in the same cylinder. 901541Srgrimes * 3) allocate a block in the same cylinder group. 911541Srgrimes * 4) quadradically rehash into other cylinder groups, until an 921541Srgrimes * available block is located. 931541Srgrimes * If no block preference is given the following heirarchy is used 941541Srgrimes * to allocate a block: 951541Srgrimes * 1) allocate a block in the cylinder group that contains the 961541Srgrimes * inode for the file. 971541Srgrimes * 2) quadradically rehash into other cylinder groups, until an 981541Srgrimes * available block is located. 
991541Srgrimes */ 1001549Srgrimesint 1011541Srgrimesffs_alloc(ip, lbn, bpref, size, cred, bnp) 1021541Srgrimes register struct inode *ip; 10322521Sdyson ufs_daddr_t lbn, bpref; 1041541Srgrimes int size; 1051541Srgrimes struct ucred *cred; 10622521Sdyson ufs_daddr_t *bnp; 1071541Srgrimes{ 1081541Srgrimes register struct fs *fs; 10922521Sdyson ufs_daddr_t bno; 1106357Sphk int cg; 1116357Sphk#ifdef QUOTA 1126357Sphk int error; 1136357Sphk#endif 1148876Srgrimes 1151541Srgrimes *bnp = 0; 1161541Srgrimes fs = ip->i_fs; 1171541Srgrimes#ifdef DIAGNOSTIC 1181541Srgrimes if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) { 1193487Sphk printf("dev = 0x%lx, bsize = %ld, size = %d, fs = %s\n", 12037555Sbde (u_long)ip->i_dev, (long)fs->fs_bsize, size, fs->fs_fsmnt); 1211541Srgrimes panic("ffs_alloc: bad size"); 1221541Srgrimes } 1231541Srgrimes if (cred == NOCRED) 1247170Sdg panic("ffs_alloc: missing credential"); 1251541Srgrimes#endif /* DIAGNOSTIC */ 1261541Srgrimes if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0) 1271541Srgrimes goto nospace; 12829609Sphk if (cred->cr_uid != 0 && 12929609Sphk freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0) 1301541Srgrimes goto nospace; 1311541Srgrimes#ifdef QUOTA 1323487Sphk error = chkdq(ip, (long)btodb(size), cred, 0); 1333487Sphk if (error) 1341541Srgrimes return (error); 1351541Srgrimes#endif 1361541Srgrimes if (bpref >= fs->fs_size) 1371541Srgrimes bpref = 0; 1381541Srgrimes if (bpref == 0) 1391541Srgrimes cg = ino_to_cg(fs, ip->i_number); 1401541Srgrimes else 1411541Srgrimes cg = dtog(fs, bpref); 14222521Sdyson bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size, 14322521Sdyson ffs_alloccg); 1441541Srgrimes if (bno > 0) { 1451541Srgrimes ip->i_blocks += btodb(size); 1461541Srgrimes ip->i_flag |= IN_CHANGE | IN_UPDATE; 1471541Srgrimes *bnp = bno; 1481541Srgrimes return (0); 1491541Srgrimes } 1501541Srgrimes#ifdef QUOTA 1511541Srgrimes /* 1521541Srgrimes * Restore user's disk quota because allocation 
failed. 1531541Srgrimes */ 1541541Srgrimes (void) chkdq(ip, (long)-btodb(size), cred, FORCE); 1551541Srgrimes#endif 1561541Srgrimesnospace: 1571541Srgrimes ffs_fserr(fs, cred->cr_uid, "file system full"); 1581541Srgrimes uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt); 1591541Srgrimes return (ENOSPC); 1601541Srgrimes} 1611541Srgrimes 1621541Srgrimes/* 1631541Srgrimes * Reallocate a fragment to a bigger size 1641541Srgrimes * 1651541Srgrimes * The number and size of the old block is given, and a preference 1661541Srgrimes * and new size is also specified. The allocator attempts to extend 1671541Srgrimes * the original block. Failing that, the regular block allocator is 1681541Srgrimes * invoked to get an appropriate block. 1691541Srgrimes */ 1701549Srgrimesint 1711541Srgrimesffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp) 1721541Srgrimes register struct inode *ip; 17322521Sdyson ufs_daddr_t lbprev; 17422521Sdyson ufs_daddr_t bpref; 1751541Srgrimes int osize, nsize; 1761541Srgrimes struct ucred *cred; 1771541Srgrimes struct buf **bpp; 1781541Srgrimes{ 1791541Srgrimes register struct fs *fs; 1801541Srgrimes struct buf *bp; 1811541Srgrimes int cg, request, error; 18222521Sdyson ufs_daddr_t bprev, bno; 1838876Srgrimes 1841541Srgrimes *bpp = 0; 1851541Srgrimes fs = ip->i_fs; 1861541Srgrimes#ifdef DIAGNOSTIC 1871541Srgrimes if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 || 1881541Srgrimes (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) { 1891541Srgrimes printf( 19037555Sbde "dev = 0x%lx, bsize = %ld, osize = %d, nsize = %d, fs = %s\n", 19137555Sbde (u_long)ip->i_dev, (long)fs->fs_bsize, osize, 1928456Srgrimes nsize, fs->fs_fsmnt); 1931541Srgrimes panic("ffs_realloccg: bad size"); 1941541Srgrimes } 1951541Srgrimes if (cred == NOCRED) 1967170Sdg panic("ffs_realloccg: missing credential"); 1971541Srgrimes#endif /* DIAGNOSTIC */ 19829609Sphk if (cred->cr_uid != 0 && 19929609Sphk freespace(fs, fs->fs_minfree) - numfrags(fs, 
nsize - osize) < 0) 2001541Srgrimes goto nospace; 2011541Srgrimes if ((bprev = ip->i_db[lbprev]) == 0) { 2026357Sphk printf("dev = 0x%lx, bsize = %ld, bprev = %ld, fs = %s\n", 20337555Sbde (u_long)ip->i_dev, (long)fs->fs_bsize, (long)bprev, 20437555Sbde fs->fs_fsmnt); 2051541Srgrimes panic("ffs_realloccg: bad bprev"); 2061541Srgrimes } 2071541Srgrimes /* 2081541Srgrimes * Allocate the extra space in the buffer. 2091541Srgrimes */ 2103487Sphk error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp); 2113487Sphk if (error) { 2121541Srgrimes brelse(bp); 2131541Srgrimes return (error); 2141541Srgrimes } 2156864Sdg 2166864Sdg if( bp->b_blkno == bp->b_lblkno) { 2176864Sdg if( lbprev >= NDADDR) 2186864Sdg panic("ffs_realloccg: lbprev out of range"); 2196864Sdg bp->b_blkno = fsbtodb(fs, bprev); 2206864Sdg } 2218876Srgrimes 2221541Srgrimes#ifdef QUOTA 2233487Sphk error = chkdq(ip, (long)btodb(nsize - osize), cred, 0); 2243487Sphk if (error) { 2251541Srgrimes brelse(bp); 2261541Srgrimes return (error); 2271541Srgrimes } 2281541Srgrimes#endif 2291541Srgrimes /* 2301541Srgrimes * Check for extension in the existing location. 2311541Srgrimes */ 2321541Srgrimes cg = dtog(fs, bprev); 2333487Sphk bno = ffs_fragextend(ip, cg, (long)bprev, osize, nsize); 2343487Sphk if (bno) { 2351541Srgrimes if (bp->b_blkno != fsbtodb(fs, bno)) 23623560Smpp panic("ffs_realloccg: bad blockno"); 2371541Srgrimes ip->i_blocks += btodb(nsize - osize); 2381541Srgrimes ip->i_flag |= IN_CHANGE | IN_UPDATE; 2397399Sdg allocbuf(bp, nsize); 2401541Srgrimes bp->b_flags |= B_DONE; 2411541Srgrimes bzero((char *)bp->b_data + osize, (u_int)nsize - osize); 2421541Srgrimes *bpp = bp; 2431541Srgrimes return (0); 2441541Srgrimes } 2451541Srgrimes /* 2461541Srgrimes * Allocate a new disk location. 
2471541Srgrimes */ 2481541Srgrimes if (bpref >= fs->fs_size) 2491541Srgrimes bpref = 0; 2501541Srgrimes switch ((int)fs->fs_optim) { 2511541Srgrimes case FS_OPTSPACE: 2521541Srgrimes /* 2538876Srgrimes * Allocate an exact sized fragment. Although this makes 2548876Srgrimes * best use of space, we will waste time relocating it if 2551541Srgrimes * the file continues to grow. If the fragmentation is 2561541Srgrimes * less than half of the minimum free reserve, we choose 2571541Srgrimes * to begin optimizing for time. 2581541Srgrimes */ 2591541Srgrimes request = nsize; 2606993Sdg if (fs->fs_minfree <= 5 || 2611541Srgrimes fs->fs_cstotal.cs_nffree > 2621541Srgrimes fs->fs_dsize * fs->fs_minfree / (2 * 100)) 2631541Srgrimes break; 2641541Srgrimes log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n", 2651541Srgrimes fs->fs_fsmnt); 2661541Srgrimes fs->fs_optim = FS_OPTTIME; 2671541Srgrimes break; 2681541Srgrimes case FS_OPTTIME: 2691541Srgrimes /* 2701541Srgrimes * At this point we have discovered a file that is trying to 2711541Srgrimes * grow a small fragment to a larger fragment. To save time, 2721541Srgrimes * we allocate a full sized block, then free the unused portion. 2731541Srgrimes * If the file continues to grow, the `ffs_fragextend' call 2741541Srgrimes * above will be able to grow it in place without further 2751541Srgrimes * copying. If aberrant programs cause disk fragmentation to 2761541Srgrimes * grow within 2% of the free reserve, we choose to begin 2771541Srgrimes * optimizing for space. 
2781541Srgrimes */ 2791541Srgrimes request = fs->fs_bsize; 2801541Srgrimes if (fs->fs_cstotal.cs_nffree < 2811541Srgrimes fs->fs_dsize * (fs->fs_minfree - 2) / 100) 2821541Srgrimes break; 2831541Srgrimes log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n", 2841541Srgrimes fs->fs_fsmnt); 2851541Srgrimes fs->fs_optim = FS_OPTSPACE; 2861541Srgrimes break; 2871541Srgrimes default: 2883487Sphk printf("dev = 0x%lx, optim = %ld, fs = %s\n", 28937555Sbde (u_long)ip->i_dev, (long)fs->fs_optim, fs->fs_fsmnt); 2901541Srgrimes panic("ffs_realloccg: bad optim"); 2911541Srgrimes /* NOTREACHED */ 2921541Srgrimes } 29322521Sdyson bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request, 29422521Sdyson ffs_alloccg); 2951541Srgrimes if (bno > 0) { 2961541Srgrimes bp->b_blkno = fsbtodb(fs, bno); 29734266Sjulian if (!DOINGSOFTDEP(ITOV(ip))) 29834266Sjulian ffs_blkfree(ip, bprev, (long)osize); 2991541Srgrimes if (nsize < request) 3001541Srgrimes ffs_blkfree(ip, bno + numfrags(fs, nsize), 3011541Srgrimes (long)(request - nsize)); 3021541Srgrimes ip->i_blocks += btodb(nsize - osize); 3031541Srgrimes ip->i_flag |= IN_CHANGE | IN_UPDATE; 3047399Sdg allocbuf(bp, nsize); 3051541Srgrimes bp->b_flags |= B_DONE; 3061541Srgrimes bzero((char *)bp->b_data + osize, (u_int)nsize - osize); 3071541Srgrimes *bpp = bp; 3081541Srgrimes return (0); 3091541Srgrimes } 3101541Srgrimes#ifdef QUOTA 3111541Srgrimes /* 3121541Srgrimes * Restore user's disk quota because allocation failed. 
3131541Srgrimes */ 3141541Srgrimes (void) chkdq(ip, (long)-btodb(nsize - osize), cred, FORCE); 3151541Srgrimes#endif 3161541Srgrimes brelse(bp); 3171541Srgrimesnospace: 3181541Srgrimes /* 3191541Srgrimes * no space available 3201541Srgrimes */ 3211541Srgrimes ffs_fserr(fs, cred->cr_uid, "file system full"); 3221541Srgrimes uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt); 3231541Srgrimes return (ENOSPC); 3241541Srgrimes} 3251541Srgrimes 32631351Sbde#ifdef notyet 32738907SbdeSYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem"); 32838907Sbde 3291541Srgrimes/* 3301541Srgrimes * Reallocate a sequence of blocks into a contiguous sequence of blocks. 3311541Srgrimes * 3321541Srgrimes * The vnode and an array of buffer pointers for a range of sequential 3331541Srgrimes * logical blocks to be made contiguous is given. The allocator attempts 3341541Srgrimes * to find a range of sequential blocks starting as close as possible to 3351541Srgrimes * an fs_rotdelay offset from the end of the allocation for the logical 3361541Srgrimes * block immediately preceeding the current range. If successful, the 3371541Srgrimes * physical block numbers in the buffer pointers and in the inode are 3381541Srgrimes * changed to reflect the new allocation. If unsuccessful, the allocation 3391541Srgrimes * is left unchanged. The success in doing the reallocation is returned. 3401541Srgrimes * Note that the error return is not reflected back to the user. Rather 3411541Srgrimes * the previous block allocation will be used. 
3421541Srgrimes */ 34312911Sphkstatic int doasyncfree = 1; 34422521SdysonSYSCTL_INT(_vfs_ffs, FFS_ASYNCFREE, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, ""); 34522521Sdyson 34631352Sbdestatic int doreallocblks = 1; 34722521SdysonSYSCTL_INT(_vfs_ffs, FFS_REALLOCBLKS, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, ""); 34822521Sdyson 34931351Sbdestatic int prtrealloc = 0; 35031351Sbde#endif 35131351Sbde 3521541Srgrimesint 3531541Srgrimesffs_reallocblks(ap) 3541541Srgrimes struct vop_reallocblks_args /* { 3551541Srgrimes struct vnode *a_vp; 3561541Srgrimes struct cluster_save *a_buflist; 3571541Srgrimes } */ *ap; 3581541Srgrimes{ 35912911Sphk#if !defined (not_yes) 36012405Sdyson return (ENOSPC); 36112405Sdyson#else 3621541Srgrimes struct fs *fs; 3631541Srgrimes struct inode *ip; 3641541Srgrimes struct vnode *vp; 3651541Srgrimes struct buf *sbp, *ebp; 36622521Sdyson ufs_daddr_t *bap, *sbap, *ebap = 0; 3671541Srgrimes struct cluster_save *buflist; 36822521Sdyson ufs_daddr_t start_lbn, end_lbn, soff, newblk, blkno; 3691541Srgrimes struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp; 3701541Srgrimes int i, len, start_lvl, end_lvl, pref, ssize; 37110269Sbde struct timeval tv; 3721541Srgrimes 37322521Sdyson if (doreallocblks == 0) 37422521Sdyson return (ENOSPC); 3751541Srgrimes vp = ap->a_vp; 3761541Srgrimes ip = VTOI(vp); 3771541Srgrimes fs = ip->i_fs; 3781541Srgrimes if (fs->fs_contigsumsize <= 0) 3791541Srgrimes return (ENOSPC); 3801541Srgrimes buflist = ap->a_buflist; 3811541Srgrimes len = buflist->bs_nchildren; 3821541Srgrimes start_lbn = buflist->bs_children[0]->b_lblkno; 3831541Srgrimes end_lbn = start_lbn + len - 1; 3841541Srgrimes#ifdef DIAGNOSTIC 38522521Sdyson for (i = 0; i < len; i++) 38622521Sdyson if (!ffs_checkblk(ip, 38722521Sdyson dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize)) 38822521Sdyson panic("ffs_reallocblks: unallocated block 1"); 3891541Srgrimes for (i = 1; i < len; i++) 3901541Srgrimes if (buflist->bs_children[i]->b_lblkno != 
start_lbn + i) 39122521Sdyson panic("ffs_reallocblks: non-logical cluster"); 39222521Sdyson blkno = buflist->bs_children[0]->b_blkno; 39322521Sdyson ssize = fsbtodb(fs, fs->fs_frag); 39422521Sdyson for (i = 1; i < len - 1; i++) 39522521Sdyson if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize)) 39622521Sdyson panic("ffs_reallocblks: non-physical cluster %d", i); 3971541Srgrimes#endif 3981541Srgrimes /* 3991541Srgrimes * If the latest allocation is in a new cylinder group, assume that 4001541Srgrimes * the filesystem has decided to move and do not force it back to 4011541Srgrimes * the previous cylinder group. 4021541Srgrimes */ 4031541Srgrimes if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) != 4041541Srgrimes dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno))) 4051541Srgrimes return (ENOSPC); 4061541Srgrimes if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) || 4071541Srgrimes ufs_getlbns(vp, end_lbn, end_ap, &end_lvl)) 4081541Srgrimes return (ENOSPC); 4091541Srgrimes /* 4101541Srgrimes * Get the starting offset and block map for the first block. 4111541Srgrimes */ 4121541Srgrimes if (start_lvl == 0) { 4131541Srgrimes sbap = &ip->i_db[0]; 4141541Srgrimes soff = start_lbn; 4151541Srgrimes } else { 4161541Srgrimes idp = &start_ap[start_lvl - 1]; 4171541Srgrimes if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) { 4181541Srgrimes brelse(sbp); 4191541Srgrimes return (ENOSPC); 4201541Srgrimes } 42122521Sdyson sbap = (ufs_daddr_t *)sbp->b_data; 4221541Srgrimes soff = idp->in_off; 4231541Srgrimes } 4241541Srgrimes /* 4251541Srgrimes * Find the preferred location for the cluster. 4261541Srgrimes */ 4271541Srgrimes pref = ffs_blkpref(ip, start_lbn, soff, sbap); 4281541Srgrimes /* 4291541Srgrimes * If the block range spans two block maps, get the second map. 
4301541Srgrimes */ 4311541Srgrimes if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) { 4321541Srgrimes ssize = len; 4331541Srgrimes } else { 4341541Srgrimes#ifdef DIAGNOSTIC 4351541Srgrimes if (start_ap[start_lvl-1].in_lbn == idp->in_lbn) 4361541Srgrimes panic("ffs_reallocblk: start == end"); 4371541Srgrimes#endif 4381541Srgrimes ssize = len - (idp->in_off + 1); 4391541Srgrimes if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp)) 4401541Srgrimes goto fail; 44122521Sdyson ebap = (ufs_daddr_t *)ebp->b_data; 4421541Srgrimes } 4431541Srgrimes /* 4441541Srgrimes * Search the block map looking for an allocation of the desired size. 4451541Srgrimes */ 44622521Sdyson if ((newblk = (ufs_daddr_t)ffs_hashalloc(ip, dtog(fs, pref), (long)pref, 44712590Sbde len, ffs_clusteralloc)) == 0) 4481541Srgrimes goto fail; 4491541Srgrimes /* 4501541Srgrimes * We have found a new contiguous block. 4511541Srgrimes * 4521541Srgrimes * First we have to replace the old block pointers with the new 4531541Srgrimes * block pointers in the inode and indirect blocks associated 4541541Srgrimes * with the file. 
4551541Srgrimes */ 45622521Sdyson#ifdef DEBUG 45722521Sdyson if (prtrealloc) 45822521Sdyson printf("realloc: ino %d, lbns %d-%d\n\told:", ip->i_number, 45922521Sdyson start_lbn, end_lbn); 46022521Sdyson#endif 4611541Srgrimes blkno = newblk; 4621541Srgrimes for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) { 46334266Sjulian if (i == ssize) { 4641541Srgrimes bap = ebap; 46534266Sjulian soff = -i; 46634266Sjulian } 4671541Srgrimes#ifdef DIAGNOSTIC 46822521Sdyson if (!ffs_checkblk(ip, 46922521Sdyson dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize)) 47022521Sdyson panic("ffs_reallocblks: unallocated block 2"); 47122521Sdyson if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap) 4721541Srgrimes panic("ffs_reallocblks: alloc mismatch"); 4731541Srgrimes#endif 47422521Sdyson#ifdef DEBUG 47522521Sdyson if (prtrealloc) 47622521Sdyson printf(" %d,", *bap); 47722521Sdyson#endif 47834266Sjulian if (DOINGSOFTDEP(vp)) { 47934266Sjulian if (sbap == &ip->i_db[0] && i < ssize) 48034266Sjulian softdep_setup_allocdirect(ip, start_lbn + i, 48134266Sjulian blkno, *bap, fs->fs_bsize, fs->fs_bsize, 48234266Sjulian buflist->bs_children[i]); 48334266Sjulian else 48434266Sjulian softdep_setup_allocindir_page(ip, start_lbn + i, 48534266Sjulian i < ssize ? sbp : ebp, soff + i, blkno, 48634266Sjulian *bap, buflist->bs_children[i]); 48734266Sjulian } 4881541Srgrimes *bap++ = blkno; 4891541Srgrimes } 4901541Srgrimes /* 4911541Srgrimes * Next we must write out the modified inode and indirect blocks. 4921541Srgrimes * For strict correctness, the writes should be synchronous since 4931541Srgrimes * the old block values may have been written to disk. In practise 4948876Srgrimes * they are almost never written, but if we are concerned about 4951541Srgrimes * strict correctness, the `doasyncfree' flag should be set to zero. 
4961541Srgrimes * 4971541Srgrimes * The test on `doasyncfree' should be changed to test a flag 4981541Srgrimes * that shows whether the associated buffers and inodes have 4991541Srgrimes * been written. The flag should be set when the cluster is 5001541Srgrimes * started and cleared whenever the buffer or inode is flushed. 5011541Srgrimes * We can then check below to see if it is set, and do the 5021541Srgrimes * synchronous write only when it has been cleared. 5031541Srgrimes */ 5041541Srgrimes if (sbap != &ip->i_db[0]) { 5051541Srgrimes if (doasyncfree) 5061541Srgrimes bdwrite(sbp); 5071541Srgrimes else 5081541Srgrimes bwrite(sbp); 5091541Srgrimes } else { 5101541Srgrimes ip->i_flag |= IN_CHANGE | IN_UPDATE; 51110269Sbde if (!doasyncfree) { 51224101Sbde gettime(&tv); 51330492Sphk UFS_UPDATE(vp, &tv, &tv, 1); 51410269Sbde } 5151541Srgrimes } 5161541Srgrimes if (ssize < len) 5171541Srgrimes if (doasyncfree) 5181541Srgrimes bdwrite(ebp); 5191541Srgrimes else 5201541Srgrimes bwrite(ebp); 5211541Srgrimes /* 5221541Srgrimes * Last, free the old blocks and assign the new blocks to the buffers. 
5231541Srgrimes */ 52422521Sdyson#ifdef DEBUG 52522521Sdyson if (prtrealloc) 52622521Sdyson printf("\n\tnew:"); 52722521Sdyson#endif 5281541Srgrimes for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) { 52934266Sjulian if (!DOINGSOFTDEP(vp)) 53034266Sjulian ffs_blkfree(ip, 53134266Sjulian dbtofsb(fs, buflist->bs_children[i]->b_blkno), 53234266Sjulian fs->fs_bsize); 5331541Srgrimes buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno); 53422521Sdyson#ifdef DEBUG 53522521Sdyson if (!ffs_checkblk(ip, 53622521Sdyson dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize)) 53722521Sdyson panic("ffs_reallocblks: unallocated block 3"); 53822521Sdyson if (prtrealloc) 53922521Sdyson printf(" %d,", blkno); 54022521Sdyson#endif 5411541Srgrimes } 54222521Sdyson#ifdef DEBUG 54322521Sdyson if (prtrealloc) { 54422521Sdyson prtrealloc--; 54522521Sdyson printf("\n"); 54622521Sdyson } 54722521Sdyson#endif 5481541Srgrimes return (0); 5491541Srgrimes 5501541Srgrimesfail: 5511541Srgrimes if (ssize < len) 5521541Srgrimes brelse(ebp); 5531541Srgrimes if (sbap != &ip->i_db[0]) 5541541Srgrimes brelse(sbp); 5551541Srgrimes return (ENOSPC); 55612405Sdyson#endif 5571541Srgrimes} 5581541Srgrimes 5591541Srgrimes/* 5601541Srgrimes * Allocate an inode in the file system. 5618876Srgrimes * 5621541Srgrimes * If allocating a directory, use ffs_dirpref to select the inode. 5631541Srgrimes * If allocating in a directory, the following hierarchy is followed: 5641541Srgrimes * 1) allocate the preferred inode. 5651541Srgrimes * 2) allocate an inode in the same cylinder group. 5661541Srgrimes * 3) quadradically rehash into other cylinder groups, until an 5671541Srgrimes * available inode is located. 5681541Srgrimes * If no inode preference is given the following heirarchy is used 5691541Srgrimes * to allocate an inode: 5701541Srgrimes * 1) allocate an inode in cylinder group 0. 
5711541Srgrimes * 2) quadradically rehash into other cylinder groups, until an 5721541Srgrimes * available inode is located. 5731541Srgrimes */ 5741549Srgrimesint 57530474Sphkffs_valloc(pvp, mode, cred, vpp) 57630474Sphk struct vnode *pvp; 57730474Sphk int mode; 57830474Sphk struct ucred *cred; 57930474Sphk struct vnode **vpp; 5801541Srgrimes{ 5811541Srgrimes register struct inode *pip; 5821541Srgrimes register struct fs *fs; 5831541Srgrimes register struct inode *ip; 5841541Srgrimes ino_t ino, ipref; 5851541Srgrimes int cg, error; 5868876Srgrimes 58730474Sphk *vpp = NULL; 5881541Srgrimes pip = VTOI(pvp); 5891541Srgrimes fs = pip->i_fs; 5901541Srgrimes if (fs->fs_cstotal.cs_nifree == 0) 5911541Srgrimes goto noinodes; 5921541Srgrimes 5931541Srgrimes if ((mode & IFMT) == IFDIR) 5941541Srgrimes ipref = ffs_dirpref(fs); 5951541Srgrimes else 5961541Srgrimes ipref = pip->i_number; 5971541Srgrimes if (ipref >= fs->fs_ncg * fs->fs_ipg) 5981541Srgrimes ipref = 0; 5991541Srgrimes cg = ino_to_cg(fs, ipref); 60012861Speter ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode, 60112861Speter (allocfcn_t *)ffs_nodealloccg); 6021541Srgrimes if (ino == 0) 6031541Srgrimes goto noinodes; 60430474Sphk error = VFS_VGET(pvp->v_mount, ino, vpp); 6051541Srgrimes if (error) { 60630474Sphk UFS_VFREE(pvp, ino, mode); 6071541Srgrimes return (error); 6081541Srgrimes } 60930474Sphk ip = VTOI(*vpp); 6101541Srgrimes if (ip->i_mode) { 61137555Sbde printf("mode = 0%o, inum = %lu, fs = %s\n", 61237555Sbde ip->i_mode, (u_long)ip->i_number, fs->fs_fsmnt); 6131541Srgrimes panic("ffs_valloc: dup alloc"); 6141541Srgrimes } 6151541Srgrimes if (ip->i_blocks) { /* XXX */ 61637555Sbde printf("free inode %s/%lu had %ld blocks\n", 61737555Sbde fs->fs_fsmnt, (u_long)ino, (long)ip->i_blocks); 6181541Srgrimes ip->i_blocks = 0; 6191541Srgrimes } 6201541Srgrimes ip->i_flags = 0; 6211541Srgrimes /* 6221541Srgrimes * Set up a new generation number for this inode. 
6231541Srgrimes */ 62431484Sbde if (ip->i_gen == 0 || ++ip->i_gen == 0) 62524149Sguido ip->i_gen = random() / 2 + 1; 6261541Srgrimes return (0); 6271541Srgrimesnoinodes: 62830474Sphk ffs_fserr(fs, cred->cr_uid, "out of inodes"); 6291541Srgrimes uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt); 6301541Srgrimes return (ENOSPC); 6311541Srgrimes} 6321541Srgrimes 6331541Srgrimes/* 6341541Srgrimes * Find a cylinder to place a directory. 6351541Srgrimes * 6361541Srgrimes * The policy implemented by this algorithm is to select from 6371541Srgrimes * among those cylinder groups with above the average number of 6381541Srgrimes * free inodes, the one with the smallest number of directories. 6391541Srgrimes */ 6401541Srgrimesstatic ino_t 6411541Srgrimesffs_dirpref(fs) 6421541Srgrimes register struct fs *fs; 6431541Srgrimes{ 6441541Srgrimes int cg, minndir, mincg, avgifree; 6451541Srgrimes 6461541Srgrimes avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg; 6471541Srgrimes minndir = fs->fs_ipg; 6481541Srgrimes mincg = 0; 6491541Srgrimes for (cg = 0; cg < fs->fs_ncg; cg++) 6501541Srgrimes if (fs->fs_cs(fs, cg).cs_ndir < minndir && 6511541Srgrimes fs->fs_cs(fs, cg).cs_nifree >= avgifree) { 6521541Srgrimes mincg = cg; 6531541Srgrimes minndir = fs->fs_cs(fs, cg).cs_ndir; 6541541Srgrimes } 6551541Srgrimes return ((ino_t)(fs->fs_ipg * mincg)); 6561541Srgrimes} 6571541Srgrimes 6581541Srgrimes/* 6591541Srgrimes * Select the desired position for the next block in a file. The file is 6601541Srgrimes * logically divided into sections. The first section is composed of the 6611541Srgrimes * direct blocks. Each additional section contains fs_maxbpg blocks. 6628876Srgrimes * 6631541Srgrimes * If no blocks have been allocated in the first section, the policy is to 6641541Srgrimes * request a block in the same cylinder group as the inode that describes 6651541Srgrimes * the file. 
If no blocks have been allocated in any other section, the 6661541Srgrimes * policy is to place the section in a cylinder group with a greater than 6671541Srgrimes * average number of free blocks. An appropriate cylinder group is found 6681541Srgrimes * by using a rotor that sweeps the cylinder groups. When a new group of 6691541Srgrimes * blocks is needed, the sweep begins in the cylinder group following the 6701541Srgrimes * cylinder group from which the previous allocation was made. The sweep 6711541Srgrimes * continues until a cylinder group with greater than the average number 6721541Srgrimes * of free blocks is found. If the allocation is for the first block in an 6731541Srgrimes * indirect block, the information on the previous allocation is unavailable; 6741541Srgrimes * here a best guess is made based upon the logical block number being 6751541Srgrimes * allocated. 6768876Srgrimes * 6771541Srgrimes * If a section is already partially allocated, the policy is to 6781541Srgrimes * contiguously allocate fs_maxcontig blocks. The end of one of these 6791541Srgrimes * contiguous blocks and the beginning of the next is physically separated 6801541Srgrimes * so that the disk head will be in transit between them for at least 6811541Srgrimes * fs_rotdelay milliseconds. This is to allow time for the processor to 6821541Srgrimes * schedule another I/O transfer. 
6831541Srgrimes */ 68422521Sdysonufs_daddr_t 6851541Srgrimesffs_blkpref(ip, lbn, indx, bap) 6861541Srgrimes struct inode *ip; 68722521Sdyson ufs_daddr_t lbn; 6881541Srgrimes int indx; 68922521Sdyson ufs_daddr_t *bap; 6901541Srgrimes{ 6911541Srgrimes register struct fs *fs; 6921541Srgrimes register int cg; 6931541Srgrimes int avgbfree, startcg; 69422521Sdyson ufs_daddr_t nextblk; 6951541Srgrimes 6961541Srgrimes fs = ip->i_fs; 6971541Srgrimes if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) { 6981541Srgrimes if (lbn < NDADDR) { 6991541Srgrimes cg = ino_to_cg(fs, ip->i_number); 7001541Srgrimes return (fs->fs_fpg * cg + fs->fs_frag); 7011541Srgrimes } 7021541Srgrimes /* 7031541Srgrimes * Find a cylinder with greater than average number of 7041541Srgrimes * unused data blocks. 7051541Srgrimes */ 7061541Srgrimes if (indx == 0 || bap[indx - 1] == 0) 7071541Srgrimes startcg = 7081541Srgrimes ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg; 7091541Srgrimes else 7101541Srgrimes startcg = dtog(fs, bap[indx - 1]) + 1; 7111541Srgrimes startcg %= fs->fs_ncg; 7121541Srgrimes avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg; 7131541Srgrimes for (cg = startcg; cg < fs->fs_ncg; cg++) 7141541Srgrimes if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 7151541Srgrimes fs->fs_cgrotor = cg; 7161541Srgrimes return (fs->fs_fpg * cg + fs->fs_frag); 7171541Srgrimes } 7181541Srgrimes for (cg = 0; cg <= startcg; cg++) 7191541Srgrimes if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 7201541Srgrimes fs->fs_cgrotor = cg; 7211541Srgrimes return (fs->fs_fpg * cg + fs->fs_frag); 7221541Srgrimes } 72317108Sbde return (0); 7241541Srgrimes } 7251541Srgrimes /* 7261541Srgrimes * One or more previous blocks have been laid out. If less 7271541Srgrimes * than fs_maxcontig previous blocks are contiguous, the 7281541Srgrimes * next block is requested contiguously, otherwise it is 7291541Srgrimes * requested rotationally delayed by fs_rotdelay milliseconds. 
7301541Srgrimes */ 7311541Srgrimes nextblk = bap[indx - 1] + fs->fs_frag; 73210632Sdg if (fs->fs_rotdelay == 0 || indx < fs->fs_maxcontig || 73310632Sdg bap[indx - fs->fs_maxcontig] + 7341541Srgrimes blkstofrags(fs, fs->fs_maxcontig) != nextblk) 7351541Srgrimes return (nextblk); 73610632Sdg /* 73710632Sdg * Here we convert ms of delay to frags as: 73810632Sdg * (frags) = (ms) * (rev/sec) * (sect/rev) / 73910632Sdg * ((sect/frag) * (ms/sec)) 74010632Sdg * then round up to the next block. 74110632Sdg */ 74210632Sdg nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect / 74310632Sdg (NSPF(fs) * 1000), fs->fs_frag); 7441541Srgrimes return (nextblk); 7451541Srgrimes} 7461541Srgrimes 7471541Srgrimes/* 7481541Srgrimes * Implement the cylinder overflow algorithm. 7491541Srgrimes * 7501541Srgrimes * The policy implemented by this algorithm is: 7511541Srgrimes * 1) allocate the block in its requested cylinder group. 7521541Srgrimes * 2) quadradically rehash on the cylinder group number. 7531541Srgrimes * 3) brute force search for a free block. 7541541Srgrimes */ 7551541Srgrimes/*VARARGS5*/ 7561541Srgrimesstatic u_long 7571541Srgrimesffs_hashalloc(ip, cg, pref, size, allocator) 7581541Srgrimes struct inode *ip; 7591541Srgrimes int cg; 7601541Srgrimes long pref; 7611541Srgrimes int size; /* size for data blocks, mode for inodes */ 76212590Sbde allocfcn_t *allocator; 7631541Srgrimes{ 7641541Srgrimes register struct fs *fs; 76512590Sbde long result; /* XXX why not same type as we return? 
*/ 7661541Srgrimes int i, icg = cg; 7671541Srgrimes 7681541Srgrimes fs = ip->i_fs; 7691541Srgrimes /* 7701541Srgrimes * 1: preferred cylinder group 7711541Srgrimes */ 7721541Srgrimes result = (*allocator)(ip, cg, pref, size); 7731541Srgrimes if (result) 7741541Srgrimes return (result); 7751541Srgrimes /* 7761541Srgrimes * 2: quadratic rehash 7771541Srgrimes */ 7781541Srgrimes for (i = 1; i < fs->fs_ncg; i *= 2) { 7791541Srgrimes cg += i; 7801541Srgrimes if (cg >= fs->fs_ncg) 7811541Srgrimes cg -= fs->fs_ncg; 7821541Srgrimes result = (*allocator)(ip, cg, 0, size); 7831541Srgrimes if (result) 7841541Srgrimes return (result); 7851541Srgrimes } 7861541Srgrimes /* 7871541Srgrimes * 3: brute force search 7881541Srgrimes * Note that we start at i == 2, since 0 was checked initially, 7891541Srgrimes * and 1 is always checked in the quadratic rehash. 7901541Srgrimes */ 7911541Srgrimes cg = (icg + 2) % fs->fs_ncg; 7921541Srgrimes for (i = 2; i < fs->fs_ncg; i++) { 7931541Srgrimes result = (*allocator)(ip, cg, 0, size); 7941541Srgrimes if (result) 7951541Srgrimes return (result); 7961541Srgrimes cg++; 7971541Srgrimes if (cg == fs->fs_ncg) 7981541Srgrimes cg = 0; 7991541Srgrimes } 80012590Sbde return (0); 8011541Srgrimes} 8021541Srgrimes 8031541Srgrimes/* 8041541Srgrimes * Determine whether a fragment can be extended. 8051541Srgrimes * 8068876Srgrimes * Check to see if the necessary fragments are available, and 8071541Srgrimes * if they are, allocate them. 
 */
static ufs_daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;			/* cylinder group holding bprev */
	long bprev;		/* fs-relative address of existing fragment */
	int osize, nsize;	/* old and desired sizes, in bytes */
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;

	fs = ip->i_fs;
	/* Quick reject from the per-cg summary before reading the cg. */
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (0);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (0);
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time_second;
	bno = dtogd(fs, bprev);
	/* All fragments beyond osize up to nsize must currently be free. */
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cg_blksfree(cgp), bno + i)) {
			brelse(bp);
			return (0);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cg_blksfree(cgp), bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cg_blksfree(cgp), bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod = 1;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, fs, bprev);
	/* Delayed write of the cg block; returns the (unchanged) address. */
	bdwrite(bp);
	return (bprev);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.  Returns 0 on failure.
 */
static ufs_daddr_t
ffs_alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;			/* cylinder group to allocate from */
	ufs_daddr_t bpref;	/* preferred fs-relative block, or 0 */
	int size;		/* bytes requested (<= fs_bsize) */
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	register int i;
	ufs_daddr_t bno, blkno;
	int allocsiz, error, frags;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time_second;
	if (size == fs->fs_bsize) {
		/* Whole-block request: delegate to the block allocator. */
		bno = ffs_alloccgblk(ip, bp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		/* Mark the unused tail of the new block free again. */
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp), bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);
		return (0);
	}
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp), bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	/* Adjust frsum: the found run shrinks; any remainder is re-counted. */
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	blkno = cg * fs->fs_fpg + bno;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, fs, blkno);
	bdwrite(bp);
	return ((u_long)blkno);
}

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static ufs_daddr_t
ffs_alloccgblk(ip, bp, bpref)
	struct inode *ip;
	struct buf *bp;		/* buffer holding the cylinder group */
	ufs_daddr_t bpref;	/* preferred fs-relative block, or 0 */
{
	struct fs *fs;
	struct cg *cgp;
	ufs_daddr_t bno, blkno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	fs = ip->i_fs;
	cgp = (struct cg *)bp->b_data;
	if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
		/* No usable preference: fall back to this cg's rotor. */
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	if (fs->fs_nrpos <= 1 || fs->fs_cpc == 0) {
		/*
		 * Block layout information is not available.
		 * Leaving bpref unchanged means we take the
		 * next available free block following the one
		 * we just allocated. Hopefully this will at
		 * least hit a track cache on drives of unknown
		 * geometry (e.g. SCSI).
		 */
		goto norot;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block. A panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("ffs_alloccgblk: cyl groups corrupted");
		}
		/* Walk the rotational table until a free block is found. */
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (ffs_isblock(fs, cg_blksfree(cgp), bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("ffs_alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = bno;
gotit:
	/* Commit: clear the free bit and update all summary counts. */
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, cg_blksfree(cgp), (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod = 1;
	blkno = cgp->cg_cgx * fs->fs_fpg + bno;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, fs, blkno);
	return (blkno);
}

#ifdef notyet
/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group. Instead we just
 * take the first one that we find following bpref.
 */
static ufs_daddr_t
ffs_clusteralloc(ip, cg, bpref, len)
	struct inode *ip;
	int cg;			/* cylinder group to search */
	ufs_daddr_t bpref;	/* preferred starting block, or outside cg */
	int len;		/* cluster length, in blocks */
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int i, got, run, bno, bit, map;
	u_char *mapp;
	int32_t *lp;

	fs = ip->i_fs;
	/* Cached per-cg maximum lets us skip groups that cannot satisfy us. */
	if (fs->fs_maxcluster[cg] < len)
		return (NULL);
	if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    NOCRED, &bp))
		goto fail;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		goto fail;
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	lp = &cg_clustersum(cgp)[len];
	for (i = len; i <= fs->fs_contigsumsize; i++)
		if (*lp++ > 0)
			break;
	if (i > fs->fs_contigsumsize) {
		/*
		 * This is the first time looking for a cluster in this
		 * cylinder group. Update the cluster summary information
		 * to reflect the true maximum sized cluster so that
		 * future cluster allocation requests can avoid reading
		 * the cylinder group map only to find no clusters.
		 */
		lp = &cg_clustersum(cgp)[len - 1];
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		fs->fs_maxcluster[cg] = i;
		goto fail;
	}
	/*
	 * Search the cluster map to find a big enough cluster.
	 * We take the first one that we find, even if it is larger
	 * than we need as we prefer to get one close to the previous
	 * block allocation. We do not search before the current
	 * preference point as we do not want to allocate a block
	 * that is allocated before the previous one (as we will
	 * then have to wait for another pass of the elevator
	 * algorithm before it will be read). We prefer to fail and
	 * be recalled to try an allocation in the next cylinder group.
	 */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	else
		bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref)));
	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
	map = *mapp++;
	bit = 1 << (bpref % NBBY);
	/* Scan bit-by-bit, counting the current run of free blocks. */
	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
		if ((map & bit) == 0) {
			run = 0;
		} else {
			run++;
			if (run == len)
				break;
		}
		if ((got & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	if (got >= cgp->cg_nclusterblks)
		goto fail;
	/*
	 * Allocate the cluster that we have found.
	 */
	for (i = 1; i <= len; i++)
		if (!ffs_isblock(fs, cg_blksfree(cgp), got - run + i))
			panic("ffs_clusteralloc: map mismatch");
	bno = cg * fs->fs_fpg + blkstofrags(fs, got - run + 1);
	if (dtog(fs, bno) != cg)
		panic("ffs_clusteralloc: allocated out of group");
	len = blkstofrags(fs, len);
	for (i = 0; i < len; i += fs->fs_frag)
		if ((got = ffs_alloccgblk(ip, bp, bno + i)) != bno + i)
			panic("ffs_clusteralloc: lost block");
	bdwrite(bp);
	return (bno);

fail:
	brelse(bp);
	return (0);
}
#endif

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 */
static ino_t
ffs_nodealloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;			/* cylinder group to allocate from */
	ufs_daddr_t ipref;	/* preferred inode number, or 0 */
	int mode;		/* file mode; IFDIR bumps directory counts */
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, start, len, loc, map, i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (0);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time_second;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cg_inosused(cgp), ipref))
			goto gotit;
	}
	/*
	 * Search the used-inode bitmap from the rotor forward,
	 * then wrap to the start; skpc() skips all-allocated bytes.
	 */
	start = cgp->cg_irotor / NBBY;
	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
	loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
		if (loc == 0) {
			printf("cg = %d, irotor = %ld, fs = %s\n",
			    cg, (long)cgp->cg_irotor, fs->fs_fsmnt);
			panic("ffs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	/* Found a byte with a free bit; locate the bit within it. */
	i = start + len - loc;
	map = cg_inosused(cgp)[i];
	ipref = i * NBBY;
	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
		if ((map & i) == 0) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	printf("fs = %s\n", fs->fs_fsmnt);
	panic("ffs_nodealloccg: block not in map");
	/* NOTREACHED */
gotit:
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref);
	setbit(cg_inosused(cgp), ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(ip, bno, size)
	register struct inode *ip;
	ufs_daddr_t bno;	/* fs-relative block being freed */
	long size;		/* size in bytes (block or fragment run) */
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	ufs_daddr_t blkno;
	int i, error, cg, blk, frags, bbase;

	fs = ip->i_fs;
	/* Tell the device layer these device blocks are now free. */
	VOP_FREEBLKS(ip->i_devvp, fsbtodb(fs, bno), size);
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		printf("dev=0x%lx, bno = %d, bsize = %d, size = %ld, fs = %s\n",
		    (u_long)ip->i_dev, bno, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("ffs_blkfree: bad size");
	}
	cg = dtog(fs, bno);
	if ((u_int)bno >= fs->fs_size) {
		printf("bad block %ld, ino %lu\n",
		    (long)bno, (u_long)ip->i_number);
		ffs_fserr(fs, ip->i_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time_second;
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		/* Whole block: set its bit and bump the block counts. */
		blkno = fragstoblks(fs, bno);
		if (!ffs_isfreeblock(fs, cg_blksfree(cgp), blkno)) {
			printf("dev = 0x%lx, block = %ld, fs = %s\n",
			    (u_long)ip->i_dev, (long)bno, fs->fs_fsmnt);
			panic("ffs_blkfree: freeing free block");
		}
		ffs_setblock(fs, cg_blksfree(cgp), blkno);
		ffs_clusteracct(fs, cgp, blkno, 1);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp), bno + i)) {
				printf("dev = 0x%lx, block = %ld, fs = %s\n",
				    (u_long)ip->i_dev, (long)(bno + i),
				    fs->fs_fsmnt);
				panic("ffs_blkfree: freeing free frag");
			}
			setbit(cg_blksfree(cgp), bno + i);
		}
		/* i == frags here: account for the freed fragments. */
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		blkno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, blkno, 1);
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}

#ifdef DIAGNOSTIC
/*
 * Verify allocation of a block or fragment. Returns true if block or
 * fragment is allocated, false if it is free.
 */
static int
ffs_checkblk(ip, bno, size)
	struct inode *ip;
	ufs_daddr_t bno;
	long size;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	int i, error, frags, free;

	fs = ip->i_fs;
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("bsize = %ld, size = %ld, fs = %s\n",
		    (long)fs->fs_bsize, size, fs->fs_fsmnt);
		panic("ffs_checkblk: bad size");
	}
	if ((u_int)bno >= fs->fs_size)
		panic("ffs_checkblk: bad block %d", bno);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, dtog(fs, bno))),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error)
		panic("ffs_checkblk: cg bread failed");
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		panic("ffs_checkblk: cg magic mismatch");
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		free = ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno));
	} else {
		frags = numfrags(fs, size);
		for (free = 0, i = 0; i < frags; i++)
			if (isset(cg_blksfree(cgp), bno + i))
				free++;
		/* A fragment run must be entirely free or entirely used. */
		if (free != 0 && free != frags)
			panic("ffs_checkblk: partially free fragment");
	}
	brelse(bp);
	return (!free);
}
#endif /* DIAGNOSTIC */

/*
 * Free an inode.
 *
 * With soft updates the free is recorded as a dependency and
 * performed later; otherwise it is done immediately.
 */
int
ffs_vfree(pvp, ino, mode)
	struct vnode *pvp;
	ino_t ino;
	int mode;
{
	if (DOINGSOFTDEP(pvp)) {
		softdep_freefile(pvp, ino, mode);
		return (0);
	}
	return (ffs_freefile(pvp, ino, mode));
}

/*
 * Do the actual free operation.
 * The specified inode is placed back in the free map.
 *
 * NOTE(review): the panic messages below still say "ffs_vfree" —
 * presumably left over from before this code was split out of
 * ffs_vfree(); consider renaming them to "ffs_freefile".
 */
int
ffs_freefile(pvp, ino, mode)
	struct vnode *pvp;	/* vnode of the device (parent) */
	ino_t ino;		/* inode number to free */
	int mode;		/* mode; IFDIR drops directory counts */
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct inode *pip;
	struct buf *bp;
	int error, cg;

	pip = VTOI(pvp);
	fs = pip->i_fs;
	if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
		panic("ffs_vfree: range: dev = 0x%x, ino = %d, fs = %s",
		    pip->i_dev, ino, fs->fs_fsmnt);
	cg = ino_to_cg(fs, ino);
	error = bread(pip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time_second;
	ino %= fs->fs_ipg;
	if (isclr(cg_inosused(cgp), ino)) {
		printf("dev = 0x%lx, ino = %lu, fs = %s\n",
		    (u_long)pip->i_dev, (u_long)ino, fs->fs_fsmnt);
		/* Double free is tolerated on read-only filesystems. */
		if (fs->fs_ronly == 0)
			panic("ffs_vfree: freeing free inode");
	}
	clrbit(cg_inosused(cgp), ino);
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
	return (0);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
static ufs_daddr_t
ffs_mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	ufs_daddr_t bpref;	/* preferred starting point, or 0 */
	int allocsiz;		/* number of fragments needed */
{
	ufs_daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	/* scanc() finds the first byte whose fragtbl entry matches. */
	loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[start],
		(u_char *)fragtbl[fs->fs_frag],
		(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		/* Not found after the rotor; wrap and search the front. */
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[0],
			(u_char *)fragtbl[fs->fs_frag],
			(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->fs_fsmnt);
			panic("ffs_alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
	panic("ffs_alloccg: block not in map");
	return (-1);
}

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
static void
ffs_clusteracct(fs, cgp, blkno, cnt)
	struct fs *fs;
	struct cg *cgp;
	ufs_daddr_t blkno;
	int cnt;
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree(cgp);
	sump = cg_clustersum(cgp);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
16111541Srgrimes */ 16121541Srgrimes start = blkno + 1; 16131541Srgrimes end = start + fs->fs_contigsumsize; 16141541Srgrimes if (end >= cgp->cg_nclusterblks) 16151541Srgrimes end = cgp->cg_nclusterblks; 16161541Srgrimes mapp = &freemapp[start / NBBY]; 16171541Srgrimes map = *mapp++; 16181541Srgrimes bit = 1 << (start % NBBY); 16191541Srgrimes for (i = start; i < end; i++) { 16201541Srgrimes if ((map & bit) == 0) 16211541Srgrimes break; 16221541Srgrimes if ((i & (NBBY - 1)) != (NBBY - 1)) { 16231541Srgrimes bit <<= 1; 16241541Srgrimes } else { 16251541Srgrimes map = *mapp++; 16261541Srgrimes bit = 1; 16271541Srgrimes } 16281541Srgrimes } 16291541Srgrimes forw = i - start; 16301541Srgrimes /* 16311541Srgrimes * Find the size of the cluster going backward. 16321541Srgrimes */ 16331541Srgrimes start = blkno - 1; 16341541Srgrimes end = start - fs->fs_contigsumsize; 16351541Srgrimes if (end < 0) 16361541Srgrimes end = -1; 16371541Srgrimes mapp = &freemapp[start / NBBY]; 16381541Srgrimes map = *mapp--; 16391541Srgrimes bit = 1 << (start % NBBY); 16401541Srgrimes for (i = start; i > end; i--) { 16411541Srgrimes if ((map & bit) == 0) 16421541Srgrimes break; 16431541Srgrimes if ((i & (NBBY - 1)) != 0) { 16441541Srgrimes bit >>= 1; 16451541Srgrimes } else { 16461541Srgrimes map = *mapp--; 16471541Srgrimes bit = 1 << (NBBY - 1); 16481541Srgrimes } 16491541Srgrimes } 16501541Srgrimes back = start - i; 16511541Srgrimes /* 16521541Srgrimes * Account for old cluster and the possibly new forward and 16531541Srgrimes * back clusters. 16541541Srgrimes */ 16551541Srgrimes i = back + forw + 1; 16561541Srgrimes if (i > fs->fs_contigsumsize) 16571541Srgrimes i = fs->fs_contigsumsize; 16581541Srgrimes sump[i] += cnt; 16591541Srgrimes if (back > 0) 16601541Srgrimes sump[back] -= cnt; 16611541Srgrimes if (forw > 0) 16621541Srgrimes sump[forw] -= cnt; 166322521Sdyson /* 166422521Sdyson * Update cluster summary information. 
166522521Sdyson */ 166622521Sdyson lp = &sump[fs->fs_contigsumsize]; 166722521Sdyson for (i = fs->fs_contigsumsize; i > 0; i--) 166822521Sdyson if (*lp-- > 0) 166922521Sdyson break; 167022521Sdyson fs->fs_maxcluster[cgp->cg_cgx] = i; 16711541Srgrimes} 16721541Srgrimes 16731541Srgrimes/* 16741541Srgrimes * Fserr prints the name of a file system with an error diagnostic. 16758876Srgrimes * 16761541Srgrimes * The form of the error message is: 16771541Srgrimes * fs: error message 16781541Srgrimes */ 16791541Srgrimesstatic void 16801541Srgrimesffs_fserr(fs, uid, cp) 16811541Srgrimes struct fs *fs; 16821541Srgrimes u_int uid; 16831541Srgrimes char *cp; 16841541Srgrimes{ 168518330Speter struct proc *p = curproc; /* XXX */ 16861541Srgrimes 168718330Speter log(LOG_ERR, "pid %d (%s), uid %d on %s: %s\n", p ? p->p_pid : -1, 168818330Speter p ? p->p_comm : "-", uid, fs->fs_fsmnt, cp); 16891541Srgrimes} 1690