Lines matching references to cg (the block group index)

106 	int cg;
129 cg = ino_to_cg(fs, ip->i_number);
131 cg = dtog(fs, bpref);
132 bno = (daddr_t)ext2_hashalloc(ip, cg, bpref, fs->e2fs_bsize,
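
The first matches are in the block-allocation entry point: the starting group comes either from the owning inode (ino_to_cg) or from the block preference (dtog) before the request is handed to ext2_hashalloc(). Below is a minimal userland sketch of those two mappings, assuming the usual ext2 layout (1-based inode numbers, data blocks counted from the superblock's first data block); the struct and function names are illustrative stand-ins, not the kernel's struct m_ext2fs API.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the few fields the mappings need. */
struct layout {
	uint32_t ipg;		/* inodes per group (e2fs_ipg) */
	uint32_t fpg;		/* blocks per group (e2fs_fpg) */
	uint32_t first_dblock;	/* first data block (0 or 1) */
};

/* Inode numbers are 1-based, so inodes 1..ipg live in group 0. */
static uint32_t
ino_to_group(const struct layout *l, uint32_t ino)
{
	return ((ino - 1) / l->ipg);
}

/* Data blocks are counted from first_dblock, fpg per group. */
static uint32_t
block_to_group(const struct layout *l, uint64_t bno)
{
	return ((bno - l->first_dblock) / l->fpg);
}

int
main(void)
{
	struct layout l = { .ipg = 8192, .fpg = 32768, .first_dblock = 0 };

	printf("inode 8193 -> group %u\n", ino_to_group(&l, 8193));	/* 1 */
	printf("block 65536 -> group %u\n", block_to_group(&l, 65536));	/* 2 */
	return (0);
}
```
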
389 int error, cg;
405 cg = ext2_dirpref(pip);
406 if (fs->e2fs_contigdirs[cg] < 255)
407 fs->e2fs_contigdirs[cg]++;
409 cg = ino_to_cg(fs, pip->i_number);
410 if (fs->e2fs_contigdirs[cg] > 0)
411 fs->e2fs_contigdirs[cg]--;
413 ipref = cg * fs->e2fs_ipg + 1;
414 ino = (ino_t)ext2_hashalloc(pip, cg, (long)ipref, mode, ext2_nodealloccg);
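
These matches cover inode allocation: a new directory gets its group from ext2_dirpref() and bumps the per-group e2fs_contigdirs[] counter (saturating at 255), while an ordinary file reuses the parent's group and decays the counter; the preferred inode is then the first inode of the chosen group. A small sketch of that accounting, with invented names (pick_ipref, contigdirs[]) and a plain array standing in for the kernel state:

```c
#include <stdbool.h>
#include <stdint.h>

#define NGROUPS	16
#define IPG	8192			/* inodes per group, illustrative */

static uint8_t contigdirs[NGROUPS];	/* stand-in for fs->e2fs_contigdirs */

/*
 * Return the preferred (1-based) inode number for a new inode in the
 * chosen group and keep the "recent directories" counter up to date,
 * mirroring the saturate-on-mkdir / decay-on-create behaviour above.
 */
static uint64_t
pick_ipref(uint32_t group, bool is_dir)
{
	if (is_dir) {
		if (contigdirs[group] < 255)
			contigdirs[group]++;
	} else {
		if (contigdirs[group] > 0)
			contigdirs[group]--;
	}
	/* First inode of the group; inode numbers are 1-based. */
	return ((uint64_t)group * IPG + 1);
}

int
main(void)
{
	return (pick_ipref(2, true) == 2ULL * IPG + 1 ? 0 : 1);
}
```
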
599 int cg, prefcg, cgsize;
614 * Force allocation in another cg if creating a first level dir.
621 for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
622 if (e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) < minndir &&
623 e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= avgifree &&
624 e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) >= avgbfree) {
625 mincg = cg;
626 minndir = e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]);
628 for (cg = 0; cg < prefcg; cg++)
629 if (e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) < minndir &&
630 e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= avgifree &&
631 e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) >= avgbfree) {
632 mincg = cg;
633 minndir = e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]);
660 * Limit number of dirs in one cg and reserve space for
665 for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
666 if (e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) < maxndir &&
667 e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= minifree &&
668 e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) >= minbfree) {
669 if (fs->e2fs_contigdirs[cg] < maxcontigdirs)
670 return (cg);
672 for (cg = 0; cg < prefcg; cg++)
673 if (e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) < maxndir &&
674 e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= minifree &&
675 e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) >= minbfree) {
676 if (fs->e2fs_contigdirs[cg] < maxcontigdirs)
677 return (cg);
682 for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
683 if (e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= avgifree)
684 return (cg);
685 for (cg = 0; cg < prefcg; cg++)
686 if (e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= avgifree)
688 return (cg);
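
All of the ext2_dirpref() matches follow one wrap-around scan shape: candidate groups prefcg..gcount-1 are tested against a set of thresholds, then 0..prefcg-1, and the scan is repeated with progressively weaker criteria (fewest directories with enough free inodes and blocks, then a contigdirs cap, finally just the average free-inode count). A generic sketch of that scan shape, with a caller-supplied predicate standing in for the threshold checks; scan_groups and has_free are invented names:

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Wrap-around group scan: check prefcg..ngroups-1, then 0..prefcg-1,
 * returning the first group the predicate accepts, or -1 if none does.
 * The real ext2_dirpref() runs this shape several times with
 * progressively weaker predicates.
 */
static int
scan_groups(int prefcg, int ngroups, bool (*ok)(int cg, void *arg), void *arg)
{
	int cg;

	for (cg = prefcg; cg < ngroups; cg++)
		if (ok(cg, arg))
			return (cg);
	for (cg = 0; cg < prefcg; cg++)
		if (ok(cg, arg))
			return (cg);
	return (-1);
}

static bool
has_free(int cg, void *arg)
{
	const int *nifree = arg;

	return (nifree[cg] > 0);
}

int
main(void)
{
	int nifree[8] = { 0, 0, 0, 5, 0, 7, 0, 0 };

	/* Starting at group 5, group 5 itself qualifies first. */
	printf("%d\n", scan_groups(5, 8, has_free, nifree));
	return (0);
}
```
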
749 ext2_hashalloc(struct inode *ip, int cg, long pref, int size,
754 int i, icg = cg;
761 result = (*allocator)(ip, cg, pref, size);
768 cg += i;
769 if (cg >= fs->e2fs_gcount)
770 cg -= fs->e2fs_gcount;
771 result = (*allocator)(ip, cg, 0, size);
780 cg = (icg + 2) % fs->e2fs_gcount;
782 result = (*allocator)(ip, cg, 0, size);
785 cg++;
786 if (cg == fs->e2fs_gcount)
787 cg = 0;
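
The ext2_hashalloc() matches outline its three-stage search: the preferred group first, then a quadratic rehash whose step doubles each iteration (wrapping modulo the group count), and finally a brute-force sweep starting two groups past the original. A standalone sketch of that visiting order, with a dummy allocator that only records which groups were tried (all names and NGROUPS are illustrative):

```c
#include <stdint.h>
#include <stdio.h>

#define NGROUPS	8

/* Dummy allocator: always fails, but records the group it was asked for. */
static uint64_t
try_group(int cg, int *order, int *n)
{
	order[(*n)++] = cg;
	return (0);		/* 0 means "nothing allocated here" */
}

/*
 * Same search shape as the matches above: preferred group, quadratic
 * rehash, then a linear sweep starting at (icg + 2) % NGROUPS.
 */
static uint64_t
hash_search(int icg, int *order, int *n)
{
	uint64_t result;
	int cg, i;

	cg = icg;
	if ((result = try_group(cg, order, n)) != 0)
		return (result);
	for (i = 1; i < NGROUPS; i *= 2) {
		cg += i;
		if (cg >= NGROUPS)
			cg -= NGROUPS;
		if ((result = try_group(cg, order, n)) != 0)
			return (result);
	}
	cg = (icg + 2) % NGROUPS;
	for (i = 2; i < NGROUPS; i++) {
		if ((result = try_group(cg, order, n)) != 0)
			return (result);
		if (++cg == NGROUPS)
			cg = 0;
	}
	return (0);
}

int
main(void)
{
	int order[3 * NGROUPS], n = 0, i;

	hash_search(3, order, &n);
	for (i = 0; i < n; i++)
		printf("%d ", order[i]);	/* groups in the order tried */
	printf("\n");
	return (0);
}
```
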
793 ext2_cg_number_gdb_nometa(struct m_ext2fs *fs, int cg)
796 if (!ext2_cg_has_sb(fs, cg))
807 ext2_cg_number_gdb_meta(struct m_ext2fs *fs, int cg)
812 metagroup = cg / EXT2_DESCS_PER_BLOCK(fs);
816 if (cg == first || cg == first + 1 || cg == last)
823 ext2_cg_number_gdb(struct m_ext2fs *fs, int cg)
828 metagroup = cg / EXT2_DESCS_PER_BLOCK(fs);
832 return (ext2_cg_number_gdb_nometa(fs, cg));
834 return ext2_cg_number_gdb_meta(fs, cg);
838 ext2_number_base_meta_blocks(struct m_ext2fs *fs, int cg)
842 number = ext2_cg_has_sb(fs, cg);
845 cg < le32toh(fs->e2fs->e3fs_first_meta_bg) *
848 number += ext2_cg_number_gdb(fs, cg);
852 number += ext2_cg_number_gdb(fs, cg);
881 ext2_block_in_group(struct m_ext2fs *fs, e4fs_daddr_t block, int cg)
884 return ((ext2_get_group_number(fs, block) == cg) ? 1 : 0);
888 ext2_cg_block_bitmap_init(struct m_ext2fs *fs, int cg, struct buf *bp)
893 if (!(le16toh(fs->e2fs_gd[cg].ext4bgd_flags) & EXT2_BG_BLOCK_UNINIT))
898 bit_max = ext2_number_base_meta_blocks(fs, cg);
905 start = (uint64_t)cg * fs->e2fs_bpg +
909 tmp = e2fs_gd_get_b_bitmap(&fs->e2fs_gd[cg]);
911 ext2_block_in_group(fs, tmp, cg))
914 tmp = e2fs_gd_get_i_bitmap(&fs->e2fs_gd[cg]);
916 ext2_block_in_group(fs, tmp, cg))
919 tmp = e2fs_gd_get_i_tables(&fs->e2fs_gd[cg]);
921 while (tmp < e2fs_gd_get_i_tables(&fs->e2fs_gd[cg]) +
924 ext2_block_in_group(fs, tmp, cg))
938 fs->e2fs_gd[cg].ext4bgd_flags = htole16(le16toh(
939 fs->e2fs_gd[cg].ext4bgd_flags) & ~EXT2_BG_BLOCK_UNINIT);
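
ext2_cg_block_bitmap_init() handles groups still flagged EXT2_BG_BLOCK_UNINIT: the bitmap is rebuilt from scratch by marking only the group's own metadata (superblock and descriptor copies, both bitmaps, and the inode-table blocks) as in use, after which the flag is cleared. A small sketch of that mark-the-metadata step, using a byte-per-block array and invented offsets instead of the real bitmap macros:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BPG	64			/* blocks per group, illustrative */

/*
 * Build a fresh block map for an "uninitialized" group: only the
 * group's own metadata is in use.  A byte-per-block array stands in
 * for the on-disk bitmap; offsets are relative to the group start.
 */
static void
init_block_map(uint8_t map[BPG], int n_base_meta, int b_bitmap_off,
    int i_bitmap_off, int i_table_off, int i_table_blocks)
{
	int i;

	memset(map, 0, BPG);
	for (i = 0; i < n_base_meta; i++)	/* sb + group descriptors */
		map[i] = 1;
	map[b_bitmap_off] = 1;			/* block bitmap itself */
	map[i_bitmap_off] = 1;			/* inode bitmap */
	for (i = 0; i < i_table_blocks; i++)	/* inode table */
		map[i_table_off + i] = 1;
}

int
main(void)
{
	uint8_t map[BPG];
	int i, used = 0;

	init_block_map(map, 3, 3, 4, 5, 8);
	for (i = 0; i < BPG; i++)
		used += map[i];
	printf("%d metadata blocks marked used\n", used);	/* 13 */
	return (0);
}
```
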
945 ext2_b_bitmap_validate(struct m_ext2fs *fs, struct buf *bp, int cg)
961 gd = &fs->e2fs_gd[cg];
963 group_first_block = ((uint64_t)cg) * fs->e2fs_fpg +
970 "bad block bitmap, group", cg);
978 "bad inode bitmap", cg);
986 "bad inode table, group", cg);
1000 ext2_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
1011 if (e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) == 0)
1016 e2fs_gd_get_b_bitmap(&fs->e2fs_gd[cg])),
1023 error = ext2_cg_block_bitmap_init(fs, cg, bp);
1027 ext2_gd_b_bitmap_csum_set(fs, cg, bp);
1029 error = ext2_gd_b_bitmap_csum_verify(fs, cg, bp);
1033 error = ext2_b_bitmap_validate(fs, bp, cg);
1041 if (e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) == 0)
1046 if (dtog(fs, bpref) != cg)
1122 printf("ext2fs_alloccgblk: cg=%d bno=%jd fs=%s\n",
1123 cg, (intmax_t)bno, fs->e2fs_fsmnt);
1129 ext2_clusteracct(fs, bbp, cg, bno, -1);
1131 e2fs_gd_set_nbfree(&fs->e2fs_gd[cg],
1132 e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) - 1);
1135 ext2_gd_b_bitmap_csum_set(fs, cg, bp);
1137 return (((uint64_t)cg) * fs->e2fs_fpg +
1150 ext2_clusteralloc(struct inode *ip, int cg, daddr_t bpref, int len)
1163 if (fs->e2fs_maxcluster[cg] < len)
1168 fsbtodb(fs, e2fs_gd_get_b_bitmap(&fs->e2fs_gd[cg])),
1179 lp = &fs->e2fs_clustersum[cg].cs_sum[len];
1190 lp = &fs->e2fs_clustersum[cg].cs_sum[len - 1];
1194 fs->e2fs_maxcluster[cg] = i;
1200 if (dtog(fs, bpref) != cg)
1237 ext2_clusteracct(fs, bbp, cg, bno + i, -1);
1239 e2fs_gd_set_nbfree(&fs->e2fs_gd[cg],
1240 e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) - 1);
1246 return (cg * fs->e2fs_fpg + le32toh(fs->e2fs->e2fs_first_dblock)
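
Both block-allocation paths above end the same way: the bit offset found in the group's bitmap is converted back into an absolute filesystem block as group number times blocks-per-group, plus the first data block, plus the bit. A tiny sketch of that conversion and its inverse (the dtog direction), with illustrative constants:

```c
#include <assert.h>
#include <stdint.h>

#define FPG		32768	/* blocks per group, illustrative */
#define FIRST_DBLOCK	0	/* 1 on 1 KiB-block filesystems */

/* Bitmap bit -> absolute filesystem block (the shape of the returns above). */
static uint64_t
bit_to_block(uint32_t cg, uint32_t bit)
{
	return ((uint64_t)cg * FPG + FIRST_DBLOCK + bit);
}

/* Absolute block -> (group, bit) again, the dtog/dtogd direction. */
static void
block_to_bit(uint64_t bno, uint32_t *cg, uint32_t *bit)
{
	*cg = (bno - FIRST_DBLOCK) / FPG;
	*bit = (bno - FIRST_DBLOCK) % FPG;
}

int
main(void)
{
	uint32_t cg, bit;

	block_to_bit(bit_to_block(3, 1234), &cg, &bit);
	assert(cg == 3 && bit == 1234);
	return (0);
}
```
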
1257 ext2_zero_inode_table(struct inode *ip, int cg)
1265 if (le16toh(fs->e2fs_gd[cg].ext4bgd_flags) & EXT2_BG_INODE_ZEROED)
1272 e2fs_gd_get_i_unused(&fs->e2fs_gd[cg]),
1277 e2fs_gd_get_i_tables(&fs->e2fs_gd[cg]) + used_blks + i),
1286 fs->e2fs_gd[cg].ext4bgd_flags = htole16(le16toh(
1287 fs->e2fs_gd[cg].ext4bgd_flags) | EXT2_BG_INODE_ZEROED);
1308 ext2_nodealloccg(struct inode *ip, int cg, daddr_t ipref, int mode)
1321 if (e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) == 0)
1325 e2fs_gd_get_i_bitmap(&fs->e2fs_gd[cg])),
1333 if (le16toh(fs->e2fs_gd[cg].ext4bgd_flags) &
1339 fs->e2fs_gd[cg].ext4bgd_flags = htole16(le16toh(
1340 fs->e2fs_gd[cg].ext4bgd_flags) &
1343 ext2_gd_i_bitmap_csum_set(fs, cg, bp);
1344 error = ext2_zero_inode_table(ip, cg);
1351 error = ext2_gd_i_bitmap_csum_verify(fs, cg, bp);
1357 if (e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) == 0) {
1381 ext2_nodealloccg_bmap_corrupted, cg, ipref,
1392 e2fs_gd_set_nifree(&fs->e2fs_gd[cg],
1393 e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) - 1);
1396 ifree = fs->e2fs_ipg - e2fs_gd_get_i_unused(&fs->e2fs_gd[cg]);
1398 e2fs_gd_set_i_unused(&fs->e2fs_gd[cg],
1404 e2fs_gd_set_ndirs(&fs->e2fs_gd[cg],
1405 e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) + 1);
1409 ext2_gd_i_bitmap_csum_set(fs, cg, bp);
1411 return ((uint64_t)cg * fs->e2fs_ipg + ipref + 1);
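
ext2_nodealloccg() finishes symmetrically for inodes: the allocated bit becomes inode number cg * inodes-per-group + bit + 1, the +1 reflecting that ext2 inode numbers start at 1 (the mirror image of the ino_to_cg mapping sketched earlier). A minimal check of that round trip, with an illustrative IPG:

```c
#include <assert.h>
#include <stdint.h>

#define IPG	8192		/* inodes per group, illustrative */

/* Bitmap bit -> inode number; ext2 inode numbers start at 1. */
static uint64_t
bit_to_ino(uint32_t cg, uint32_t bit)
{
	return ((uint64_t)cg * IPG + bit + 1);
}

int
main(void)
{
	/* Round-trips with the (ino - 1) / IPG mapping shown earlier. */
	assert((bit_to_ino(5, 42) - 1) / IPG == 5);
	assert((bit_to_ino(5, 42) - 1) % IPG == 42);
	return (0);
}
```
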
1424 int cg, error;
1429 cg = dtog(fs, bno);
1436 fsbtodb(fs, e2fs_gd_get_b_bitmap(&fs->e2fs_gd[cg])),
1449 ext2_clusteracct(fs, bbp, cg, bno, 1);
1451 e2fs_gd_set_nbfree(&fs->e2fs_gd[cg],
1452 e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) + 1);
1455 ext2_gd_b_bitmap_csum_set(fs, cg, bp);
1470 int error, cg;
1480 cg = ino_to_cg(fs, ino);
1482 fsbtodb(fs, e2fs_gd_get_i_bitmap(&fs->e2fs_gd[cg])),
1498 e2fs_gd_set_nifree(&fs->e2fs_gd[cg],
1499 e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) + 1);
1501 e2fs_gd_set_ndirs(&fs->e2fs_gd[cg],
1502 e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) - 1);
1507 ext2_gd_i_bitmap_csum_set(fs, cg, bp);
1548 ext2_cg_has_sb(struct m_ext2fs *fs, int cg)
1552 if (cg == 0)
1556 if (cg == le32toh(fs->e2fs->e4fs_backup_bgs[0]) ||
1557 cg == le32toh(fs->e2fs->e4fs_backup_bgs[1]))
1562 if ((cg <= 1) ||
1566 if (!(cg & 1))
1570 a3 <= cg || a5 <= cg || a7 <= cg;
1572 if (cg == a3 || cg == a5 || cg == a7)
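
The ext2_cg_has_sb() matches encode the superblock-backup placement rules: group 0 always has a copy; with sparse_super2 only the two groups listed in e4fs_backup_bgs do; with plain sparse_super it is groups 0 and 1 plus the odd groups whose number is a power of 3, 5, or 7; without either feature every group carries one. A standalone sketch of the sparse_super test under those assumptions (function names invented):

```c
#include <stdbool.h>
#include <stdio.h>

/* True if n is a power of b (including b^0 == 1). */
static bool
is_power_of(unsigned n, unsigned b)
{
	while (n % b == 0)
		n /= b;
	return (n == 1);
}

/* sparse_super placement: groups 0, 1 and powers of 3, 5 and 7. */
static bool
group_has_sb_backup(unsigned cg)
{
	if (cg <= 1)
		return (true);
	if ((cg & 1) == 0)		/* even groups never qualify */
		return (false);
	return (is_power_of(cg, 3) || is_power_of(cg, 5) ||
	    is_power_of(cg, 7));
}

int
main(void)
{
	unsigned cg;

	for (cg = 0; cg < 100; cg++)
		if (group_has_sb_backup(cg))
			printf("%u ", cg);	/* 0 1 3 5 7 9 25 27 49 81 */
	printf("\n");
	return (0);
}
```
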