Lines matching refs:depth in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/fs/ext4/
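Nearly every match below turns on one idiom: the lookup path from the root of the on-disk extent tree down to a leaf is kept in an array of struct ext4_ext_path, and depth (taken from ext_depth(inode) or path->p_depth) indexes that array, so path[depth] always names the leaf level holding the extent itself. The sketch that follows is a minimal userspace model of that indexing with simplified stand-in types, not the kernel structures; only the shape of the access pattern and the "depth + 2" allocation (lines 660-667) are taken from the listing. The bottom-up scan that several other matches share is sketched after the listing.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct ext4_extent: one contiguous run of blocks. */
struct extent {
	unsigned int ee_block;   /* first logical block covered */
	unsigned short ee_len;   /* number of blocks in the run */
};

/* Stand-in for struct ext4_ext_path: one array element per tree
 * level, root at index 0, leaf at index p_depth. */
struct ext_path {
	int p_depth;             /* meaningful at path[0] */
	struct extent *p_ext;    /* current extent (leaf level only) */
};

/* Models the allocation around lines 660-667: one slot per level,
 * plus two spare slots to absorb a possible depth increase (the
 * "depth + 2" in the listing). */
static struct ext_path *alloc_path(int depth)
{
	return calloc(depth + 2, sizeof(struct ext_path));
}

/* Models the access idiom repeated throughout the listing:
 * "depth = path->p_depth; ex = path[depth].p_ext;" */
static struct extent *found_extent(struct ext_path *path)
{
	return path[path->p_depth].p_ext;
}

int main(void)
{
	struct extent leaf_ext = { .ee_block = 100, .ee_len = 8 };
	struct ext_path *path = alloc_path(1);   /* tree of depth 1 */

	path[0].p_depth = 1;          /* leaf level lives at path[1] */
	path[1].p_ext = &leaf_ext;

	printf("extent starts at block %u\n", found_extent(path)->ee_block);
	free(path);
	return 0;
}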

163 	int depth;
167 depth = path->p_depth;
170 ex = path[depth].p_ext;
176 if (path[depth].p_bh)
177 return path[depth].p_bh->b_blocknr;
336 ext4_ext_max_entries(struct inode *inode, int depth)
340 if (depth == ext_depth(inode)) {
341 if (depth == 0)
346 if (depth == 0)
373 int depth)
383 if (depth == 0) {
406 int depth)
415 if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
423 max = ext4_ext_max_entries(inode, depth);
432 if (!ext4_valid_extent_entries(inode, eh, depth)) {
441 "entries %u, max %u(%u), depth %u(%u)",
444 max, le16_to_cpu(eh->eh_depth), depth);
449 #define ext4_ext_check(inode, eh, depth) \
450 __ext4_ext_check(__func__, __LINE__, inode, eh, depth)
481 int depth = ext_depth(inode);
489 eh = path[depth].p_hdr;
508 int depth = path->p_depth;
511 for (i = 0; i <= depth; i++, path++)
660 short int depth, i, ppos = 0, alloc = 0;
663 depth = ext_depth(inode);
665 /* account possible depth increase */
667 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
676 i = depth;
681 ext_debug("depth %d: num %d, max %d\n",
702 if (unlikely(ppos > depth)) {
705 "ppos %d > depth %d", ppos, depth);
809 * at depth @at:
821 int depth = ext_depth(inode);
835 if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
839 if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
840 border = path[depth].p_ext[1].ee_block;
863 ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
868 ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
869 for (a = 0; a < depth - at; a++) {
902 /* move remainder of path[depth] to the new leaf */
903 if (unlikely(path[depth].p_hdr->eh_entries !=
904 path[depth].p_hdr->eh_max)) {
906 path[depth].p_hdr->eh_entries,
907 path[depth].p_hdr->eh_max);
914 path[depth].p_ext++;
915 while (path[depth].p_ext <=
916 EXT_MAX_EXTENT(path[depth].p_hdr)) {
918 le32_to_cpu(path[depth].p_ext->ee_block),
919 ext_pblock(path[depth].p_ext),
920 ext4_ext_is_uninitialized(path[depth].p_ext),
921 ext4_ext_get_actual_len(path[depth].p_ext),
923 /*memmove(ex++, path[depth].p_ext++,
926 path[depth].p_ext++;
930 memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
945 err = ext4_ext_get_access(handle, inode, path + depth);
948 le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
949 err = ext4_ext_dirty(handle, inode, path + depth);
956 k = depth - at - 1;
965 /* current depth stored in i var */
966 i = depth - 1;
985 neh->eh_depth = cpu_to_le16(depth - i);
1059 for (i = 0; i < depth; i++) {
1161 * if no free index is found, then it requests in-depth growing.
1168 int depth, i, err = 0;
1171 i = depth = ext_depth(inode);
1174 curp = path + depth;
1197 /* tree is full, time to grow in depth */
1213 * only first (depth 0 -> 1) produces free space;
1216 depth = ext_depth(inode);
1217 if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
1240 int depth, ee_len;
1246 depth = path->p_depth;
1249 if (depth == 0 && path->p_ext == NULL)
1256 ex = path[depth].p_ext;
1259 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1265 while (--depth >= 0) {
1266 ix = path[depth].p_idx;
1267 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1269 "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
1271 EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
1272 EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block : 0,
1273 depth);
1308 int depth; /* Note, NOT eh_depth; depth from top of tree */
1315 depth = path->p_depth;
1318 if (depth == 0 && path->p_ext == NULL)
1325 ex = path[depth].p_ext;
1328 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1331 depth);
1334 while (--depth >= 0) {
1335 ix = path[depth].p_idx;
1336 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1355 if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
1364 while (--depth >= 0) {
1365 ix = path[depth].p_idx;
1366 if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
1379 while (++depth < path->p_depth) {
1385 if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
1398 if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
1419 int depth;
1422 depth = path->p_depth;
1424 if (depth == 0 && path->p_ext == NULL)
1427 while (depth >= 0) {
1428 if (depth == path->p_depth) {
1430 if (path[depth].p_ext !=
1431 EXT_LAST_EXTENT(path[depth].p_hdr))
1432 return le32_to_cpu(path[depth].p_ext[1].ee_block);
1435 if (path[depth].p_idx !=
1436 EXT_LAST_INDEX(path[depth].p_hdr))
1437 return le32_to_cpu(path[depth].p_idx[1].ei_block);
1439 depth--;
1452 int depth;
1455 depth = path->p_depth;
1458 if (depth == 0)
1462 depth--;
1464 while (depth >= 0) {
1465 if (path[depth].p_idx !=
1466 EXT_LAST_INDEX(path[depth].p_hdr))
1468 le32_to_cpu(path[depth].p_idx[1].ei_block);
1469 depth--;
1485 int depth = ext_depth(inode);
1490 eh = path[depth].p_hdr;
1491 ex = path[depth].p_ext;
1499 if (depth == 0) {
1512 k = depth - 1;
1513 border = path[depth].p_ext->ee_block;
1592 unsigned int depth, len;
1596 depth = ext_depth(inode);
1597 BUG_ON(path[depth].p_hdr == NULL);
1598 eh = path[depth].p_hdr;
1639 unsigned int depth, len1;
1644 depth = ext_depth(inode);
1645 if (!path[depth].p_ext)
1647 b2 = le32_to_cpu(path[depth].p_ext->ee_block);
1689 int depth, len, err;
1697 depth = ext_depth(inode);
1698 ex = path[depth].p_ext;
1699 if (unlikely(path[depth].p_hdr == NULL)) {
1700 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1713 err = ext4_ext_get_access(handle, inode, path + depth);
1728 eh = path[depth].p_hdr;
1734 depth = ext_depth(inode);
1735 eh = path[depth].p_hdr;
1750 eh = npath[depth].p_hdr;
1768 depth = ext_depth(inode);
1769 eh = path[depth].p_hdr;
1772 nearex = path[depth].p_ext;
1774 err = ext4_ext_get_access(handle, inode, path + depth);
1785 path[depth].p_ext = EXT_FIRST_EXTENT(eh);
1802 path[depth].p_ext = nearex + 1;
1815 path[depth].p_ext = nearex;
1819 nearex = path[depth].p_ext;
1836 err = ext4_ext_dirty(handle, inode, path + depth);
1856 int depth, exists, err = 0;
1873 depth = ext_depth(inode);
1874 if (unlikely(path[depth].p_hdr == NULL)) {
1875 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1879 ex = path[depth].p_ext;
1947 if (ext_depth(inode) != depth) {
1948 /* depth was changed. we have to realloc path */
1988 int depth = ext_depth(inode);
1993 ex = path[depth].p_ext;
2104 int depth = ext_depth(inode);
2108 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2109 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2142 int depth = ext_depth(inode);
2145 index = depth * 2;
2147 index = depth * 3;
2203 int depth = ext_depth(inode), credits;
2214 if (!path[depth].p_hdr)
2215 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2216 eh = path[depth].p_hdr;
2217 if (unlikely(path[depth].p_hdr == NULL)) {
2218 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2237 path[depth].p_ext = ex;
2284 err = ext4_ext_get_access(handle, inode, path + depth);
2307 err = ext4_ext_dirty(handle, inode, path + depth);
2323 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2324 err = ext4_ext_rm_idx(handle, inode, path + depth);
2354 int depth = ext_depth(inode);
2362 handle = ext4_journal_start(inode, depth + 1);
2371 * after i_size and walking into the tree depth-wise.
2373 depth = ext_depth(inode);
2374 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
2379 path[0].p_depth = depth;
2381 if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
2388 if (i == depth) {
2431 if (WARN_ON(i + 1 > depth)) {
2436 depth - i - 1)) {
2531 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2631 unsigned int allocated, ee_len, depth;
2646 depth = ext_depth(inode);
2647 eh = path[depth].p_hdr;
2648 ex = path[depth].p_ext;
2665 err = ext4_ext_get_access(handle, inode, path + depth);
2677 ext4_ext_dirty(handle, inode, path + depth);
2712 ext4_ext_dirty(handle, inode, path + depth);
2727 ext4_ext_dirty(handle, inode, path + depth);
2748 depth = ext_depth(inode);
2757 ex = path[depth].p_ext;
2759 path + depth);
2763 ext4_ext_dirty(handle, inode, path + depth);
2784 ext4_ext_dirty(handle, inode, path + depth);
2792 * The depth, and hence eh & ex might change
2804 depth = newdepth;
2811 eh = path[depth].p_hdr;
2812 ex = path[depth].p_ext;
2816 err = ext4_ext_get_access(handle, inode, path + depth);
2835 ext4_ext_dirty(handle, inode, path + depth);
2842 * If there was a change of depth as part of the
2874 depth = ext_depth(inode);
2892 err = ext4_ext_dirty(handle, inode, path + depth);
2904 ext4_ext_dirty(handle, inode, path + depth);
2918 ext4_ext_dirty(handle, inode, path + depth);
2955 unsigned int allocated, ee_len, depth;
2969 depth = ext_depth(inode);
2970 ex = path[depth].p_ext;
2995 err = ext4_ext_get_access(handle, inode, path + depth);
3029 ext4_ext_dirty(handle, inode, path + depth);
3037 * The depth, and hence eh & ex might change
3049 depth = newdepth;
3056 ex = path[depth].p_ext;
3060 err = ext4_ext_get_access(handle, inode, path + depth);
3067 * If there was a change of depth as part of the
3088 err = ext4_ext_dirty(handle, inode, path + depth);
3101 ext4_ext_dirty(handle, inode, path + depth);
3115 ext4_ext_dirty(handle, inode, path + depth);
3124 int depth;
3128 depth = ext_depth(inode);
3129 eh = path[depth].p_hdr;
3130 ex = path[depth].p_ext;
3132 err = ext4_ext_get_access(handle, inode, path + depth);
3152 depth = ext_depth(inode);
3164 depth = ext_depth(inode);
3167 err = ext4_ext_dirty(handle, inode, path + depth);
3318 int i, err = 0, depth, ret, cache_type;
3360 depth = ext_depth(inode);
3367 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
3369 "lblock: %lu, depth: %d pblock %lld",
3370 (unsigned long) map->m_lblk, depth,
3371 path[depth].p_block);
3375 eh = path[depth].p_hdr;
3377 ex = path[depth].p_ext;
3514 for (i = depth-1; i >= 0; i--) {
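The matches at lines 1419-1439 (and the index-only variant at 1452-1469) outline the other recurring pattern: a bottom-up scan that starts at the leaf and walks toward the root until some level still has an entry to its right; that entry's first logical block is the next allocated block. Below is a minimal userspace model of that scan. The struct and field names are simplified stand-ins for the kernel types, and EXT_MAX_BLOCK is used here only as a sentinel meaning "nothing allocated past this point".

#include <stdio.h>

#define EXT_MAX_BLOCK 0xffffffffu   /* sentinel: no further allocation */

/* One level of the lookup path, simplified: where we are, where the
 * level ends, and the start block of the entry to our right. */
struct level {
	unsigned int pos;        /* index of the current entry */
	unsigned int last;       /* index of the last entry */
	unsigned int next_block; /* first block of the entry after pos */
};

/* Models the scan at lines 1419-1439: starting at the leaf, walk
 * toward the root until some level still has an entry to the right;
 * its start block is the next allocated logical block. */
static unsigned int next_allocated_block(const struct level *path, int depth)
{
	while (depth >= 0) {
		if (path[depth].pos != path[depth].last)
			return path[depth].next_block;
		depth--;   /* this level is exhausted, go up one */
	}
	return EXT_MAX_BLOCK;
}

int main(void)
{
	/* Depth-1 tree: the leaf is fully consumed, but the root index
	 * still has a sibling whose subtree starts at block 4096. */
	struct level path[2] = {
		{ .pos = 0, .last = 2, .next_block = 4096 },  /* root index */
		{ .pos = 3, .last = 3 },                      /* leaf */
	};

	printf("next allocated block: %u\n", next_allocated_block(path, 1));
	return 0;
}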