Lines matching refs: cur

120 	struct xfs_btree_cur	*cur,
125 struct xfs_mount *mp = cur->bc_mp;
137 if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(mp, cur->bc_ops))
142 cur->bc_ops->get_maxrecs(cur, level))
154 struct xfs_btree_cur *cur,
159 struct xfs_mount *mp = cur->bc_mp;
163 fa = __xfs_btree_check_lblock_hdr(cur, block, level, bp);
194 struct xfs_btree_cur *cur,
199 struct xfs_buftarg *btp = cur->bc_mem.xfbtree->target;
203 fa = __xfs_btree_check_lblock_hdr(cur, block, level, bp);
222 struct xfs_btree_cur *cur,
227 struct xfs_mount *mp = cur->bc_mp;
228 struct xfs_perag *pag = cur->bc_ag.pag;
239 if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(mp, cur->bc_ops))
244 cur->bc_ops->get_maxrecs(cur, level))
263 struct xfs_btree_cur *cur,
268 switch (cur->bc_ops->type) {
270 return __xfs_btree_check_memblock(cur, block, level, bp);
272 return __xfs_btree_check_agblock(cur, block, level, bp);
274 return __xfs_btree_check_fsblock(cur, block, level, bp);
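The three-way dispatch above selects a block validator by cursor type. A minimal reconstruction from the fragments in this listing (the case labels are inferred from the call order; comments are editorial):

static xfs_failaddr_t
example_check_block(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level,
	struct xfs_buf		*bp)
{
	switch (cur->bc_ops->type) {
	case XFS_BTREE_TYPE_MEM:	/* in-memory, xfile-backed btree */
		return __xfs_btree_check_memblock(cur, block, level, bp);
	case XFS_BTREE_TYPE_AG:		/* rooted in an AG header, short ptrs */
		return __xfs_btree_check_agblock(cur, block, level, bp);
	case XFS_BTREE_TYPE_INODE:	/* rooted in an inode fork, long ptrs */
		return __xfs_btree_check_fsblock(cur, block, level, bp);
	default:
		ASSERT(0);
		return __this_address;
	}
}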
281 static inline unsigned int xfs_btree_block_errtag(struct xfs_btree_cur *cur)
283 if (cur->bc_ops->ptr_len == XFS_BTREE_SHORT_PTR_LEN)
293 struct xfs_btree_cur *cur, /* btree cursor */
298 struct xfs_mount *mp = cur->bc_mp;
301 fa = __xfs_btree_check_block(cur, block, level, bp);
303 XFS_TEST_ERROR(false, mp, xfs_btree_block_errtag(cur))) {
306 xfs_btree_mark_sick(cur);
314 struct xfs_btree_cur *cur,
322 switch (cur->bc_ops->type) {
324 if (!xfbtree_verify_bno(cur->bc_mem.xfbtree,
329 if (!xfs_verify_fsbno(cur->bc_mp,
334 if (!xfs_verify_agbno(cur->bc_ag.pag,
349 struct xfs_btree_cur *cur,
356 error = __xfs_btree_check_ptr(cur, ptr, index, level);
358 switch (cur->bc_ops->type) {
360 xfs_err(cur->bc_mp,
362 cur->bc_ops->name, cur->bc_flags, level, index,
366 xfs_err(cur->bc_mp,
368 cur->bc_ino.ip->i_ino,
369 cur->bc_ino.whichfork, cur->bc_ops->name,
373 xfs_err(cur->bc_mp,
375 cur->bc_ag.pag->pag_agno, cur->bc_ops->name,
379 xfs_btree_mark_sick(cur);
469 struct xfs_btree_cur *cur,
474 trace_xfs_btree_free_block(cur, bp);
480 if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) {
485 error = cur->bc_ops->free_block(cur, bp);
487 xfs_trans_binval(cur->bc_tp, bp);
488 XFS_BTREE_STATS_INC(cur, free);
498 struct xfs_btree_cur *cur, /* btree cursor */
510 for (i = 0; i < cur->bc_nlevels; i++) {
511 if (cur->bc_levels[i].bp)
512 xfs_trans_brelse(cur->bc_tp, cur->bc_levels[i].bp);
523 ASSERT(!xfs_btree_is_bmap(cur->bc_ops) || cur->bc_bmap.allocated == 0 ||
524 xfs_is_shutdown(cur->bc_mp) || error != 0);
526 switch (cur->bc_ops->type) {
528 if (cur->bc_ag.pag)
529 xfs_perag_put(cur->bc_ag.pag);
535 if (cur->bc_mem.pag)
536 xfs_perag_put(cur->bc_mem.pag);
540 kmem_cache_free(cur->bc_cache, cur);
546 struct xfs_btree_cur *cur)
548 if (cur->bc_ops->type == XFS_BTREE_TYPE_MEM)
549 return cur->bc_mem.xfbtree->target;
550 return cur->bc_mp->m_ddev_targp;
556 struct xfs_btree_cur *cur)
558 if (cur->bc_ops->type == XFS_BTREE_TYPE_MEM)
560 return cur->bc_mp->m_bsize;
569 struct xfs_btree_cur *cur, /* input cursor */
572 struct xfs_mount *mp = cur->bc_mp;
573 struct xfs_trans *tp = cur->bc_tp;
583 if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) {
591 new = cur->bc_ops->dup_cursor(cur);
596 new->bc_rec = cur->bc_rec;
602 new->bc_levels[i].ptr = cur->bc_levels[i].ptr;
603 new->bc_levels[i].ra = cur->bc_levels[i].ra;
604 bp = cur->bc_levels[i].bp;
607 xfs_btree_buftarg(cur),
609 xfs_btree_bbsize(cur), 0, &bp,
610 cur->bc_ops->buf_ops);
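xfs_btree_dup_cursor() re-reads every buffer on the original cursor's path so the copy can be moved independently. Several routines below (lshift, rshift, delrec) use a throwaway duplicate to probe a sibling; a minimal sketch of that pattern, using only calls visible in this listing:

static int
example_probe_right(
	struct xfs_btree_cur	*cur,
	int			level)
{
	struct xfs_btree_cur	*tcur;
	int			stat;
	int			error;

	error = xfs_btree_dup_cursor(cur, &tcur);
	if (error)
		return error;
	/* step the clone right; the original cursor stays put */
	error = xfs_btree_increment(tcur, level, &stat);
	/* release the clone and the buffers it holds */
	xfs_btree_del_cursor(tcur, error);
	return error;
}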
702 static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur)
704 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
705 if (xfs_has_crc(cur->bc_mp))
709 if (xfs_has_crc(cur->bc_mp))
719 struct xfs_btree_cur *cur,
722 return xfs_btree_block_len(cur) +
723 (n - 1) * cur->bc_ops->rec_len;
731 struct xfs_btree_cur *cur,
734 return xfs_btree_block_len(cur) +
735 (n - 1) * cur->bc_ops->key_len;
743 struct xfs_btree_cur *cur,
746 return xfs_btree_block_len(cur) +
747 (n - 1) * cur->bc_ops->key_len + (cur->bc_ops->key_len / 2);
755 struct xfs_btree_cur *cur,
759 return xfs_btree_block_len(cur) +
760 cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len +
761 (n - 1) * cur->bc_ops->ptr_len;
769 struct xfs_btree_cur *cur,
774 ((char *)block + xfs_btree_rec_offset(cur, n));
782 struct xfs_btree_cur *cur,
787 ((char *)block + xfs_btree_key_offset(cur, n));
795 struct xfs_btree_cur *cur,
800 ((char *)block + xfs_btree_high_key_offset(cur, n));
808 struct xfs_btree_cur *cur,
817 ((char *)block + xfs_btree_ptr_offset(cur, n, level));
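The offset and addr helpers above encode the block layout: a header of xfs_btree_block_len() bytes, then for leaves a 1-based record array, and for nodes all keys followed by all pointers (which is why ptr_offset needs get_maxrecs()). In overlapping btrees each key slot holds a low/high pair, so the high key begins key_len / 2 bytes into the slot. A worked example for the bmbt, assuming the v5 long-pointer header length of 72 bytes, 16-byte records, and 8-byte keys and pointers:

	/* leaf:  rec n at 72 + (n - 1) * 16, so rec 3 sits at offset 104 */
	/* node:  key n at 72 + (n - 1) * 8                               */
	/*        ptr n at 72 + maxrecs * 8 + (n - 1) * 8                 */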
822 struct xfs_btree_cur *cur)
824 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
826 if (cur->bc_flags & XFS_BTREE_STAGING)
827 return cur->bc_ino.ifake->if_fork;
828 return xfs_ifork_ptr(cur->bc_ino.ip, cur->bc_ino.whichfork);
839 struct xfs_btree_cur *cur)
841 struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
852 struct xfs_btree_cur *cur, /* btree cursor */
856 if (xfs_btree_at_iroot(cur, level)) {
858 return xfs_btree_get_iroot(cur);
861 *bpp = cur->bc_levels[level].bp;
871 struct xfs_btree_cur *cur, /* btree cursor */
880 block = xfs_btree_get_block(cur, level, &bp);
881 if (xfs_btree_check_block(cur, block, level, bp))
891 cur->bc_levels[level].ptr = 1;
901 struct xfs_btree_cur *cur, /* btree cursor */
910 block = xfs_btree_get_block(cur, level, &bp);
911 if (xfs_btree_check_block(cur, block, level, bp))
921 cur->bc_levels[level].ptr = be16_to_cpu(block->bb_numrecs);
963 struct xfs_btree_cur *cur,
967 struct xfs_mount *mp = cur->bc_mp;
974 mp->m_bsize, cur->bc_ops->buf_ops);
980 mp->m_bsize, cur->bc_ops->buf_ops);
989 struct xfs_btree_cur *cur,
993 struct xfs_buftarg *btp = cur->bc_mem.xfbtree->target;
1000 cur->bc_ops->buf_ops);
1006 cur->bc_ops->buf_ops);
1015 struct xfs_btree_cur *cur,
1019 struct xfs_mount *mp = cur->bc_mp;
1020 xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;
1028 mp->m_bsize, cur->bc_ops->buf_ops);
1035 mp->m_bsize, cur->bc_ops->buf_ops);
1048 struct xfs_btree_cur *cur, /* btree cursor */
1058 if (xfs_btree_at_iroot(cur, lev))
1061 if ((cur->bc_levels[lev].ra | lr) == cur->bc_levels[lev].ra)
1064 cur->bc_levels[lev].ra |= lr;
1065 block = XFS_BUF_TO_BLOCK(cur->bc_levels[lev].bp);
1067 switch (cur->bc_ops->type) {
1069 return xfs_btree_readahead_agblock(cur, lr, block);
1071 return xfs_btree_readahead_fsblock(cur, lr, block);
1073 return xfs_btree_readahead_memblock(cur, lr, block);
1082 struct xfs_btree_cur *cur,
1088 error = xfs_btree_check_ptr(cur, ptr, 0, 1);
1092 switch (cur->bc_ops->type) {
1094 *daddr = XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_ag.pag->pag_agno,
1098 *daddr = XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l));
1115 struct xfs_btree_cur *cur,
1121 if (xfs_btree_ptr_to_daddr(cur, ptr, &daddr))
1123 xfs_buf_readahead(xfs_btree_buftarg(cur), daddr,
1124 xfs_btree_bbsize(cur) * count,
1125 cur->bc_ops->buf_ops);
1134 struct xfs_btree_cur *cur, /* btree cursor */
1140 if (cur->bc_levels[lev].bp)
1141 xfs_trans_brelse(cur->bc_tp, cur->bc_levels[lev].bp);
1142 cur->bc_levels[lev].bp = bp;
1143 cur->bc_levels[lev].ra = 0;
1146 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
1148 cur->bc_levels[lev].ra |= XFS_BTCUR_LEFTRA;
1150 cur->bc_levels[lev].ra |= XFS_BTCUR_RIGHTRA;
1153 cur->bc_levels[lev].ra |= XFS_BTCUR_LEFTRA;
1155 cur->bc_levels[lev].ra |= XFS_BTCUR_RIGHTRA;
1161 struct xfs_btree_cur *cur,
1164 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
1172 struct xfs_btree_cur *cur,
1175 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
1183 struct xfs_btree_cur *cur,
1187 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
1197 struct xfs_btree_cur *cur,
1204 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
1219 struct xfs_btree_cur *cur,
1226 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
1308 struct xfs_btree_cur *cur)
1310 switch (cur->bc_ops->type) {
1312 return cur->bc_mem.xfbtree->owner;
1314 return cur->bc_ino.ip->i_ino;
1316 return cur->bc_ag.pag->pag_agno;
1325 struct xfs_btree_cur *cur,
1330 xfs_btree_init_buf(cur->bc_mp, bp, cur->bc_ops, level, numrecs,
1331 xfs_btree_owner(cur));
1341 struct xfs_btree_cur *cur,
1349 if (!(cur->bc_ops->geom_flags & XFS_BTGEO_LASTREC_UPDATE))
1352 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
1353 if (!xfs_btree_ptr_is_null(cur, &ptr))
1360 struct xfs_btree_cur *cur,
1364 switch (cur->bc_ops->type) {
1366 ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp,
1370 ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp,
1381 struct xfs_btree_cur *cur,
1384 xfs_buf_set_ref(bp, cur->bc_ops->lru_refs);
1389 struct xfs_btree_cur *cur,
1397 error = xfs_btree_ptr_to_daddr(cur, ptr, &d);
1400 error = xfs_trans_get_buf(cur->bc_tp, xfs_btree_buftarg(cur), d,
1401 xfs_btree_bbsize(cur), 0, bpp);
1405 (*bpp)->b_ops = cur->bc_ops->buf_ops;
1416 struct xfs_btree_cur *cur,
1422 struct xfs_mount *mp = cur->bc_mp;
1429 error = xfs_btree_ptr_to_daddr(cur, ptr, &d);
1432 error = xfs_trans_read_buf(mp, cur->bc_tp, xfs_btree_buftarg(cur), d,
1433 xfs_btree_bbsize(cur), flags, bpp,
1434 cur->bc_ops->buf_ops);
1436 xfs_btree_mark_sick(cur);
1440 xfs_btree_set_refs(cur, *bpp);
1450 struct xfs_btree_cur *cur,
1456 memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len);
1464 struct xfs_btree_cur *cur,
1470 memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len);
1478 struct xfs_btree_cur *cur,
1484 memcpy(dst_ptr, src_ptr, numptrs * cur->bc_ops->ptr_len);
1492 struct xfs_btree_cur *cur,
1502 dst_key = (char *)key + (dir * cur->bc_ops->key_len);
1503 memmove(dst_key, key, numkeys * cur->bc_ops->key_len);
1511 struct xfs_btree_cur *cur,
1521 dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len);
1522 memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len);
1530 struct xfs_btree_cur *cur,
1540 dst_ptr = (char *)ptr + (dir * cur->bc_ops->ptr_len);
1541 memmove(dst_ptr, ptr, numptrs * cur->bc_ops->ptr_len);
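The copy helpers duplicate n entries; the shift helpers move n entries one slot left or right (dir = -1 or +1) with memmove, so source and destination may overlap. A sketch of the insertion idiom this enables, mirroring the insrec calls further down (block, ptr, numrecs and rec as in that function):

	/* open a hole at slot ptr: slide recs ptr..numrecs right by one */
	xfs_btree_shift_recs(cur, xfs_btree_rec_addr(cur, ptr, block), 1,
			numrecs - ptr + 1);
	/* land the new record in the hole */
	xfs_btree_copy_recs(cur, xfs_btree_rec_addr(cur, ptr, block), rec, 1);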
1549 struct xfs_btree_cur *cur,
1556 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
1557 xfs_trans_log_buf(cur->bc_tp, bp,
1558 xfs_btree_key_offset(cur, first),
1559 xfs_btree_key_offset(cur, last + 1) - 1);
1561 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
1562 xfs_ilog_fbroot(cur->bc_ino.whichfork));
1571 struct xfs_btree_cur *cur,
1577 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
1578 xfs_trans_log_buf(cur->bc_tp, bp,
1579 xfs_btree_rec_offset(cur, first),
1580 xfs_btree_rec_offset(cur, last + 1) - 1);
1589 struct xfs_btree_cur *cur, /* btree cursor */
1599 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
1600 xfs_trans_log_buf(cur->bc_tp, bp,
1601 xfs_btree_ptr_offset(cur, first, level),
1602 xfs_btree_ptr_offset(cur, last + 1, level) - 1);
1604 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
1605 xfs_ilog_fbroot(cur->bc_ino.whichfork));
1615 struct xfs_btree_cur *cur, /* btree cursor */
1652 if (xfs_has_crc(cur->bc_mp)) {
1667 (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) ?
1670 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
1671 xfs_trans_log_buf(cur->bc_tp, bp, first, last);
1673 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
1674 xfs_ilog_fbroot(cur->bc_ino.whichfork));
1684 struct xfs_btree_cur *cur,
1694 ASSERT(level < cur->bc_nlevels);
1697 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
1700 block = xfs_btree_get_block(cur, level, &bp);
1703 error = xfs_btree_check_block(cur, block, level, bp);
1709 if (++cur->bc_levels[level].ptr <= xfs_btree_get_numrecs(block))
1713 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
1714 if (xfs_btree_ptr_is_null(cur, &ptr))
1717 XFS_BTREE_STATS_INC(cur, increment);
1723 for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
1724 block = xfs_btree_get_block(cur, lev, &bp);
1727 error = xfs_btree_check_block(cur, block, lev, bp);
1732 if (++cur->bc_levels[lev].ptr <= xfs_btree_get_numrecs(block))
1736 xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
1743 if (lev == cur->bc_nlevels) {
1744 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
1747 xfs_btree_mark_sick(cur);
1751 ASSERT(lev < cur->bc_nlevels);
1757 for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
1760 ptrp = xfs_btree_ptr_addr(cur, cur->bc_levels[lev].ptr, block);
1762 error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
1766 xfs_btree_setbuf(cur, lev, bp);
1767 cur->bc_levels[lev].ptr = 1;
1787 struct xfs_btree_cur *cur,
1797 ASSERT(level < cur->bc_nlevels);
1800 xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);
1803 if (--cur->bc_levels[level].ptr > 0)
1807 block = xfs_btree_get_block(cur, level, &bp);
1810 error = xfs_btree_check_block(cur, block, level, bp);
1816 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
1817 if (xfs_btree_ptr_is_null(cur, &ptr))
1820 XFS_BTREE_STATS_INC(cur, decrement);
1826 for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
1827 if (--cur->bc_levels[lev].ptr > 0)
1830 xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
1837 if (lev == cur->bc_nlevels) {
1838 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
1841 xfs_btree_mark_sick(cur);
1845 ASSERT(lev < cur->bc_nlevels);
1851 for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
1854 ptrp = xfs_btree_ptr_addr(cur, cur->bc_levels[lev].ptr, block);
1856 error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
1859 xfs_btree_setbuf(cur, lev, bp);
1860 cur->bc_levels[lev].ptr = xfs_btree_get_numrecs(block);
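increment/decrement move the cursor one record sideways at the given level: bump the slot if the block has room, otherwise climb until a right/left sibling exists and walk back down its near edge. A minimal leaf walk built only from calls shown in this listing (xfs_btree_get_rec appears near line 4481):

static int
example_walk_right(
	struct xfs_btree_cur	*cur)
{
	union xfs_btree_rec	*rec;
	int			stat;
	int			error;

	do {
		error = xfs_btree_get_rec(cur, &rec, &stat);
		if (error || stat == 0)
			return error;
		/* ... consume *rec here ... */
		error = xfs_btree_increment(cur, 0, &stat);
	} while (!error && stat);
	return error;
}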
1880 struct xfs_btree_cur *cur,
1885 if (!xfs_has_crc(cur->bc_mp) ||
1886 (cur->bc_flags & XFS_BTREE_BMBT_INVALID_OWNER))
1889 owner = xfs_btree_owner(cur);
1890 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
1903 struct xfs_btree_cur *cur, /* btree cursor */
1913 if (xfs_btree_at_iroot(cur, level)) {
1914 *blkp = xfs_btree_get_iroot(cur);
1924 bp = cur->bc_levels[level].bp;
1925 error = xfs_btree_ptr_to_daddr(cur, pp, &daddr);
1933 error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp);
1938 if (xfs_btree_check_block_owner(cur, *blkp) != NULL)
1949 xfs_btree_setbuf(cur, level, bp);
1955 xfs_trans_brelse(cur->bc_tp, bp);
1956 xfs_btree_mark_sick(cur);
1967 struct xfs_btree_cur *cur,
1974 cur->bc_ops->init_key_from_rec(kp,
1975 xfs_btree_rec_addr(cur, keyno, block));
1979 return xfs_btree_key_addr(cur, keyno, block);
1987 struct xfs_btree_cur *cur,
1990 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
1996 } else if (cur->bc_flags & XFS_BTREE_STAGING) {
1997 ptr->s = cpu_to_be32(cur->bc_ag.afake->af_root);
1999 cur->bc_ops->init_ptr_from_cur(cur, ptr);
2009 struct xfs_btree_cur *cur, /* btree cursor */
2021 XFS_BTREE_STATS_INC(cur, lookup);
2024 if (XFS_IS_CORRUPT(cur->bc_mp, cur->bc_nlevels == 0)) {
2025 xfs_btree_mark_sick(cur);
2033 xfs_btree_init_ptr_from_cur(cur, &ptr);
2042 for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
2044 error = xfs_btree_lookup_get_block(cur, level, pp, &block);
2065 if (level != 0 || cur->bc_nlevels != 1) {
2068 cur->bc_mp, block,
2070 xfs_btree_mark_sick(cur);
2074 cur->bc_levels[0].ptr = dir != XFS_LOOKUP_LE;
2084 XFS_BTREE_STATS_INC(cur, compare);
2090 kp = xfs_lookup_get_search_key(cur, level,
2099 diff = cur->bc_ops->key_diff(cur, kp);
2120 pp = xfs_btree_ptr_addr(cur, keyno, block);
2122 error = xfs_btree_debug_check_ptr(cur, pp, 0, level);
2126 cur->bc_levels[level].ptr = keyno;
2137 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
2140 !xfs_btree_ptr_is_null(cur, &ptr)) {
2143 cur->bc_levels[0].ptr = keyno;
2144 error = xfs_btree_increment(cur, 0, &i);
2147 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
2148 xfs_btree_mark_sick(cur);
2156 cur->bc_levels[0].ptr = keyno;
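xfs_btree_lookup() binary-searches every level with bc_ops->key_diff() against a search key staged in cur->bc_rec, then settles the final level-0 slot for LE/EQ. Callers go through per-btree typed wrappers; a sketch for the allocation btree case (bno, len, error and stat are assumed locals; xfs_alloc_lookup_eq is the production wrapper that does this):

	/* stage the key in the cursor, then search for an exact match */
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &stat);
	/* stat == 1: record found; stat == 0: no match at final position */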
2174 struct xfs_btree_cur *cur,
2177 ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING);
2179 (cur->bc_ops->key_len / 2));
2185 struct xfs_btree_cur *cur,
2195 rec = xfs_btree_rec_addr(cur, 1, block);
2196 cur->bc_ops->init_key_from_rec(key, rec);
2198 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
2200 cur->bc_ops->init_high_key_from_rec(&max_hkey, rec);
2202 rec = xfs_btree_rec_addr(cur, n, block);
2203 cur->bc_ops->init_high_key_from_rec(&hkey, rec);
2204 if (xfs_btree_keycmp_gt(cur, &hkey, &max_hkey))
2208 high = xfs_btree_high_key_from_key(cur, key);
2209 memcpy(high, &max_hkey, cur->bc_ops->key_len / 2);
2216 struct xfs_btree_cur *cur,
2225 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
2226 memcpy(key, xfs_btree_key_addr(cur, 1, block),
2227 cur->bc_ops->key_len / 2);
2229 max_hkey = xfs_btree_high_key_addr(cur, 1, block);
2231 hkey = xfs_btree_high_key_addr(cur, n, block);
2232 if (xfs_btree_keycmp_gt(cur, hkey, max_hkey))
2236 high = xfs_btree_high_key_from_key(cur, key);
2237 memcpy(high, max_hkey, cur->bc_ops->key_len / 2);
2239 memcpy(key, xfs_btree_key_addr(cur, 1, block),
2240 cur->bc_ops->key_len);
2247 struct xfs_btree_cur *cur,
2252 xfs_btree_get_leaf_keys(cur, block, key);
2254 xfs_btree_get_node_keys(cur, block, key);
2266 struct xfs_btree_cur *cur,
2269 return (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) || ptr == 1;
2279 struct xfs_btree_cur *cur,
2293 ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING);
2296 if (level + 1 >= cur->bc_nlevels)
2299 trace_xfs_btree_updkeys(cur, level, bp0);
2302 hkey = xfs_btree_high_key_from_key(cur, lkey);
2303 xfs_btree_get_keys(cur, block, lkey);
2304 for (level++; level < cur->bc_nlevels; level++) {
2308 block = xfs_btree_get_block(cur, level, &bp);
2309 trace_xfs_btree_updkeys(cur, level, bp);
2311 error = xfs_btree_check_block(cur, block, level, bp);
2315 ptr = cur->bc_levels[level].ptr;
2316 nlkey = xfs_btree_key_addr(cur, ptr, block);
2317 nhkey = xfs_btree_high_key_addr(cur, ptr, block);
2319 xfs_btree_keycmp_eq(cur, nlkey, lkey) &&
2320 xfs_btree_keycmp_eq(cur, nhkey, hkey))
2322 xfs_btree_copy_keys(cur, nlkey, lkey, 1);
2323 xfs_btree_log_keys(cur, bp, ptr, ptr);
2324 if (level + 1 >= cur->bc_nlevels)
2326 xfs_btree_get_node_keys(cur, block, lkey);
2335 struct xfs_btree_cur *cur,
2341 block = xfs_btree_get_block(cur, level, &bp);
2342 return __xfs_btree_updkeys(cur, level, block, bp, true);
2350 struct xfs_btree_cur *cur,
2361 block = xfs_btree_get_block(cur, level, &bp);
2362 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)
2363 return __xfs_btree_updkeys(cur, level, block, bp, false);
2371 xfs_btree_get_keys(cur, block, &key);
2372 for (level++, ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
2376 block = xfs_btree_get_block(cur, level, &bp);
2378 error = xfs_btree_check_block(cur, block, level, bp);
2382 ptr = cur->bc_levels[level].ptr;
2383 kp = xfs_btree_key_addr(cur, ptr, block);
2384 xfs_btree_copy_keys(cur, kp, &key, 1);
2385 xfs_btree_log_keys(cur, bp, ptr, ptr);
2392 * Update the record referred to by cur to the value in the
2398 struct xfs_btree_cur *cur,
2408 block = xfs_btree_get_block(cur, 0, &bp);
2411 error = xfs_btree_check_block(cur, block, 0, bp);
2416 ptr = cur->bc_levels[0].ptr;
2417 rp = xfs_btree_rec_addr(cur, ptr, block);
2420 xfs_btree_copy_recs(cur, rp, rec, 1);
2421 xfs_btree_log_recs(cur, bp, ptr, ptr);
2427 if (xfs_btree_is_lastrec(cur, block, 0)) {
2428 cur->bc_ops->update_lastrec(cur, block, rec,
2433 if (xfs_btree_needs_key_update(cur, ptr)) {
2434 error = xfs_btree_update_keys(cur, 0);
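xfs_btree_update() overwrites the record under the cursor in place and, when slot 1 changed or the btree overlaps, propagates the new keys upward through xfs_btree_update_keys(). The usual call pattern, sketched (rec is a caller-built union xfs_btree_rec):

	/* position on the record, then rewrite it */
	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &stat);
	if (!error && stat == 1)
		error = xfs_btree_update(cur, &rec);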
2446 * Move 1 record left from cur/level if possible.
2447 * Update cur to reflect the new path.
2451 struct xfs_btree_cur *cur,
2469 if (xfs_btree_at_iroot(cur, level))
2473 right = xfs_btree_get_block(cur, level, &rbp);
2476 error = xfs_btree_check_block(cur, right, level, rbp);
2482 xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
2483 if (xfs_btree_ptr_is_null(cur, &lptr))
2490 if (cur->bc_levels[level].ptr <= 1)
2494 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
2500 if (lrecs == cur->bc_ops->get_maxrecs(cur, level))
2513 XFS_BTREE_STATS_INC(cur, lshift);
2514 XFS_BTREE_STATS_ADD(cur, moves, 1);
2525 lkp = xfs_btree_key_addr(cur, lrecs, left);
2526 rkp = xfs_btree_key_addr(cur, 1, right);
2528 lpp = xfs_btree_ptr_addr(cur, lrecs, left);
2529 rpp = xfs_btree_ptr_addr(cur, 1, right);
2531 error = xfs_btree_debug_check_ptr(cur, rpp, 0, level);
2535 xfs_btree_copy_keys(cur, lkp, rkp, 1);
2536 xfs_btree_copy_ptrs(cur, lpp, rpp, 1);
2538 xfs_btree_log_keys(cur, lbp, lrecs, lrecs);
2539 xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs);
2541 ASSERT(cur->bc_ops->keys_inorder(cur,
2542 xfs_btree_key_addr(cur, lrecs - 1, left), lkp));
2547 lrp = xfs_btree_rec_addr(cur, lrecs, left);
2548 rrp = xfs_btree_rec_addr(cur, 1, right);
2550 xfs_btree_copy_recs(cur, lrp, rrp, 1);
2551 xfs_btree_log_recs(cur, lbp, lrecs, lrecs);
2553 ASSERT(cur->bc_ops->recs_inorder(cur,
2554 xfs_btree_rec_addr(cur, lrecs - 1, left), lrp));
2558 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);
2561 xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);
2566 XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1);
2570 error = xfs_btree_debug_check_ptr(cur, rpp, i + 1, level);
2575 xfs_btree_shift_keys(cur,
2576 xfs_btree_key_addr(cur, 2, right),
2578 xfs_btree_shift_ptrs(cur,
2579 xfs_btree_ptr_addr(cur, 2, right),
2582 xfs_btree_log_keys(cur, rbp, 1, rrecs);
2583 xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
2586 xfs_btree_shift_recs(cur,
2587 xfs_btree_rec_addr(cur, 2, right),
2589 xfs_btree_log_recs(cur, rbp, 1, rrecs);
2596 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
2597 error = xfs_btree_dup_cursor(cur, &tcur);
2602 xfs_btree_mark_sick(cur);
2620 error = xfs_btree_update_keys(cur, level);
2625 cur->bc_levels[level].ptr--;
2643 * Move 1 record right from cur/level if possible.
2644 * Update cur to reflect the new path.
2648 struct xfs_btree_cur *cur,
2664 if (xfs_btree_at_iroot(cur, level))
2668 left = xfs_btree_get_block(cur, level, &lbp);
2671 error = xfs_btree_check_block(cur, left, level, lbp);
2677 xfs_btree_get_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
2678 if (xfs_btree_ptr_is_null(cur, &rptr))
2686 if (cur->bc_levels[level].ptr >= lrecs)
2690 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
2696 if (rrecs == cur->bc_ops->get_maxrecs(cur, level))
2699 XFS_BTREE_STATS_INC(cur, rshift);
2700 XFS_BTREE_STATS_ADD(cur, moves, rrecs);
2712 lkp = xfs_btree_key_addr(cur, lrecs, left);
2713 lpp = xfs_btree_ptr_addr(cur, lrecs, left);
2714 rkp = xfs_btree_key_addr(cur, 1, right);
2715 rpp = xfs_btree_ptr_addr(cur, 1, right);
2718 error = xfs_btree_debug_check_ptr(cur, rpp, i, level);
2723 xfs_btree_shift_keys(cur, rkp, 1, rrecs);
2724 xfs_btree_shift_ptrs(cur, rpp, 1, rrecs);
2726 error = xfs_btree_debug_check_ptr(cur, lpp, 0, level);
2731 xfs_btree_copy_keys(cur, rkp, lkp, 1);
2732 xfs_btree_copy_ptrs(cur, rpp, lpp, 1);
2734 xfs_btree_log_keys(cur, rbp, 1, rrecs + 1);
2735 xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1);
2737 ASSERT(cur->bc_ops->keys_inorder(cur, rkp,
2738 xfs_btree_key_addr(cur, 2, right)));
2744 lrp = xfs_btree_rec_addr(cur, lrecs, left);
2745 rrp = xfs_btree_rec_addr(cur, 1, right);
2747 xfs_btree_shift_recs(cur, rrp, 1, rrecs);
2750 xfs_btree_copy_recs(cur, rrp, lrp, 1);
2751 xfs_btree_log_recs(cur, rbp, 1, rrecs + 1);
2758 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);
2761 xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);
2767 error = xfs_btree_dup_cursor(cur, &tcur);
2772 xfs_btree_mark_sick(cur);
2782 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
2783 error = xfs_btree_update_keys(cur, level);
2812 struct xfs_btree_cur *cur,
2826 if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) {
2831 error = cur->bc_ops->alloc_block(cur, hint_block, new_block, stat);
2832 trace_xfs_btree_alloc_block(cur, new_block, *stat, error);
2837 * Split cur/level block in half.
2843 struct xfs_btree_cur *cur,
2865 XFS_BTREE_STATS_INC(cur, split);
2868 left = xfs_btree_get_block(cur, level, &lbp);
2871 error = xfs_btree_check_block(cur, left, level, lbp);
2876 xfs_btree_buf_to_ptr(cur, lbp, &lptr);
2879 error = xfs_btree_alloc_block(cur, &lptr, &rptr, stat);
2884 XFS_BTREE_STATS_INC(cur, alloc);
2887 error = xfs_btree_get_buf_block(cur, &rptr, &right, &rbp);
2892 xfs_btree_init_block_cur(cur, rbp, xfs_btree_get_level(left), 0);
2901 if ((lrecs & 1) && cur->bc_levels[level].ptr <= rrecs + 1)
2905 XFS_BTREE_STATS_ADD(cur, moves, rrecs);
2924 lkp = xfs_btree_key_addr(cur, src_index, left);
2925 lpp = xfs_btree_ptr_addr(cur, src_index, left);
2926 rkp = xfs_btree_key_addr(cur, 1, right);
2927 rpp = xfs_btree_ptr_addr(cur, 1, right);
2930 error = xfs_btree_debug_check_ptr(cur, lpp, i, level);
2936 xfs_btree_copy_keys(cur, rkp, lkp, rrecs);
2937 xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs);
2939 xfs_btree_log_keys(cur, rbp, 1, rrecs);
2940 xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
2943 xfs_btree_get_node_keys(cur, right, key);
2949 lrp = xfs_btree_rec_addr(cur, src_index, left);
2950 rrp = xfs_btree_rec_addr(cur, 1, right);
2953 xfs_btree_copy_recs(cur, rrp, lrp, rrecs);
2954 xfs_btree_log_recs(cur, rbp, 1, rrecs);
2957 xfs_btree_get_leaf_keys(cur, right, key);
2964 xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB);
2965 xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB);
2966 xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
2967 xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
2969 xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS);
2970 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
2976 if (!xfs_btree_ptr_is_null(cur, &rrptr)) {
2977 error = xfs_btree_read_buf_block(cur, &rrptr,
2981 xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB);
2982 xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
2986 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
2987 error = xfs_btree_update_keys(cur, level);
2997 if (cur->bc_levels[level].ptr > lrecs + 1) {
2998 xfs_btree_setbuf(cur, level, rbp);
2999 cur->bc_levels[level].ptr -= lrecs;
3005 if (level + 1 < cur->bc_nlevels) {
3006 error = xfs_btree_dup_cursor(cur, curp);
3024 struct xfs_btree_cur *cur;
3058 xfs_trans_set_context(args->cur->bc_tp);
3060 args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
3063 xfs_trans_clear_context(args->cur->bc_tp);
3094 struct xfs_btree_cur *cur,
3104 if (!xfs_btree_is_bmap(cur->bc_ops) ||
3105 cur->bc_tp->t_highest_agno == NULLAGNUMBER)
3106 return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
3108 args.cur = cur;
3132 struct xfs_btree_cur *cur, /* btree cursor */
3148 XFS_BTREE_STATS_INC(cur, newroot);
3150 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
3152 level = cur->bc_nlevels - 1;
3154 block = xfs_btree_get_iroot(cur);
3155 pp = xfs_btree_ptr_addr(cur, 1, block);
3158 error = xfs_btree_alloc_block(cur, pp, &nptr, stat);
3164 XFS_BTREE_STATS_INC(cur, alloc);
3167 error = xfs_btree_get_buf_block(cur, &nptr, &cblock, &cbp);
3175 memcpy(cblock, block, xfs_btree_block_len(cur));
3176 if (xfs_has_crc(cur->bc_mp)) {
3178 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
3186 cur->bc_nlevels++;
3187 ASSERT(cur->bc_nlevels <= cur->bc_maxlevels);
3188 cur->bc_levels[level + 1].ptr = 1;
3190 kp = xfs_btree_key_addr(cur, 1, block);
3191 ckp = xfs_btree_key_addr(cur, 1, cblock);
3192 xfs_btree_copy_keys(cur, ckp, kp, xfs_btree_get_numrecs(cblock));
3194 cpp = xfs_btree_ptr_addr(cur, 1, cblock);
3196 error = xfs_btree_debug_check_ptr(cur, pp, i, level);
3201 xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock));
3203 error = xfs_btree_debug_check_ptr(cur, &nptr, 0, level);
3207 xfs_btree_copy_ptrs(cur, pp, &nptr, 1);
3209 xfs_iroot_realloc(cur->bc_ino.ip,
3211 cur->bc_ino.whichfork);
3213 xfs_btree_setbuf(cur, level, cbp);
3219 xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS);
3220 xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
3221 xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
3224 XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork);
3233 struct xfs_btree_cur *cur,
3237 if (cur->bc_flags & XFS_BTREE_STAGING) {
3239 cur->bc_ag.afake->af_root = be32_to_cpu(ptr->s);
3240 cur->bc_ag.afake->af_levels += inc;
3242 cur->bc_ops->set_root(cur, ptr, inc);
3251 struct xfs_btree_cur *cur, /* btree cursor */
3267 XFS_BTREE_STATS_INC(cur, newroot);
3270 xfs_btree_init_ptr_from_cur(cur, &rptr);
3273 error = xfs_btree_alloc_block(cur, &rptr, &lptr, stat);
3278 XFS_BTREE_STATS_INC(cur, alloc);
3281 error = xfs_btree_get_buf_block(cur, &lptr, &new, &nbp);
3286 xfs_btree_set_root(cur, &lptr, 1);
3294 block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp);
3297 error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp);
3302 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
3303 if (!xfs_btree_ptr_is_null(cur, &rptr)) {
3306 xfs_btree_buf_to_ptr(cur, lbp, &lptr);
3308 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
3316 xfs_btree_buf_to_ptr(cur, rbp, &rptr);
3318 xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
3319 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
3327 xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2);
3328 xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS);
3329 ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) &&
3330 !xfs_btree_ptr_is_null(cur, &rptr));
3338 xfs_btree_get_node_keys(cur, left,
3339 xfs_btree_key_addr(cur, 1, new));
3340 xfs_btree_get_node_keys(cur, right,
3341 xfs_btree_key_addr(cur, 2, new));
3348 xfs_btree_get_leaf_keys(cur, left,
3349 xfs_btree_key_addr(cur, 1, new));
3350 xfs_btree_get_leaf_keys(cur, right,
3351 xfs_btree_key_addr(cur, 2, new));
3353 xfs_btree_log_keys(cur, nbp, 1, 2);
3356 xfs_btree_copy_ptrs(cur,
3357 xfs_btree_ptr_addr(cur, 1, new), &lptr, 1);
3358 xfs_btree_copy_ptrs(cur,
3359 xfs_btree_ptr_addr(cur, 2, new), &rptr, 1);
3360 xfs_btree_log_ptrs(cur, nbp, 1, 2);
3363 xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
3364 cur->bc_levels[cur->bc_nlevels].ptr = nptr;
3365 cur->bc_nlevels++;
3366 ASSERT(cur->bc_nlevels <= cur->bc_maxlevels);
3378 struct xfs_btree_cur *cur, /* btree cursor */
3390 if (xfs_btree_at_iroot(cur, level)) {
3391 struct xfs_inode *ip = cur->bc_ino.ip;
3393 if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
3395 xfs_iroot_realloc(ip, 1, cur->bc_ino.whichfork);
3401 error = xfs_btree_new_iroot(cur, &logflags, stat);
3405 xfs_trans_log_inode(cur->bc_tp, ip, logflags);
3412 error = xfs_btree_rshift(cur, level, stat);
3417 error = xfs_btree_lshift(cur, level, stat);
3422 *oindex = *index = cur->bc_levels[level].ptr;
3432 error = xfs_btree_split(cur, level, nptr, key, ncur, stat);
3437 *index = cur->bc_levels[level].ptr;
3447 struct xfs_btree_cur *cur, /* btree cursor */
3452 struct xfs_btree_cur **curp, /* output: new cursor replacing cur */
3475 if (cur->bc_ops->type != XFS_BTREE_TYPE_INODE &&
3476 level >= cur->bc_nlevels) {
3477 error = xfs_btree_new_root(cur, stat);
3478 xfs_btree_set_ptr_null(cur, ptrp);
3484 ptr = cur->bc_levels[level].ptr;
3492 XFS_BTREE_STATS_INC(cur, insrec);
3495 block = xfs_btree_get_block(cur, level, &bp);
3500 error = xfs_btree_check_block(cur, block, level, bp);
3507 ASSERT(cur->bc_ops->recs_inorder(cur, rec,
3508 xfs_btree_rec_addr(cur, ptr, block)));
3510 ASSERT(cur->bc_ops->keys_inorder(cur, key,
3511 xfs_btree_key_addr(cur, ptr, block)));
3520 xfs_btree_set_ptr_null(cur, &nptr);
3521 if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) {
3522 error = xfs_btree_make_block_unfull(cur, level, numrecs,
3532 block = xfs_btree_get_block(cur, level, &bp);
3536 error = xfs_btree_check_block(cur, block, level, bp);
3545 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1);
3552 kp = xfs_btree_key_addr(cur, ptr, block);
3553 pp = xfs_btree_ptr_addr(cur, ptr, block);
3556 error = xfs_btree_debug_check_ptr(cur, pp, i, level);
3561 xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
3562 xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1);
3564 error = xfs_btree_debug_check_ptr(cur, ptrp, 0, level);
3569 xfs_btree_copy_keys(cur, kp, key, 1);
3570 xfs_btree_copy_ptrs(cur, pp, ptrp, 1);
3573 xfs_btree_log_ptrs(cur, bp, ptr, numrecs);
3574 xfs_btree_log_keys(cur, bp, ptr, numrecs);
3577 ASSERT(cur->bc_ops->keys_inorder(cur, kp,
3578 xfs_btree_key_addr(cur, ptr + 1, block)));
3585 rp = xfs_btree_rec_addr(cur, ptr, block);
3587 xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1);
3590 xfs_btree_copy_recs(cur, rp, rec, 1);
3592 xfs_btree_log_recs(cur, bp, ptr, numrecs);
3595 ASSERT(cur->bc_ops->recs_inorder(cur, rp,
3596 xfs_btree_rec_addr(cur, ptr + 1, block)));
3602 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
3613 xfs_btree_get_keys(cur, block, lkey);
3614 } else if (xfs_btree_needs_key_update(cur, optr)) {
3615 error = xfs_btree_update_keys(cur, level);
3624 if (xfs_btree_is_lastrec(cur, block, level)) {
3625 cur->bc_ops->update_lastrec(cur, block, rec,
3634 if (!xfs_btree_ptr_is_null(cur, &nptr)) {
3635 xfs_btree_copy_keys(cur, key, lkey, 1);
3649 * Insert the record at the point referenced by cur.
3657 struct xfs_btree_cur *cur,
3672 pcur = cur;
3675 xfs_btree_set_ptr_null(cur, &nptr);
3678 cur->bc_ops->init_rec_from_cur(cur, &rec);
3679 cur->bc_ops->init_key_from_rec(key, &rec);
3694 if (pcur != cur)
3699 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
3700 xfs_btree_mark_sick(cur);
3711 if (pcur != cur &&
3712 (ncur || xfs_btree_ptr_is_null(cur, &nptr))) {
3714 if (cur->bc_ops->update_cursor &&
3715 !(cur->bc_flags & XFS_BTREE_STAGING))
3716 cur->bc_ops->update_cursor(pcur, cur);
3717 cur->bc_nlevels = pcur->bc_nlevels;
3725 } while (!xfs_btree_ptr_is_null(cur, &nptr));
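xfs_btree_insert() loops xfs_btree_insrec() from the leaf upward: a level that splits hands back a new cursor plus the key/ptr pair to insert one level up, and the loop ends when nptr comes back null. A minimal caller sketch (the new record is staged in cur->bc_rec, exactly as for lookup):

	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &stat);
	if (!error && stat == 0)	/* key absent: safe to insert */
		error = xfs_btree_insert(cur, &stat);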
3743 struct xfs_btree_cur *cur)
3745 int whichfork = cur->bc_ino.whichfork;
3746 struct xfs_inode *ip = cur->bc_ino.ip;
3764 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
3765 ASSERT(cur->bc_nlevels > 1);
3771 level = cur->bc_nlevels - 1;
3778 block = xfs_btree_get_iroot(cur);
3782 cblock = xfs_btree_get_block(cur, level - 1, &cbp);
3790 if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level))
3793 XFS_BTREE_STATS_INC(cur, killroot);
3796 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
3797 ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
3798 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
3799 ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
3802 index = numrecs - cur->bc_ops->get_maxrecs(cur, level);
3804 xfs_iroot_realloc(cur->bc_ino.ip, index,
3805 cur->bc_ino.whichfork);
3812 kp = xfs_btree_key_addr(cur, 1, block);
3813 ckp = xfs_btree_key_addr(cur, 1, cblock);
3814 xfs_btree_copy_keys(cur, kp, ckp, numrecs);
3816 pp = xfs_btree_ptr_addr(cur, 1, block);
3817 cpp = xfs_btree_ptr_addr(cur, 1, cblock);
3820 error = xfs_btree_debug_check_ptr(cur, cpp, i, level - 1);
3825 xfs_btree_copy_ptrs(cur, pp, cpp, numrecs);
3827 error = xfs_btree_free_block(cur, cbp);
3831 cur->bc_levels[level - 1].bp = NULL;
3833 xfs_trans_log_inode(cur->bc_tp, ip,
3834 XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork));
3835 cur->bc_nlevels--;
3845 struct xfs_btree_cur *cur,
3852 XFS_BTREE_STATS_INC(cur, killroot);
3858 xfs_btree_set_root(cur, newroot, -1);
3860 error = xfs_btree_free_block(cur, bp);
3864 cur->bc_levels[level].bp = NULL;
3865 cur->bc_levels[level].ra = 0;
3866 cur->bc_nlevels--;
3873 struct xfs_btree_cur *cur,
3881 error = xfs_btree_decrement(cur, level, &i);
3892 * Delete record pointed to by cur/level.
3898 struct xfs_btree_cur *cur, /* btree cursor */
3924 ptr = cur->bc_levels[level].ptr;
3931 block = xfs_btree_get_block(cur, level, &bp);
3935 error = xfs_btree_check_block(cur, block, level, bp);
3946 XFS_BTREE_STATS_INC(cur, delrec);
3947 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr);
3955 lkp = xfs_btree_key_addr(cur, ptr + 1, block);
3956 lpp = xfs_btree_ptr_addr(cur, ptr + 1, block);
3959 error = xfs_btree_debug_check_ptr(cur, lpp, i, level);
3965 xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr);
3966 xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr);
3967 xfs_btree_log_keys(cur, bp, ptr, numrecs - 1);
3968 xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1);
3973 xfs_btree_shift_recs(cur,
3974 xfs_btree_rec_addr(cur, ptr + 1, block),
3976 xfs_btree_log_recs(cur, bp, ptr, numrecs - 1);
3984 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
3990 if (xfs_btree_is_lastrec(cur, block, level)) {
3991 cur->bc_ops->update_lastrec(cur, block, NULL,
4000 if (xfs_btree_at_iroot(cur, level)) {
4001 xfs_iroot_realloc(cur->bc_ino.ip, -1, cur->bc_ino.whichfork);
4003 error = xfs_btree_kill_iroot(cur);
4007 error = xfs_btree_dec_cursor(cur, level, stat);
4018 if (level == cur->bc_nlevels - 1) {
4025 pp = xfs_btree_ptr_addr(cur, 1, block);
4026 error = xfs_btree_kill_root(cur, bp, level, pp);
4030 error = xfs_btree_dec_cursor(cur, level, stat);
4042 if (xfs_btree_needs_key_update(cur, ptr)) {
4043 error = xfs_btree_update_keys(cur, level);
4052 if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) {
4053 error = xfs_btree_dec_cursor(cur, level, stat);
4064 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
4065 xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB);
4067 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
4073 if (xfs_btree_ptr_is_null(cur, &rptr) &&
4074 xfs_btree_ptr_is_null(cur, &lptr) &&
4075 level == cur->bc_nlevels - 2) {
4076 error = xfs_btree_kill_iroot(cur);
4078 error = xfs_btree_dec_cursor(cur, level, stat);
4085 ASSERT(!xfs_btree_ptr_is_null(cur, &rptr) ||
4086 !xfs_btree_ptr_is_null(cur, &lptr));
4092 error = xfs_btree_dup_cursor(cur, &tcur);
4100 if (!xfs_btree_ptr_is_null(cur, &rptr)) {
4106 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
4107 xfs_btree_mark_sick(cur);
4115 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
4116 xfs_btree_mark_sick(cur);
4122 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
4123 xfs_btree_mark_sick(cur);
4144 cur->bc_ops->get_minrecs(tcur, level)) {
4150 cur->bc_ops->get_minrecs(tcur, level));
4155 error = xfs_btree_dec_cursor(cur, level, stat);
4168 if (!xfs_btree_ptr_is_null(cur, &lptr)) {
4170 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
4171 xfs_btree_mark_sick(cur);
4179 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
4180 xfs_btree_mark_sick(cur);
4191 if (!xfs_btree_ptr_is_null(cur, &lptr)) {
4197 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
4198 xfs_btree_mark_sick(cur);
4207 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
4208 xfs_btree_mark_sick(cur);
4216 error = xfs_btree_check_block(cur, left, level, lbp);
4229 cur->bc_ops->get_minrecs(tcur, level)) {
4235 cur->bc_ops->get_minrecs(tcur, level));
4239 cur->bc_levels[0].ptr++;
4258 ASSERT(!xfs_btree_ptr_is_null(cur, &cptr));
4260 if (!xfs_btree_ptr_is_null(cur, &lptr) &&
4262 cur->bc_ops->get_maxrecs(cur, level)) {
4270 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
4277 } else if (!xfs_btree_ptr_is_null(cur, &rptr) &&
4279 cur->bc_ops->get_maxrecs(cur, level)) {
4287 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
4296 error = xfs_btree_dec_cursor(cur, level, stat);
4309 XFS_BTREE_STATS_ADD(cur, moves, rrecs);
4317 lkp = xfs_btree_key_addr(cur, lrecs + 1, left);
4318 lpp = xfs_btree_ptr_addr(cur, lrecs + 1, left);
4319 rkp = xfs_btree_key_addr(cur, 1, right);
4320 rpp = xfs_btree_ptr_addr(cur, 1, right);
4323 error = xfs_btree_debug_check_ptr(cur, rpp, i, level);
4328 xfs_btree_copy_keys(cur, lkp, rkp, rrecs);
4329 xfs_btree_copy_ptrs(cur, lpp, rpp, rrecs);
4331 xfs_btree_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
4332 xfs_btree_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
4338 lrp = xfs_btree_rec_addr(cur, lrecs + 1, left);
4339 rrp = xfs_btree_rec_addr(cur, 1, right);
4341 xfs_btree_copy_recs(cur, lrp, rrp, rrecs);
4342 xfs_btree_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
4345 XFS_BTREE_STATS_INC(cur, join);
4352 xfs_btree_get_sibling(cur, right, &cptr, XFS_BB_RIGHTSIB);
4353 xfs_btree_set_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
4354 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
4357 xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
4358 if (!xfs_btree_ptr_is_null(cur, &cptr)) {
4359 error = xfs_btree_read_buf_block(cur, &cptr, 0, &rrblock, &rrbp);
4362 xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB);
4363 xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
4367 error = xfs_btree_free_block(cur, rbp);
4376 cur->bc_levels[level].bp = lbp;
4377 cur->bc_levels[level].ptr += lrecs;
4378 cur->bc_levels[level].ra = 0;
4384 else if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE ||
4385 level + 1 < cur->bc_nlevels) {
4386 error = xfs_btree_increment(cur, level + 1, &i);
4398 cur->bc_levels[level].ptr--;
4421 * Delete the record pointed to by cur.
4427 struct xfs_btree_cur *cur,
4442 error = xfs_btree_delrec(cur, level, &i);
4453 if (joined && (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)) {
4454 error = xfs_btree_updkeys_force(cur, 0);
4460 for (level = 1; level < cur->bc_nlevels; level++) {
4461 if (cur->bc_levels[level].ptr == 0) {
4462 error = xfs_btree_decrement(cur, level, &i);
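xfs_btree_delete() runs xfs_btree_delrec() at level 0, forces a key update for overlapping btrees if blocks were joined, then decrements any level whose slot went to zero. Caller sketch, matching the lookup pattern above:

	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &stat);
	if (!error && stat == 1)	/* found: remove it */
		error = xfs_btree_delete(cur, &stat);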
4481 struct xfs_btree_cur *cur, /* btree cursor */
4492 ptr = cur->bc_levels[0].ptr;
4493 block = xfs_btree_get_block(cur, 0, &bp);
4496 error = xfs_btree_check_block(cur, block, 0, bp);
4512 *recp = xfs_btree_rec_addr(cur, ptr, block);
4520 struct xfs_btree_cur *cur,
4531 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
4532 block = xfs_btree_get_block(cur, level, &bp);
4535 error = fn(cur, level, data);
4540 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
4541 if (xfs_btree_ptr_is_null(cur, &rptr))
4550 xfs_btree_buf_to_ptr(cur, bp, &bufptr);
4551 if (xfs_btree_ptrs_equal(cur, &rptr, &bufptr)) {
4552 xfs_btree_mark_sick(cur);
4556 return xfs_btree_lookup_get_block(cur, level, &rptr, &block);
4563 struct xfs_btree_cur *cur,
4573 xfs_btree_init_ptr_from_cur(cur, &lptr);
4576 for (level = cur->bc_nlevels - 1; level >= 0; level--) {
4578 error = xfs_btree_lookup_get_block(cur, level, &lptr, &block);
4586 ptr = xfs_btree_ptr_addr(cur, 1, block);
4587 xfs_btree_readahead_ptr(cur, ptr, 1);
4590 xfs_btree_copy_ptrs(cur, &lptr, ptr, 1);
4600 error = xfs_btree_visit_block(cur, level, fn, data);
4641 struct xfs_btree_cur *cur,
4650 block = xfs_btree_get_block(cur, level, &bp);
4651 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
4669 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
4670 ASSERT(level == cur->bc_nlevels - 1);
4674 if (cur->bc_tp) {
4675 if (!xfs_trans_ordered_buf(cur->bc_tp, bp)) {
4676 xfs_btree_log_block(cur, bp, XFS_BB_OWNER);
4688 struct xfs_btree_cur *cur,
4697 return xfs_btree_visit_blocks(cur, xfs_btree_block_change_owner,
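xfs_btree_visit_blocks() drives a callback over every block, level by level from the root down, using the leftmost-pointer descent shown above; the callback shape follows the fn(cur, level, data) call near line 4535. A counting callback sketch, essentially what xfs_btree_count_blocks_helper does further down (XFS_BTREE_VISIT_ALL is the walk-everything flag):

static int
example_visit_fn(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*data)
{
	xfs_extlen_t		*blocks = data;

	(*blocks)++;		/* one invocation per btree block */
	return 0;
}

Wired up as xfs_btree_visit_blocks(cur, example_visit_fn, XFS_BTREE_VISIT_ALL, &blocks).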
4921 struct xfs_btree_cur *cur,
4933 ASSERT(cur->bc_ops->init_high_key_from_rec);
4934 ASSERT(cur->bc_ops->diff_two_keys);
4941 error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat);
4947 error = xfs_btree_increment(cur, 0, &stat);
4954 error = xfs_btree_get_rec(cur, &recp, &stat);
4960 cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
4962 if (xfs_btree_keycmp_gt(cur, low_key, &rec_key))
4967 cur->bc_ops->init_key_from_rec(&rec_key, recp);
4968 if (xfs_btree_keycmp_gt(cur, &rec_key, high_key))
4972 error = fn(cur, recp, priv);
4978 error = xfs_btree_increment(cur, 0, &stat);
5008 struct xfs_btree_cur *cur,
5028 level = cur->bc_nlevels - 1;
5029 xfs_btree_init_ptr_from_cur(cur, &ptr);
5030 error = xfs_btree_lookup_get_block(cur, level, &ptr, &block);
5033 xfs_btree_get_block(cur, level, &bp);
5034 trace_xfs_btree_overlapped_query_range(cur, level, bp);
5036 error = xfs_btree_check_block(cur, block, level, bp);
5040 cur->bc_levels[level].ptr = 1;
5042 while (level < cur->bc_nlevels) {
5043 block = xfs_btree_get_block(cur, level, &bp);
5046 if (cur->bc_levels[level].ptr >
5049 if (level < cur->bc_nlevels - 1)
5050 cur->bc_levels[level + 1].ptr++;
5057 recp = xfs_btree_rec_addr(cur, cur->bc_levels[0].ptr,
5060 cur->bc_ops->init_high_key_from_rec(&rec_hkey, recp);
5061 cur->bc_ops->init_key_from_rec(&rec_key, recp);
5072 if (xfs_btree_keycmp_lt(cur, high_key, &rec_key))
5074 if (xfs_btree_keycmp_ge(cur, &rec_hkey, low_key)) {
5075 error = fn(cur, recp, priv);
5079 cur->bc_levels[level].ptr++;
5084 lkp = xfs_btree_key_addr(cur, cur->bc_levels[level].ptr, block);
5085 hkp = xfs_btree_high_key_addr(cur, cur->bc_levels[level].ptr,
5087 pp = xfs_btree_ptr_addr(cur, cur->bc_levels[level].ptr, block);
5098 if (xfs_btree_keycmp_lt(cur, high_key, lkp))
5100 if (xfs_btree_keycmp_ge(cur, hkp, low_key)) {
5102 error = xfs_btree_lookup_get_block(cur, level, pp,
5106 xfs_btree_get_block(cur, level, &bp);
5107 trace_xfs_btree_overlapped_query_range(cur, level, bp);
5109 error = xfs_btree_check_block(cur, block, level, bp);
5113 cur->bc_levels[level].ptr = 1;
5116 cur->bc_levels[level].ptr++;
5127 if (cur->bc_levels[0].bp == NULL) {
5128 for (i = 0; i < cur->bc_nlevels; i++) {
5129 if (cur->bc_levels[i].bp) {
5130 xfs_trans_brelse(cur->bc_tp,
5131 cur->bc_levels[i].bp);
5132 cur->bc_levels[i].bp = NULL;
5133 cur->bc_levels[i].ptr = 0;
5134 cur->bc_levels[i].ra = 0;
5144 struct xfs_btree_cur *cur,
5150 cur->bc_rec = *irec;
5151 cur->bc_ops->init_rec_from_cur(cur, &rec);
5152 cur->bc_ops->init_key_from_rec(key, &rec);
5163 struct xfs_btree_cur *cur,
5173 xfs_btree_key_from_irec(cur, &high_key, high_rec);
5174 xfs_btree_key_from_irec(cur, &low_key, low_rec);
5177 if (!xfs_btree_keycmp_le(cur, &low_key, &high_key))
5180 if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
5181 return xfs_btree_simple_query_range(cur, &low_key,
5183 return xfs_btree_overlapped_query_range(cur, &low_key, &high_key,
5190 struct xfs_btree_cur *cur,
5197 memset(&cur->bc_rec, 0, sizeof(cur->bc_rec));
5201 return xfs_btree_simple_query_range(cur, &low_key, &high_key, fn, priv);
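xfs_btree_query_range() builds low/high keys from the caller's irecs, takes the simple in-order walk for non-overlapping btrees and the interval walk otherwise; query_all covers the whole keyspace from a zeroed bc_rec. The callback sees each matching record and may return -ECANCELED to end the walk early. Sketch:

static int
example_range_fn(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*rec,
	void				*priv)
{
	unsigned int			*count = priv;

	(*count)++;
	return 0;		/* or -ECANCELED to stop early */
}

Wired up as xfs_btree_query_all(cur, example_range_fn, &count).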
5206 struct xfs_btree_cur *cur,
5219 struct xfs_btree_cur *cur,
5223 return xfs_btree_visit_blocks(cur, xfs_btree_count_blocks_helper,
5230 struct xfs_btree_cur *cur,
5234 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
5255 struct xfs_btree_cur *cur,
5264 cur->bc_ops->init_key_from_rec(&rec_key, rec);
5274 if (xfs_btree_masked_keycmp_lt(cur, &info->start_key, &rec_key,
5285 key_contig = cur->bc_ops->keys_contiguous(cur, &info->high_key,
5288 !(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
5298 cur->bc_ops->init_high_key_from_rec(&rec_high_key, rec);
5299 if (xfs_btree_masked_keycmp_gt(cur, &rec_high_key, &info->high_key,
5322 struct xfs_btree_cur *cur,
5335 if (!cur->bc_ops->keys_contiguous) {
5340 xfs_btree_key_from_irec(cur, &info.start_key, low);
5341 xfs_btree_key_from_irec(cur, &info.end_key, high);
5343 error = xfs_btree_query_range(cur, low, high,
5358 if (xfs_btree_masked_keycmp_ge(cur, &info.high_key, &info.end_key,
5370 struct xfs_btree_cur *cur)
5375 block = xfs_btree_get_block(cur, 0, &bp);
5378 if (cur->bc_levels[0].ptr < xfs_btree_get_numrecs(block))
5382 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
5430 struct xfs_btree_cur *cur)
5435 memset(&cur->bc_rec, 0, sizeof(cur->bc_rec));
5436 error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat);
5442 error = xfs_btree_decrement(cur, 0, &stat);
5447 xfs_btree_mark_sick(cur);