Lines Matching refs:cur

30 struct xfs_btree_cur *cur,
33 return M_IGEO(cur->bc_mp)->inobt_mnr[level != 0];
38 struct xfs_btree_cur *cur)
40 return xfs_inobt_init_cursor(cur->bc_ag.pag, cur->bc_tp,
41 cur->bc_ag.agbp);
46 struct xfs_btree_cur *cur)
48 return xfs_finobt_init_cursor(cur->bc_ag.pag, cur->bc_tp,
49 cur->bc_ag.agbp);
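The fragments at lines 38-49 are the cursor-duplication callbacks: each one simply rebuilds a cursor of the same kind against the same perag, transaction and AGI buffer. A minimal reconstruction is sketched below; the function names xfs_inobt_dup_cursor/xfs_finobt_dup_cursor are not shown in the listing and are assumptions inferred from the pattern.

STATIC struct xfs_btree_cur *
xfs_inobt_dup_cursor(			/* name assumed from context */
	struct xfs_btree_cur	*cur)
{
	/* Re-create an inobt cursor for the same AG, tp and AGI buffer. */
	return xfs_inobt_init_cursor(cur->bc_ag.pag, cur->bc_tp,
			cur->bc_ag.agbp);
}

STATIC struct xfs_btree_cur *
xfs_finobt_dup_cursor(			/* name assumed from context */
	struct xfs_btree_cur	*cur)
{
	/* Same idea for the free inode btree. */
	return xfs_finobt_init_cursor(cur->bc_ag.pag, cur->bc_tp,
			cur->bc_ag.agbp);
}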
54 struct xfs_btree_cur *cur,
58 struct xfs_buf *agbp = cur->bc_ag.agbp;
63 xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
68 struct xfs_btree_cur *cur,
72 struct xfs_buf *agbp = cur->bc_ag.agbp;
77 xfs_ialloc_log_agi(cur->bc_tp, agbp,
84 struct xfs_btree_cur *cur,
87 struct xfs_buf *agbp = cur->bc_ag.agbp;
90 if (!xfs_has_inobtcounts(cur->bc_mp))
93 if (xfs_btree_is_fino(cur->bc_ops))
97 xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_IBLOCKS);
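Lines 54-77 are the two set_root callbacks (they store the new root block and level in the AGI and log the corresponding fields), and lines 84-97 are the per-AG block counter helper called from the alloc/free paths below (lines 135 and 170). A rough sketch of that helper, assembled from the fragments, follows; the agi_iblocks/agi_fblocks field names and the be32_add_cpu() arithmetic are filled in from context and should be treated as assumptions.

/*
 * Sketch: bump or drop the per-AG inode btree block counter when a block
 * is added to or removed from the tree.  Only done when the inobtcounts
 * feature is enabled on this filesystem.
 */
static void
xfs_inobt_mod_blockcount(
	struct xfs_btree_cur	*cur,
	int			howmuch)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agi		*agi = agbp->b_addr;

	if (!xfs_has_inobtcounts(cur->bc_mp))
		return;

	if (xfs_btree_is_fino(cur->bc_ops))
		be32_add_cpu(&agi->agi_fblocks, howmuch);	/* assumed field */
	else
		be32_add_cpu(&agi->agi_iblocks, howmuch);	/* assumed field */
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_IBLOCKS);
}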
102 struct xfs_btree_cur *cur,
113 args.tp = cur->bc_tp;
114 args.mp = cur->bc_mp;
115 args.pag = cur->bc_ag.pag;
135 xfs_inobt_mod_blockcount(cur, 1);
141 struct xfs_btree_cur *cur,
146 return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
151 struct xfs_btree_cur *cur,
156 if (cur->bc_mp->m_finobt_nores)
157 return xfs_inobt_alloc_block(cur, start, new, stat);
158 return __xfs_inobt_alloc_block(cur, start, new, stat,
164 struct xfs_btree_cur *cur,
170 xfs_inobt_mod_blockcount(cur, -1);
171 fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
172 return xfs_free_extent_later(cur->bc_tp, fsbno, 1,
178 struct xfs_btree_cur *cur,
181 return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE);
186 struct xfs_btree_cur *cur,
189 if (cur->bc_mp->m_finobt_nores)
190 return xfs_inobt_free_block(cur, bp);
191 return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
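Lines 141-191 are the block allocation and free callbacks for the two trees. The pattern: the inobt always allocates and frees with no per-AG reservation, while the finobt charges the metadata reservation unless the mount disabled it (m_finobt_nores). A hedged reconstruction of the two finobt wrappers is below; the wrapper names and the XFS_AG_RESV_METADATA continuation on the alloc path (truncated at line 158) are assumed by analogy with the free path at line 191.

STATIC int
xfs_finobt_alloc_block(			/* name assumed from context */
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	/* No per-AG finobt reservation on this mount: plain allocation. */
	if (cur->bc_mp->m_finobt_nores)
		return xfs_inobt_alloc_block(cur, start, new, stat);
	return __xfs_inobt_alloc_block(cur, start, new, stat,
			XFS_AG_RESV_METADATA);	/* continuation assumed */
}

STATIC int
xfs_finobt_free_block(			/* name assumed from context */
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	if (cur->bc_mp->m_finobt_nores)
		return xfs_inobt_free_block(cur, bp);
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
}

The metadata reservation exists so that growing the free inode btree does not run out of space at the moment an inode is being freed; the m_finobt_nores fallback covers mounts where that reservation could not be made.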
196 struct xfs_btree_cur *cur,
199 return M_IGEO(cur->bc_mp)->inobt_mxr[level != 0];
224 struct xfs_btree_cur *cur,
227 rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
228 if (xfs_has_sparseinodes(cur->bc_mp)) {
230 cpu_to_be16(cur->bc_rec.i.ir_holemask);
231 rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
232 rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
236 cpu_to_be32(cur->bc_rec.i.ir_freecount);
238 rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
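Lines 224-238 are the "init record from cursor" callback, which packs the in-core inode record into its on-disk layout: the sparse-inode format stores a holemask plus 8-bit counts, the classic format only a 32-bit free count. A sketch filling in the missing else branch follows; the function name and the ir_u.f member name are assumptions from context.

STATIC void
xfs_inobt_init_rec_from_cur(		/* name assumed from context */
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
	if (xfs_has_sparseinodes(cur->bc_mp)) {
		/* sparse format: holemask plus 8-bit count/freecount */
		rec->inobt.ir_u.sp.ir_holemask =
			cpu_to_be16(cur->bc_rec.i.ir_holemask);
		rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
		rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
	} else {
		/* classic format: 32-bit free count only (member name assumed) */
		rec->inobt.ir_u.f.ir_freecount =
			cpu_to_be32(cur->bc_rec.i.ir_freecount);
	}
	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
}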
246 struct xfs_btree_cur *cur,
249 struct xfs_agi *agi = cur->bc_ag.agbp->b_addr;
251 ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));
258 struct xfs_btree_cur *cur,
261 struct xfs_agi *agi = cur->bc_ag.agbp->b_addr;
263 ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));
269 struct xfs_btree_cur *cur,
273 cur->bc_rec.i.ir_startino;
278 struct xfs_btree_cur *cur,
379 struct xfs_btree_cur *cur,
389 struct xfs_btree_cur *cur,
399 struct xfs_btree_cur *cur,
482 struct xfs_btree_cur *cur;
484 cur = xfs_btree_alloc_cursor(mp, tp, &xfs_inobt_ops,
486 cur->bc_ag.pag = xfs_perag_hold(pag);
487 cur->bc_ag.agbp = agbp;
491 cur->bc_nlevels = be32_to_cpu(agi->agi_level);
493 return cur;
508 struct xfs_btree_cur *cur;
510 cur = xfs_btree_alloc_cursor(mp, tp, &xfs_finobt_ops,
512 cur->bc_ag.pag = xfs_perag_hold(pag);
513 cur->bc_ag.agbp = agbp;
517 cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
519 return cur;
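Lines 482-519 are the two public cursor constructors. Both allocate a cursor, take a reference on the perag, attach the AGI buffer, and seed bc_nlevels from the on-disk AGI level field (agi_level for the inobt, agi_free_level for the finobt). A sketch of the inobt variant is below; the mount lookup, the xfs_btree_alloc_cursor continuation arguments and the NULL-agbp guard are filled in from context and are assumptions.

struct xfs_btree_cur *
xfs_inobt_init_cursor(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_mount	*mp = pag->pag_mount;	/* assumed */
	struct xfs_btree_cur	*cur;

	cur = xfs_btree_alloc_cursor(mp, tp, &xfs_inobt_ops,
			M_IGEO(mp)->inobt_maxlevels,	/* assumed */
			xfs_inobt_cur_cache);		/* assumed */
	cur->bc_ag.pag = xfs_perag_hold(pag);
	cur->bc_ag.agbp = agbp;
	if (agbp) {	/* staging callers may not have an AGI buffer yet */
		struct xfs_agi	*agi = agbp->b_addr;

		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
	}
	return cur;
}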
528 struct xfs_btree_cur *cur,
533 struct xbtree_afakeroot *afake = cur->bc_ag.afake;
536 ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
538 if (xfs_btree_is_ino(cur->bc_ops)) {
542 if (xfs_has_inobtcounts(cur->bc_mp)) {
547 xfs_btree_commit_afakeroot(cur, tp, agbp);
552 if (xfs_has_inobtcounts(cur->bc_mp)) {
557 xfs_btree_commit_afakeroot(cur, tp, agbp);
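Lines 528-557 commit a staged (bulk-loaded) btree: the fake root built by the btree staging code is written into the AGI, the new block count is recorded when inobtcounts is enabled, the touched AGI fields are logged, and xfs_btree_commit_afakeroot() flips the cursor over to the real tree. A sketch of the overall shape follows; the function name, the agi_root/agi_level (and free counterparts) assignments and the af_* field names are taken from the staging structures and are assumptions here.

void
xfs_inobt_commit_staged_btree(		/* name assumed from context */
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agi		*agi = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;
	int			fields;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	if (xfs_btree_is_ino(cur->bc_ops)) {
		fields = XFS_AGI_ROOT | XFS_AGI_LEVEL;
		agi->agi_root = cpu_to_be32(afake->af_root);	/* assumed */
		agi->agi_level = cpu_to_be32(afake->af_levels);	/* assumed */
		if (xfs_has_inobtcounts(cur->bc_mp)) {
			agi->agi_iblocks = cpu_to_be32(afake->af_blocks);
			fields |= XFS_AGI_IBLOCKS;
		}
		xfs_ialloc_log_agi(tp, agbp, fields);
		xfs_btree_commit_afakeroot(cur, tp, agbp);
	} else {
		fields = XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL;
		agi->agi_free_root = cpu_to_be32(afake->af_root);	/* assumed */
		agi->agi_free_level = cpu_to_be32(afake->af_levels);	/* assumed */
		if (xfs_has_inobtcounts(cur->bc_mp)) {
			agi->agi_fblocks = cpu_to_be32(afake->af_blocks);
			fields |= XFS_AGI_IBLOCKS;
		}
		xfs_ialloc_log_agi(tp, agbp, fields);
		xfs_btree_commit_afakeroot(cur, tp, agbp);
	}
}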
745 struct xfs_btree_cur *cur;
752 cur = xfs_inobt_init_cursor(pag, tp, agbp);
753 error = xfs_btree_count_blocks(cur, tree_blocks);
754 xfs_btree_del_cursor(cur, error);
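The last three fragments (lines 745-754) show the standard cursor lifecycle used throughout the file: build a cursor for the AG, run one btree operation, then delete the cursor, passing the operation's error code so teardown behaves correctly on both success and failure. A minimal sketch of that pattern, with the counter type assumed:

	struct xfs_btree_cur	*cur;
	xfs_extlen_t		tree_blocks = 0;	/* type assumed */
	int			error;

	/* Count the blocks held by this AG's inode btree. */
	cur = xfs_inobt_init_cursor(pag, tp, agbp);
	error = xfs_btree_count_blocks(cur, &tree_blocks);
	xfs_btree_del_cursor(cur, error);

Passing the error into xfs_btree_del_cursor() lets the generic code release buffers and sanity-check cursor state only on the success path.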