/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"


/*
 * Duplicate an allocation btree cursor: build a fresh cursor against the
 * same AGF buffer, AG number and btree type as the existing one.
 */
STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_btnum);
}

/*
 * Point the AGF at a new btree root block and adjust the recorded tree
 * height by @inc, keeping the in-core perag copy of the level in sync.
 * The AGF root/level fields are logged so the change hits the journal.
 */
STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	int			btnum = cur->bc_btnum;
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	/* a zero short-form pointer would mean "no root" - not allowed here */
	ASSERT(ptr->s != 0);

	/* ptr->s is already big-endian, same representation as agf_roots */
	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	pag->pagf_levels[btnum] += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

/*
 * Allocate a block for the btree out of the AG freelist.  Sets *stat to 1
 * and fills in @new on success, or sets *stat to 0 if the freelist is
 * empty (the @start hint is unused - freelist blocks come as they are).
 */
STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			length,
	int			*stat)
{
	int			error;
	xfs_agblock_t		bno;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	/* Allocate the new block from the freelist. If we can't, give up.  */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
				       &bno, 1);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	if (bno == NULLAGBLOCK) {
		/* freelist exhausted - report "nothing allocated", not error */
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	/* account the block this transaction moved out of the freelist */
	xfs_trans_agbtree_delta(cur->bc_tp, 1);
	new->s = cpu_to_be32(bno);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
}

/*
 * Free a btree block: put it back on the AG freelist and mark it busy so
 * it cannot be reused before the freeing transaction is on stable storage.
 */
STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	/*
	 * Since blocks move to the free list without the coordination used in
	 * xfs_bmap_finish, we can't allow block to be available for
	 * reallocation and non-transaction writing (user data) until we know
	 * that the transaction that moved it to the free list is permanently
	 * on disk. We track the blocks by declaring these blocks as "busy";
	 * the busy list is maintained on a per-ag basis and each transaction
	 * records which entries should be removed when the iclog commits to
	 * disk. If a busy block is allocated, the iclog is pushed up to the
	 * LSN that freed the block.
	 */
	xfs_alloc_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1);
	xfs_trans_agbtree_delta(cur->bc_tp, -1);
	return 0;
}

/*
 * Update the longest extent in the AGF
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_rec	*rec,
	int			ptr,
	int			reason)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag;
	__be32			len;
	int			numrecs;

	/* only the by-count btree tracks the longest free extent */
	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		/* only a record longer than the current longest matters */
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		/* only deleting the (old) last record changes the longest */
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			/* new longest is the new last record in the block */
			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	/* write both the on-disk AGF field and the in-core perag copy */
	agf->agf_longest = len;
	pag = xfs_perag_get(cur->bc_mp, seqno);
	pag->pagf_longest = be32_to_cpu(len);
	xfs_perag_put(pag);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
}

/*
 * Minimum records per block, from the precomputed mount-time table
 * (index 0 = leaf, index 1 = node).
 */
STATIC int
xfs_allocbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mnr[level != 0];
}

/*
 * Maximum records per block, from the precomputed mount-time table
 * (index 0 = leaf, index 1 = node).
 */
STATIC int
xfs_allocbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mxr[level != 0];
}

/*
 * Build a key from a record.  Key and record carry the same two
 * big-endian fields, so this is a straight copy.
 */
STATIC void
xfs_allocbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	ASSERT(rec->alloc.ar_startblock != 0);

	key->alloc.ar_startblock = rec->alloc.ar_startblock;
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}

/*
 * Build a record from a key - the inverse of init_key_from_rec.
 */
STATIC void
xfs_allocbt_init_rec_from_key(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	ASSERT(key->alloc.ar_startblock != 0);

	rec->alloc.ar_startblock = key->alloc.ar_startblock;
	rec->alloc.ar_blockcount = key->alloc.ar_blockcount;
}

/*
 * Build an on-disk (big-endian) record from the cursor's in-core
 * (CPU-endian) record.
 */
STATIC void
xfs_allocbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	ASSERT(cur->bc_rec.a.ar_startblock != 0);

	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}

/*
 * Read the root pointer for this btree out of the AGF.
 */
STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_roots[cur->bc_btnum] != 0);

	ptr->s = agf->agf_roots[cur->bc_btnum];
}

/*
 * Compare @key against the cursor's in-core record for lookups.  The
 * by-block btree orders on startblock only; the by-count btree orders
 * on blockcount with startblock as tie-breaker.  Returns <0, 0 or >0.
 */
STATIC __int64_t
xfs_allocbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
	xfs_alloc_key_t		*kp = &key->alloc;
	__int64_t		diff;

	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		return (__int64_t)be32_to_cpu(kp->ar_startblock) -
				rec->ar_startblock;
	}

	diff = (__int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
	if (diff)
		return diff;

	return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

/*
 * Kill the current root block: install @newroot as the root (dropping the
 * tree height by one) and return the old root block to the freelist.
 */
STATIC int
xfs_allocbt_kill_root(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			level,
	union xfs_btree_ptr	*newroot)
{
	int			error;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_STATS_INC(cur, killroot);

	/*
	 * Update the root pointer, decreasing the level by 1 and then
	 * free the old root.
	 */
	xfs_allocbt_set_root(cur, newroot, -1);
	error = xfs_allocbt_free_block(cur, bp);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	XFS_BTREE_STATS_INC(cur, free);

	/* drop the cursor's reference to the old root buffer */
	xfs_btree_setbuf(cur, level, NULL);
	cur->bc_nlevels--;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;
}

#ifdef DEBUG
/*
 * Debug check: are two adjacent keys in the correct sort order for this
 * btree type?  (Raw be32 equality compare is valid for the tie-break.)
 */
STATIC int
xfs_allocbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		return be32_to_cpu(k1->alloc.ar_startblock) <
		       be32_to_cpu(k2->alloc.ar_startblock);
	} else {
		return be32_to_cpu(k1->alloc.ar_blockcount) <
			be32_to_cpu(k2->alloc.ar_blockcount) ||
			(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
			 be32_to_cpu(k1->alloc.ar_startblock) <
			 be32_to_cpu(k2->alloc.ar_startblock));
	}
}

/*
 * Debug check: are two adjacent records in order?  In the by-block tree
 * the records must also not overlap (r1 end <= r2 start).
 */
STATIC int
xfs_allocbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		return be32_to_cpu(r1->alloc.ar_startblock) +
			be32_to_cpu(r1->alloc.ar_blockcount) <=
			be32_to_cpu(r2->alloc.ar_startblock);
	} else {
		return be32_to_cpu(r1->alloc.ar_blockcount) <
			be32_to_cpu(r2->alloc.ar_blockcount) ||
			(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
			 be32_to_cpu(r1->alloc.ar_startblock) <
			 be32_to_cpu(r2->alloc.ar_startblock));
	}
}
#endif	/* DEBUG */

#ifdef XFS_BTREE_TRACE
ktrace_t	*xfs_allocbt_trace_buf;

/*
 * ktrace glue: record a generic btree trace event for this cursor.
 */
STATIC void
xfs_allocbt_trace_enter(
	struct xfs_btree_cur	*cur,
	const char		*func,
	char			*s,
	int			type,
	int			line,
	__psunsigned_t		a0,
	__psunsigned_t		a1,
	__psunsigned_t		a2,
	__psunsigned_t		a3,
	__psunsigned_t		a4,
	__psunsigned_t		a5,
	__psunsigned_t		a6,
	__psunsigned_t		a7,
	__psunsigned_t		a8,
	__psunsigned_t		a9,
	__psunsigned_t		a10)
{
	ktrace_enter(xfs_allocbt_trace_buf, (void *)(__psint_t)type,
		(void *)func, (void *)s, NULL, (void *)cur,
		(void *)a0, (void *)a1, (void *)a2, (void *)a3,
		(void *)a4, (void *)a5, (void *)a6, (void *)a7,
		(void *)a8, (void *)a9, (void *)a10);
}

/*
 * ktrace glue: summarize the cursor state (AG number and in-core record).
 */
STATIC void
xfs_allocbt_trace_cursor(
	struct xfs_btree_cur	*cur,
	__uint32_t		*s0,
	__uint64_t		*l0,
	__uint64_t		*l1)
{
	*s0 = cur->bc_private.a.agno;
	*l0 = cur->bc_rec.a.ar_startblock;
	*l1 = cur->bc_rec.a.ar_blockcount;
}

/*
 * ktrace glue: unpack a key into CPU-endian trace values.
 */
STATIC void
xfs_allocbt_trace_key(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key,
	__uint64_t		*l0,
	__uint64_t		*l1)
{
	*l0 = be32_to_cpu(key->alloc.ar_startblock);
	*l1 = be32_to_cpu(key->alloc.ar_blockcount);
}

/*
 * ktrace glue: unpack a record into CPU-endian trace values.
 */
STATIC void
xfs_allocbt_trace_record(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec,
	__uint64_t		*l0,
	__uint64_t		*l1,
	__uint64_t		*l2)
{
	*l0 = be32_to_cpu(rec->alloc.ar_startblock);
	*l1 = be32_to_cpu(rec->alloc.ar_blockcount);
	*l2 = 0;
}
#endif /* XFS_BTREE_TRACE */

/* operations vector shared by both allocation btrees (bnobt and cntbt) */
static const struct xfs_btree_ops xfs_allocbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.kill_root		= xfs_allocbt_kill_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_rec_from_key	= xfs_allocbt_init_rec_from_key,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_allocbt_key_diff,

#ifdef DEBUG
	.keys_inorder		= xfs_allocbt_keys_inorder,
	.recs_inorder		= xfs_allocbt_recs_inorder,
#endif

#ifdef XFS_BTREE_TRACE
	.trace_enter		= xfs_allocbt_trace_enter,
	.trace_cursor		= xfs_allocbt_trace_cursor,
	.trace_key		= xfs_allocbt_trace_key,
	.trace_record		= xfs_allocbt_trace_record,
#endif
};

/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agf structure */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_btnum_t		btnum)		/* btree identifier */
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	/* current tree height comes from the AGF for this btree type */
	cur->bc_nlevels = be32_to_cpu(agf->agf_levels[btnum]);
	cur->bc_btnum = btnum;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	cur->bc_ops = &xfs_allocbt_ops;
	/* the cntbt must maintain the longest-extent info on lastrec changes */
	if (btnum == XFS_BTNUM_CNT)
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	return cur;
}

/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	/* subtract the block header, then divide by the per-entry size */
	blocklen -= XFS_ALLOC_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_alloc_rec_t);
	/* interior nodes hold a key and a child pointer per entry */
	return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}