/*
 * linux/fs/hfsplus/extents.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of Extents both in catalog and extents overflow trees
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Compare two extents keys, returns 0 on same, pos/neg for difference */
/*
 * Ordering is cnid, then fork_type, then start_block; each field is
 * compared in CPU byte order after conversion from the on-disk
 * big-endian representation.
 */
int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1,
			const hfsplus_btree_key *k2)
{
	__be32 k1id, k2id;
	__be32 k1s, k2s;

	k1id = k1->ext.cnid;
	k2id = k2->ext.cnid;
	if (k1id != k2id)
		return be32_to_cpu(k1id) < be32_to_cpu(k2id) ? -1 : 1;

	if (k1->ext.fork_type != k2->ext.fork_type)
		return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1;

	k1s = k1->ext.start_block;
	k2s = k2->ext.start_block;
	if (k1s == k2s)
		return 0;
	return be32_to_cpu(k1s) < be32_to_cpu(k2s) ? -1 : 1;
}

/*
 * Fill in an extents-overflow-tree search key for (cnid, block, fork type).
 * key_len excludes the 2-byte length field itself.
 */
static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid,
				  u32 block, u8 type)
{
	key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2);
	key->ext.cnid = cpu_to_be32(cnid);
	key->ext.start_block = cpu_to_be32(block);
	key->ext.fork_type = type;
	key->ext.pad = 0;
}

/*
 * Map a block offset within an 8-entry extent record to an absolute
 * allocation block number.  Returns 0 if @off lies beyond the record —
 * callers cannot distinguish that from a genuine block 0 (see the
 * "panic?" note below).
 */
static u32 hfsplus_ext_find_block(struct hfsplus_extent *ext, u32 off)
{
	int i;
	u32 count;

	for (i = 0; i < 8; ext++, i++) {
		count = be32_to_cpu(ext->block_count);
		if (off < count)
			return be32_to_cpu(ext->start_block) + off;
		off -= count;
	}
	/* panic? */
	return 0;
}

/* Total number of allocation blocks covered by all 8 extents of a record. */
static int hfsplus_ext_block_count(struct hfsplus_extent *ext)
{
	int i;
	u32 count = 0;

	for (i = 0; i < 8; ext++, i++)
		count += be32_to_cpu(ext->block_count);
	return count;
}

/*
 * Return one past the last allocation block used by a record: scan
 * backwards for the last non-empty extent and return its end block.
 * Used as the allocation goal when extending a file.
 */
static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext)
{
	int i;

	ext += 7;
	for (i = 0; i < 7; ext--, i++)
		if (ext->block_count)
			break;
	return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count);
}

/*
 * Flush the inode's cached extent record back to the extents tree.
 * A record flagged EXT_NEW is inserted (the lookup must come back
 * -ENOENT); an existing dirty record is rewritten in place.  On an
 * unexpected lookup result the function silently returns and the
 * DIRTY/NEW flags stay set — NOTE(review): the caller gets no error
 * indication in that case.
 */
static void __hfsplus_ext_write_extent(hfsplus_handle_t *hfsplus_handle, struct inode *inode, struct hfs_find_data *fd)
{
	int res;

	hfsplus_ext_build_key(fd->search_key, inode->i_ino, HFSPLUS_I(inode).cached_start,
			      HFSPLUS_IS_RSRC(inode) ? HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
	res = hfs_brec_find(hfsplus_handle, fd);
	if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_NEW) {
		if (res != -ENOENT)
			return;
		hfs_brec_insert(hfsplus_handle, fd, HFSPLUS_I(inode).cached_extents, sizeof(hfsplus_extent_rec));
		HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
	} else {
		if (res)
			return;
		hfs_bnode_write(hfsplus_handle, fd->bnode, HFSPLUS_I(inode).cached_extents, fd->entryoffset, fd->entrylength);
		HFSPLUS_I(inode).flags &= ~HFSPLUS_FLG_EXT_DIRTY;
	}
}

/*
 * Public wrapper: write the cached extent record out only if it is
 * dirty, setting up and tearing down the B-tree find context.
 */
void hfsplus_ext_write_extent(hfsplus_handle_t *hfsplus_handle, struct inode *inode)
{
	if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_DIRTY) {
		struct hfs_find_data fd;

		hfs_find_init(HFSPLUS_SB(inode->i_sb).ext_tree, &fd);
		__hfsplus_ext_write_extent(hfsplus_handle, inode, &fd);
		hfs_find_exit(hfsplus_handle, &fd);
	}
}

/*
 * Look up the extent record covering @block of (@cnid, @type) in the
 * extents overflow tree and copy it into @extent.
 *
 * fd->key->ext.cnid is cleared first so that a failed find cannot leave
 * a stale key that accidentally matches the cnid/fork checks below.
 * An inexact match (hfs_brec_find landing on a neighbouring record of
 * another file/fork) is converted to -ENOENT; a record of the wrong
 * size yields -EIO.
 */
static inline int __hfsplus_ext_read_extent(hfsplus_handle_t *hfsplus_handle, struct hfs_find_data *fd,
					    struct hfsplus_extent *extent,
					    u32 cnid, u32 block, u8 type)
{
	int res;

	hfsplus_ext_build_key(fd->search_key, cnid, block, type);
	fd->key->ext.cnid = 0;
	res = hfs_brec_find(hfsplus_handle, fd);
	if (res && res != -ENOENT)
		return res;
	if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
	    fd->key->ext.fork_type != fd->search_key->ext.fork_type)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfsplus_extent_rec))
		return -EIO;
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfsplus_extent_rec));
	return 0;
}

/*
 * Load the extent record covering @block into the inode's cache,
 * flushing the previously cached record first if it is dirty.  On
 * failure the cache is emptied and the DIRTY/NEW flags cleared.
 * Caller must hold HFSPLUS_I(inode).extents_lock.
 */
static inline int __hfsplus_ext_cache_extent(hfsplus_handle_t *hfsplus_handle, struct hfs_find_data *fd, struct inode *inode, u32 block)
{
	int res;

	if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_DIRTY)
		__hfsplus_ext_write_extent(hfsplus_handle, inode, fd);

	res = __hfsplus_ext_read_extent(hfsplus_handle, fd, HFSPLUS_I(inode).cached_extents, inode->i_ino,
					block, HFSPLUS_IS_RSRC(inode) ? HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
	if (!res) {
		HFSPLUS_I(inode).cached_start = be32_to_cpu(fd->key->ext.start_block);
		HFSPLUS_I(inode).cached_blocks = hfsplus_ext_block_count(HFSPLUS_I(inode).cached_extents);
	} else {
		HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).cached_blocks = 0;
		HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
	}
	return res;
}

/*
 * Ensure the cached extent record covers @block, reading from the
 * extents tree only on a cache miss.
 * NOTE(review): the cache-hit test reads cached_start/cached_blocks
 * without taking extents_lock — presumably serialized by the callers;
 * confirm.
 */
static int hfsplus_ext_read_extent(hfsplus_handle_t *hfsplus_handle, struct inode *inode, u32 block)
{
	struct hfs_find_data fd;
	int res;

	if (block >= HFSPLUS_I(inode).cached_start &&
	    block < HFSPLUS_I(inode).cached_start + HFSPLUS_I(inode).cached_blocks)
		return 0;

	hfs_find_init(HFSPLUS_SB(inode->i_sb).ext_tree, &fd);
	res = __hfsplus_ext_cache_extent(hfsplus_handle, &fd, inode, block);
	hfs_find_exit(hfsplus_handle, &fd);
	return res;
}

/* Get a block at iblock for inode, possibly allocating if create */
/*
 * block_device-style get_block for HFS+.  Translates the file block
 * @iblock to an allocation block, extending the file first when
 * @create is set and the file must grow.  On a journaled volume a
 * metadata write reuses the journal handle of the current transaction
 * when one exists; otherwise a dummy non-journaled handle is used.
 */
int hfsplus_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create)
{
	struct super_block *sb;
	int res = -EIO;
	u32 ablock, dblock, mask;
	int shift;	/* NOTE(review): computed below but never used */
	hfsplus_handle_t *hfsplus_handle, tmp_hfsplus_handle;

	tmp_hfsplus_handle.journaled = !HFSPLUS_JOURNAL_PRESENT;
	tmp_hfsplus_handle.handle = NULL;

	sb = inode->i_sb;

	/* Journal device */
	if (HFSPLUS_SB(sb).jnl.journaled == HFSPLUS_JOURNAL_PRESENT) {
		/* Write Metadata */
		if (((inode->i_mapping->a_ops == &hfsplus_journalled_btree_aops) ||
		     (inode->i_mapping->a_ops == &hfsplus_journalled_aops)) && create) {
			hfsplus_handle = hfsplus_jbd_current_handle();
			if (hfsplus_handle == NULL) {
				printk("hfsplus_handle is NULL\n");
				hfsplus_handle = &tmp_hfsplus_handle;
			}
		}
		else {
			hfsplus_handle = &tmp_hfsplus_handle;
		}
	}
	/* Non-journal device */
	else {
		hfsplus_handle = &tmp_hfsplus_handle;
	}

	/* Convert inode block to disk allocation block */
	shift = HFSPLUS_SB(sb).alloc_blksz_shift - sb->s_blocksize_bits;
	ablock = iblock >> HFSPLUS_SB(sb).fs_shift;

	if (iblock >= HFSPLUS_I(inode).fs_blocks) {
		/* Only allow growing by exactly one block past the end. */
		if (iblock > HFSPLUS_I(inode).fs_blocks || !create) {
			return -EIO;
		}
		if (ablock >= HFSPLUS_I(inode).alloc_blocks) {
			res = hfsplus_file_extend(hfsplus_handle, inode);
			if (res) {
				return res;
			}
		}
	} else
		create = 0;

	/* Fast path: block lies within the extents stored in the catalog record. */
	if (ablock < HFSPLUS_I(inode).first_blocks) {
		dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).first_extents, ablock);
		goto done;
	}

	down(&HFSPLUS_I(inode).extents_lock);
	res = hfsplus_ext_read_extent(hfsplus_handle, inode, ablock);
	if (!res) {
		dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).cached_extents, ablock -
						HFSPLUS_I(inode).cached_start);
	} else {
		up(&HFSPLUS_I(inode).extents_lock);
		return -EIO;
	}
	up(&HFSPLUS_I(inode).extents_lock);

done:
	dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n", inode->i_ino, (long long)iblock, dblock);
	mask = (1 << HFSPLUS_SB(sb).fs_shift) - 1;
	/* blockoffset accounts for the volume's partition offset in sb blocks. */
	map_bh(bh_result, sb, (dblock << HFSPLUS_SB(sb).fs_shift) + HFSPLUS_SB(sb).blockoffset + (iblock & mask));
	if (create) {
		set_buffer_new(bh_result);
		HFSPLUS_I(inode).phys_size += sb->s_blocksize;
		HFSPLUS_I(inode).fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
		if (hfsplus_journalled_mark_inode_dirty(__FUNCTION__, hfsplus_handle, inode)) {
			printk("HFS+-fs: Error in %s()\n", __FUNCTION__);
			return -1;
		}
	}
	return 0;
}

/* Debug helper: print all 8 start:count pairs of an extent record. */
static void hfsplus_dump_extent(struct hfsplus_extent *extent)
{
	int i;

	dprint(DBG_EXTENT, " ");
	for (i = 0; i < 8; i++)
		dprint(DBG_EXTENT, " %u:%u", be32_to_cpu(extent[i].start_block),
					     be32_to_cpu(extent[i].block_count));
	dprint(DBG_EXTENT, "\n");
}

/*
 * Append @block_count newly allocated blocks starting at @alloc_block
 * to the record at file-relative @offset (which must equal the current
 * end of the record's coverage).  Merges with the preceding extent if
 * contiguous, otherwise starts a new slot.  Returns -ENOSPC when all
 * 8 slots are full (caller then inserts a new overflow record) and
 * -EIO if @offset does not land on the record's end.
 */
static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
			      u32 alloc_block, u32 block_count)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count) {
			start = be32_to_cpu(extent->start_block);
			if (alloc_block != start + count) {
				if (++i >= 8)
					return -ENOSPC;
				extent++;
				extent->start_block = cpu_to_be32(alloc_block);
			} else
				block_count += count;
			extent->block_count = cpu_to_be32(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}

/*
 * Free the last @block_nr allocation blocks covered by @extent,
 * walking backwards from the extent that ends at record-relative
 * @offset.  Fully freed extents are zeroed; a partially freed extent
 * keeps its head.  Returns -EIO if @offset is not the end of any
 * extent in the record.
 */
static int hfsplus_free_extents(hfsplus_handle_t *hfsplus_handle, struct super_block *sb,
				struct hfsplus_extent *extent,
				u32 offset, u32 block_nr)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	printk("#### Return EIO from %s()\n", __FUNCTION__);
	return -EIO;
found:
	for (;;) {
		start = be32_to_cpu(extent->start_block);
		if (count <= block_nr) {
			/* Whole extent goes away. */
			hfsplus_block_free(hfsplus_handle, sb, start, count);
			extent->block_count = 0;
			extent->start_block = 0;
			block_nr -= count;
		} else {
			/* Free only the tail of this extent. */
			count -= block_nr;
			hfsplus_block_free(hfsplus_handle, sb, start + count, block_nr);
			extent->block_count = cpu_to_be32(count);
			block_nr = 0;
		}
		if (!block_nr || !i)
			return 0;
		i--;
		extent--;
		count = be32_to_cpu(extent->block_count);
	}
}

/*
 * Release every allocation block of a fork being deleted: first the
 * extents stored in the catalog fork data, then each overflow record,
 * walking backwards from the end of the fork and removing the records
 * from the extents tree as it goes.
 */
int hfsplus_free_fork(hfsplus_handle_t *hfsplus_handle, struct super_block *sb, u32 cnid, struct hfsplus_fork_raw *fork, int type)
{
	struct hfs_find_data fd;
	hfsplus_extent_rec ext_entry;
	u32 total_blocks, blocks, start;
	int res, i;

	total_blocks = be32_to_cpu(fork->total_blocks);
	if (!total_blocks)
		return 0;

	blocks = 0;
	for (i = 0; i < 8; i++)
		blocks += be32_to_cpu(fork->extents[i].block_count);

	res = hfsplus_free_extents(hfsplus_handle, sb, fork->extents, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd);
	do {
		res = __hfsplus_ext_read_extent(hfsplus_handle, &fd, ext_entry, cnid,
						total_blocks, type);
		if (res)
			break;
		start = be32_to_cpu(fd.key->ext.start_block);
		hfsplus_free_extents(hfsplus_handle, sb, ext_entry,
				     total_blocks - start,
				     total_blocks);
		hfs_brec_remove(hfsplus_handle, &fd);
		total_blocks = start;
	} while (total_blocks > blocks);
	hfs_find_exit(hfsplus_handle, &fd);

	return res;
}

/*
 * Grow the file by one clump of allocation blocks.  Tries to allocate
 * contiguously after the fork's current last block, then falls back to
 * searching from the start of the volume.  The new blocks are appended
 * to the catalog record's extents when possible, else to the cached
 * overflow record; if that record is full, a fresh record is started
 * in the cache (insert_extent) and flagged NEW for later insertion.
 */
int hfsplus_file_extend(hfsplus_handle_t *hfsplus_handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 start, len, goal;
	int res;

	/* The allocation bitmap file must cover every block we could use. */
	if (HFSPLUS_SB(sb).alloc_file->i_size * 8 < HFSPLUS_SB(sb).total_blocks - HFSPLUS_SB(sb).free_blocks + 8) {
		// extend alloc file
		printk(KERN_ERR "hfs: extend alloc file! (%Lu,%u,%u)\n", HFSPLUS_SB(sb).alloc_file->i_size * 8,
			HFSPLUS_SB(sb).total_blocks, HFSPLUS_SB(sb).free_blocks);
		return -ENOSPC;
	}

	down(&HFSPLUS_I(inode).extents_lock);
	if (HFSPLUS_I(inode).alloc_blocks == HFSPLUS_I(inode).first_blocks)
		goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).first_extents);
	else {
		res = hfsplus_ext_read_extent(hfsplus_handle, inode, HFSPLUS_I(inode).alloc_blocks);
		if (res)
			goto out;
		goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).cached_extents);
	}

	len = HFSPLUS_I(inode).clump_blocks;
	start = hfsplus_block_allocate(hfsplus_handle, sb, HFSPLUS_SB(sb).total_blocks, goal, &len);
	if (start >= HFSPLUS_SB(sb).total_blocks) {
		/* Nothing past the goal; retry in [0, goal). */
		start = hfsplus_block_allocate(hfsplus_handle, sb, goal, 0, &len);
		if (start >= goal) {
			res = -ENOSPC;
			goto out;
		}
	}

	dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
	if (HFSPLUS_I(inode).alloc_blocks <= HFSPLUS_I(inode).first_blocks) {
		if (!HFSPLUS_I(inode).first_blocks) {
			dprint(DBG_EXTENT, "first extents\n");
			/* no extents yet */
			HFSPLUS_I(inode).first_extents[0].start_block = cpu_to_be32(start);
			HFSPLUS_I(inode).first_extents[0].block_count = cpu_to_be32(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfsplus_add_extent(HFSPLUS_I(inode).first_extents,
						 HFSPLUS_I(inode).alloc_blocks,
						 start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfsplus_dump_extent(HFSPLUS_I(inode).first_extents);
			HFSPLUS_I(inode).first_blocks += len;
		}
	} else {
		res = hfsplus_add_extent(HFSPLUS_I(inode).cached_extents,
					 HFSPLUS_I(inode).alloc_blocks -
					 HFSPLUS_I(inode).cached_start,
					 start, len);
		if (!res) {
			hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
			HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY;
			HFSPLUS_I(inode).cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	up(&HFSPLUS_I(inode).extents_lock);
	if (!res) {
		HFSPLUS_I(inode).alloc_blocks += len;
		res = hfsplus_journalled_mark_inode_dirty(__FUNCTION__, hfsplus_handle, inode);
	}
	return res;

insert_extent:
	/* Current record is full: flush it, then start a new one in the cache. */
	dprint(DBG_EXTENT, "insert new extent\n");
	hfsplus_ext_write_extent(hfsplus_handle, inode);

	memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
	HFSPLUS_I(inode).cached_extents[0].start_block = cpu_to_be32(start);
	HFSPLUS_I(inode).cached_extents[0].block_count = cpu_to_be32(len);
	hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
	HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW;
	HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).alloc_blocks;
	HFSPLUS_I(inode).cached_blocks = len;

	res = 0;
	goto out;
}

/*
 * Bring the fork's allocation in line with inode->i_size.  Growing is
 * done by zero-filling the last page through prepare/commit_write;
 * shrinking walks extent records from the end of the fork, freeing
 * blocks and removing emptied overflow records.  Finally phys_size,
 * fs_blocks, the byte counters and i_blocks are resynchronized.
 */
void hfsplus_file_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfs_find_data fd;
	u32 alloc_cnt, blk_cnt, start;
	int res;
	hfsplus_handle_t hfsplus_handle;

	dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n", inode->i_ino,
	       (long long)HFSPLUS_I(inode).phys_size, inode->i_size);

	if (inode->i_size > HFSPLUS_I(inode).phys_size) {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		u32 size = inode->i_size - 1;
		int res;	/* shadows outer res within this branch */

		page = grab_cache_page(mapping, size >> PAGE_CACHE_SHIFT);
		if (!page)
			return;
		size &= PAGE_CACHE_SIZE - 1;
		size++;
		/* Zero-length write at the new EOF to allocate/zero the tail. */
		res = mapping->a_ops->prepare_write(NULL, page, size, size);
		if (!res)
			res = mapping->a_ops->commit_write(NULL, page, size, size);
		if (res)
			inode->i_size = HFSPLUS_I(inode).phys_size;
		unlock_page(page);
		page_cache_release(page);
		if (hfsplus_journal_start(__FUNCTION__, sb, &hfsplus_handle))
			return;

		hfsplus_journalled_mark_inode_dirty(__FUNCTION__, &hfsplus_handle, inode);
		hfsplus_journal_stop(&hfsplus_handle);
		return;
	} else if (inode->i_size == HFSPLUS_I(inode).phys_size)
		return;

	if (hfsplus_journal_start(__FUNCTION__, sb, &hfsplus_handle))
		return;

	/* New size rounded up to whole allocation blocks. */
	blk_cnt = (inode->i_size + HFSPLUS_SB(sb).alloc_blksz - 1) >> HFSPLUS_SB(sb).alloc_blksz_shift;
	alloc_cnt = HFSPLUS_I(inode).alloc_blocks;
	if (blk_cnt == alloc_cnt)
		goto out;

	down(&HFSPLUS_I(inode).extents_lock);
	hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd);
	while (1) {
		if (alloc_cnt == HFSPLUS_I(inode).first_blocks) {
			/* Remaining blocks all live in the catalog record. */
			hfsplus_free_extents(&hfsplus_handle, sb, HFSPLUS_I(inode).first_extents,
					     alloc_cnt, alloc_cnt - blk_cnt);
			hfsplus_dump_extent(HFSPLUS_I(inode).first_extents);
			HFSPLUS_I(inode).first_blocks = blk_cnt;
			break;
		}
		res = __hfsplus_ext_cache_extent(&hfsplus_handle, &fd, inode, alloc_cnt);
		if (res)
			break;
		start = HFSPLUS_I(inode).cached_start;
		hfsplus_free_extents(&hfsplus_handle, sb, HFSPLUS_I(inode).cached_extents,
				     alloc_cnt - start, alloc_cnt - blk_cnt);
		hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
		if (blk_cnt > start) {
			/* Record still partially used: keep it, mark dirty. */
			HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY;
			break;
		}
		/* Record fully freed: drop it from the tree and continue. */
		alloc_cnt = start;
		HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).cached_blocks = 0;
		HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
		hfs_brec_remove(&hfsplus_handle, &fd);
	}
	hfs_find_exit(&hfsplus_handle, &fd);
	up(&HFSPLUS_I(inode).extents_lock);

	HFSPLUS_I(inode).alloc_blocks = blk_cnt;
out:
	HFSPLUS_I(inode).phys_size = inode->i_size;
	HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits);
	/* Foxconn added start pling 05/31/2010 */
	/* Set the i_blocks field properly */
	inode->i_blocks = inode->i_size/512;
	if (inode->i_size % 512)
		inode->i_blocks++;
	/* Foxconn added end pling 05/31/2010 */
	hfsplus_journalled_mark_inode_dirty(__FUNCTION__, &hfsplus_handle, inode);
	hfsplus_journal_stop(&hfsplus_handle);
}