/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/fs/squashfs/

Lines Matching refs: msblk

120 struct squashfs_sb_info *msblk = s->s_fs_info;
127 if (msblk->devblksize - *offset == 1) {
128 if (msblk->swap)
137 if (msblk->swap)
146 if (msblk->swap) {
161 if (SQUASHFS_CHECK_DATA(msblk->sblk.flags)) {
162 if (*offset == msblk->devblksize) {
188 struct squashfs_sb_info *msblk = s->s_fs_info;
189 struct squashfs_super_block *sblk = &msblk->sblk;
191 msblk->devblksize_log2) + 2];
192 unsigned int offset = index & ((1 << msblk->devblksize_log2) - 1);
193 unsigned int cur_index = index >> msblk->devblksize_log2;
199 bytes = msblk->devblksize - offset;
215 bytes += msblk->devblksize;
226 bytes = msblk->devblksize - offset;
239 bytes += msblk->devblksize;
249 mutex_lock(&msblk->read_data_mutex);
262 avail_bytes = msblk->devblksize - offset;
281 avail_bytes = msblk->devblksize - offset;
288 mutex_unlock(&msblk->read_data_mutex);
300 avail_bytes = (c_byte - bytes) > (msblk->devblksize - offset) ?
301 msblk->devblksize - offset :
312 (SQUASHFS_CHECK_DATA(msblk->sblk.flags)
316 mutex_unlock(&msblk->read_data_mutex);
333 struct squashfs_sb_info *msblk = s->s_fs_info;
341 if (msblk->block_cache[i].block == block)
344 mutex_lock(&msblk->block_cache_mutex);
348 for (i = msblk->next_cache, n = SQUASHFS_CACHED_BLKS;
351 if (msblk->block_cache[i].block !=
359 add_wait_queue(&msblk->waitq, &wait);
361 mutex_unlock(&msblk->block_cache_mutex);
364 remove_wait_queue(&msblk->waitq, &wait);
367 msblk->next_cache = (i + 1) % SQUASHFS_CACHED_BLKS;
369 if (msblk->block_cache[i].block ==
371 if (!(msblk->block_cache[i].data =
376 mutex_unlock(&msblk->block_cache_mutex);
381 msblk->block_cache[i].block = SQUASHFS_USED_BLK;
382 mutex_unlock(&msblk->block_cache_mutex);
384 msblk->block_cache[i].length = squashfs_read_data(s,
385 msblk->block_cache[i].data, block, 0, &next_index, SQUASHFS_METADATA_SIZE);
386 if (msblk->block_cache[i].length == 0) {
389 mutex_lock(&msblk->block_cache_mutex);
390 msblk->block_cache[i].block = SQUASHFS_INVALID_BLK;
391 kfree(msblk->block_cache[i].data);
392 wake_up(&msblk->waitq);
393 mutex_unlock(&msblk->block_cache_mutex);
397 mutex_lock(&msblk->block_cache_mutex);
398 wake_up(&msblk->waitq);
399 msblk->block_cache[i].block = block;
400 msblk->block_cache[i].next_index = next_index;
404 if (msblk->block_cache[i].block != block) {
405 mutex_unlock(&msblk->block_cache_mutex);
409 bytes = msblk->block_cache[i].length - offset;
412 mutex_unlock(&msblk->block_cache_mutex);
416 memcpy(buffer, msblk->block_cache[i].data +
418 if (msblk->block_cache[i].length - offset == length) {
419 *next_block = msblk->block_cache[i].next_index;
425 mutex_unlock(&msblk->block_cache_mutex);
429 memcpy(buffer, msblk->block_cache[i].data +
433 block = msblk->block_cache[i].next_index;
434 mutex_unlock(&msblk->block_cache_mutex);
451 struct squashfs_sb_info *msblk = s->s_fs_info;
453 msblk->fragment_index[SQUASHFS_FRAGMENT_INDEX(fragment)];
457 if (msblk->swap) {
483 SQSH_EXTERN void release_cached_fragment(struct squashfs_sb_info *msblk, struct
486 mutex_lock(&msblk->fragment_mutex);
488 wake_up(&msblk->fragment_wait_queue);
489 mutex_unlock(&msblk->fragment_mutex);
498 struct squashfs_sb_info *msblk = s->s_fs_info;
499 struct squashfs_super_block *sblk = &msblk->sblk;
502 mutex_lock(&msblk->fragment_mutex);
505 msblk->fragment[i].block != start_block; i++);
508 for (i = msblk->next_fragment, n =
510 msblk->fragment[i].locked; n--, i = (i + 1) %
517 add_wait_queue(&msblk->fragment_wait_queue,
520 mutex_unlock(&msblk->fragment_mutex);
523 remove_wait_queue(&msblk->fragment_wait_queue,
527 msblk->next_fragment = (msblk->next_fragment + 1) %
530 if (msblk->fragment[i].data == NULL)
531 if (!(msblk->fragment[i].data = SQUASHFS_ALLOC
535 mutex_unlock(&msblk->fragment_mutex);
539 msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
540 msblk->fragment[i].locked = 1;
541 mutex_unlock(&msblk->fragment_mutex);
543 if (!(msblk->fragment[i].length = squashfs_read_data(s,
544 msblk->fragment[i].data,
548 msblk->fragment[i].locked = 0;
553 mutex_lock(&msblk->fragment_mutex);
554 msblk->fragment[i].block = start_block;
556 i, msblk->fragment[i].block,
557 msblk->fragment[i].locked);
558 mutex_unlock(&msblk->fragment_mutex);
562 msblk->fragment[i].locked++;
563 mutex_unlock(&msblk->fragment_mutex);
565 msblk->fragment[i].block,
566 msblk->fragment[i].locked);
570 return &msblk->fragment[i];
577 static void squashfs_new_inode(struct squashfs_sb_info *msblk, struct inode *i,
584 i->i_uid = msblk->uid[inodeb->uid];
590 i->i_gid = msblk->guid[inodeb->guid];
596 struct squashfs_sb_info *msblk = s->s_fs_info;
597 long long start = msblk->inode_lookup_table[SQUASHFS_LOOKUP_BLOCK(ino - 1)];
603 if (msblk->swap) {
625 struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
631 (msblk->read_inode)(i, inode);
659 struct squashfs_sb_info *msblk = s->s_fs_info;
665 (msblk->read_inode)(i, inode);
676 struct squashfs_sb_info *msblk = s->s_fs_info;
677 struct squashfs_super_block *sblk = &msblk->sblk;
689 if (msblk->swap) {
702 squashfs_new_inode(msblk, i, inodeb);
711 if (msblk->swap) {
760 if (msblk->swap) {
807 if (msblk->swap) {
841 if (msblk->swap) {
882 if (msblk->swap) {
916 if (msblk->swap) {
947 if (msblk->swap) {
986 struct squashfs_sb_info *msblk = s->s_fs_info;
987 struct squashfs_super_block *sblk = &msblk->sblk;
993 if (!(msblk->inode_lookup_table = kmalloc(length, GFP_KERNEL))) {
998 if (!squashfs_read_data(s, (char *) msblk->inode_lookup_table,
1005 if (msblk->swap) {
1011 &msblk->inode_lookup_table[i], 1);
1012 msblk->inode_lookup_table[i] = block;
1022 struct squashfs_sb_info *msblk = s->s_fs_info;
1023 struct squashfs_super_block *sblk = &msblk->sblk;
1030 if (!(msblk->fragment_index = kmalloc(length, GFP_KERNEL))) {
1035 if (!squashfs_read_data(s, (char *) msblk->fragment_index,
1042 if (msblk->swap) {
1048 &msblk->fragment_index[i], 1);
1049 msblk->fragment_index[i] = fragment;
1057 static int supported_squashfs_filesystem(struct squashfs_sb_info *msblk, int silent)
1059 struct squashfs_super_block *sblk = &msblk->sblk;
1061 msblk->read_inode = squashfs_read_inode;
1062 msblk->read_blocklist = read_blocklist;
1063 msblk->read_fragment_index_table = read_fragment_index_table;
1066 if (!squashfs_1_0_supported(msblk)) {
1074 if (!squashfs_2_0_supported(msblk)) {
1095 struct squashfs_sb_info *msblk;
1110 msblk = s->s_fs_info;
1111 sblk = &msblk->sblk;
1113 msblk->devblksize = sb_min_blocksize(s, BLOCK_SIZE);
1114 msblk->devblksize_log2 = ffz(~msblk->devblksize);
1116 mutex_init(&msblk->read_data_mutex);
1117 mutex_init(&msblk->read_page_mutex);
1118 mutex_init(&msblk->block_cache_mutex);
1119 mutex_init(&msblk->fragment_mutex);
1120 mutex_init(&msblk->meta_index_mutex);
1122 init_waitqueue_head(&msblk->waitq);
1123 init_waitqueue_head(&msblk->fragment_wait_queue);
1135 msblk->swap = 0;
1144 msblk->swap = 1;
1155 if(!supported_squashfs_filesystem(msblk, silent))
1195 if (!(msblk->block_cache = kmalloc(sizeof(struct squashfs_cache) *
1202 msblk->block_cache[i].block = SQUASHFS_INVALID_BLK;
1204 msblk->next_cache = 0;
1207 if (!(msblk->read_page = kmalloc(sblk->block_size, GFP_KERNEL))) {
1213 if (!(msblk->uid = kmalloc((sblk->no_uids + sblk->no_guids) *
1218 msblk->guid = msblk->uid + sblk->no_uids;
1220 if (msblk->swap) {
1231 SQUASHFS_SWAP_DATA(msblk->uid, suid, (sblk->no_uids +
1234 if (!squashfs_read_data(s, (char *) msblk->uid, sblk->uid_start,
1243 if (sblk->s_major == 1 && squashfs_1_0_supported(msblk))
1246 if (!(msblk->fragment = kmalloc(sizeof(struct squashfs_fragment_cache) *
1253 msblk->fragment[i].locked = 0;
1254 msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
1255 msblk->fragment[i].data = NULL;
1258 msblk->next_fragment = 0;
1261 if (msblk->read_fragment_index_table(s) == 0)
1276 if ((msblk->read_inode)(root, sblk->root_inode) == 0)
1290 if(msblk->inode_lookup_table)
1291 kfree(msblk->inode_lookup_table);
1292 if (msblk->fragment_index)
1293 kfree(msblk->fragment_index);
1294 if(msblk->fragment) {
1296 if(msblk->fragment[i].data)
1297 SQUASHFS_FREE(msblk->fragment[i].data);
1298 kfree(msblk->fragment);
1300 if(msblk->uid)
1301 kfree(msblk->uid);
1302 if(msblk->read_page)
1303 kfree(msblk->read_page);
1304 if(msblk->block_cache) {
1306 if (msblk->block_cache[i].data)
1307 kfree(msblk->block_cache[i].data);
1308 kfree(msblk->block_cache);
1310 if(msblk->fragment_index_2)
1311 kfree(msblk->fragment_index_2);
1324 struct squashfs_sb_info *msblk = dentry->d_sb->s_fs_info;
1325 struct squashfs_super_block *sblk = &msblk->sblk;
1391 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1394 mutex_lock(&msblk->meta_index_mutex);
1398 if(msblk->meta_index == NULL)
1402 if (msblk->meta_index[i].inode_number == inode->i_ino &&
1403 msblk->meta_index[i].offset >= offset &&
1404 msblk->meta_index[i].offset <= index &&
1405 msblk->meta_index[i].locked == 0) {
1407 msblk->meta_index[i].offset);
1408 meta = &msblk->meta_index[i];
1416 mutex_unlock(&msblk->meta_index_mutex);
1424 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1428 mutex_lock(&msblk->meta_index_mutex);
1432 if(msblk->meta_index == NULL) {
1433 if (!(msblk->meta_index = kmalloc(sizeof(struct meta_index) *
1439 msblk->meta_index[i].inode_number = 0;
1440 msblk->meta_index[i].locked = 0;
1442 msblk->next_meta_index = 0;
1446 msblk->meta_index[msblk->next_meta_index].locked; i --)
1447 msblk->next_meta_index = (msblk->next_meta_index + 1) %
1456 msblk->next_meta_index,
1457 &msblk->meta_index[msblk->next_meta_index]);
1459 meta = &msblk->meta_index[msblk->next_meta_index];
1460 msblk->next_meta_index = (msblk->next_meta_index + 1) %
1470 mutex_unlock(&msblk->meta_index_mutex);
1485 struct squashfs_sb_info *msblk = s->s_fs_info;
1489 if (msblk->swap) {
1531 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1532 struct squashfs_super_block *sblk = &msblk->sblk;
1657 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1658 struct squashfs_super_block *sblk = &msblk->sblk;
1665 char *data_ptr = msblk->read_page;
1687 if ((block = (msblk->read_blocklist)(inode, index, 1,
1691 mutex_lock(&msblk->read_page_mutex);
1693 if (!(bytes = squashfs_read_data(inode->i_sb, msblk->read_page,
1697 mutex_unlock(&msblk->read_page_mutex);
1753 mutex_unlock(&msblk->read_page_mutex);
1755 release_cached_fragment(msblk, fragment);
1776 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1777 struct squashfs_super_block *sblk = &msblk->sblk;
1801 block = (msblk->read_blocklist)(inode, page->index, 1,
1806 mutex_lock(&msblk->read_page_mutex);
1807 bytes = squashfs_read_data(inode->i_sb, msblk->read_page, block,
1811 memcpy(pageaddr, msblk->read_page, bytes);
1816 mutex_unlock(&msblk->read_page_mutex);
1829 release_cached_fragment(msblk, fragment);
1856 struct squashfs_sb_info *msblk = s->s_fs_info;
1857 struct squashfs_super_block *sblk = &msblk->sblk;
1869 if (msblk->swap) {
1906 struct squashfs_sb_info *msblk = s->s_fs_info;
1907 struct squashfs_super_block *sblk = &msblk->sblk;
1925 if (msblk->swap) {
1961 struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
1962 struct squashfs_super_block *sblk = &msblk->sblk;
2013 if (msblk->swap) {
2034 if (msblk->swap) {
2104 struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
2105 struct squashfs_super_block *sblk = &msblk->sblk;
2132 if (msblk->swap) {
2152 if (msblk->swap) {
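
Taken together, the matches above outline the per-mount state that msblk points at: struct squashfs_sb_info, stored in s->s_fs_info at mount time and threaded through every read path. The sketch below is reconstructed only from the lines listed on this page; field types, ordering and the function-pointer argument lists are assumptions, and the authoritative definition lives in the squashfs headers of this same source tree (squashfs_fs_sb.h), which should be consulted rather than this sketch.

#include <linux/mutex.h>       /* struct mutex */
#include <linux/wait.h>        /* wait_queue_head_t */
#include <linux/fs.h>          /* struct inode, struct super_block */
/* struct squashfs_super_block, struct squashfs_cache,
 * struct squashfs_fragment_cache, struct meta_index and
 * squashfs_inode_t come from the squashfs headers in this tree. */

struct squashfs_sb_info {                      /* reconstructed sketch, not the real header */
	struct squashfs_super_block sblk;      /* in-memory copy of the on-disk superblock */

	int devblksize;                        /* device block size, set via sb_min_blocksize() */
	int devblksize_log2;                   /* log2(devblksize) */
	int swap;                              /* non-zero if the image is opposite-endian */

	/* serialisation of the shared buffers and caches */
	struct mutex read_data_mutex;          /* raw block reads */
	struct mutex read_page_mutex;          /* the shared read_page buffer */
	struct mutex block_cache_mutex;        /* block_cache[] slots */
	struct mutex fragment_mutex;           /* fragment[] slots */
	struct mutex meta_index_mutex;         /* meta_index[] slots */
	wait_queue_head_t waitq;               /* waiters for a free block_cache slot */
	wait_queue_head_t fragment_wait_queue; /* waiters for a free fragment slot */

	/* metadata block cache: .block, .data, .length, .next_index */
	struct squashfs_cache *block_cache;
	int next_cache;                        /* round-robin replacement cursor */

	/* fragment cache: .block, .data, .length, .locked */
	struct squashfs_fragment_cache *fragment;
	int next_fragment;

	/* index tables and decompression buffer */
	char *read_page;                       /* one block-sized buffer for readpage */
	unsigned int *uid;                     /* uid table; guid points into the same buffer */
	unsigned int *guid;                    /*   (guid = uid + no_uids) */
	long long *fragment_index;             /* 3.x fragment index table */
	void *fragment_index_2;                /* 2.x fragment index table */
	long long *inode_lookup_table;         /* export (NFS) inode lookup table */
	struct meta_index *meta_index;         /* .inode_number, .offset, .locked */
	int next_meta_index;

	/* per-format operations, selected in supported_squashfs_filesystem() */
	int (*read_inode)(struct inode *i, squashfs_inode_t inode);
	long long (*read_blocklist)(struct inode *inode, int index,
			int readahead_blks, char *block_list,
			unsigned short **block_p, unsigned int *bsize);
	int (*read_fragment_index_table)(struct super_block *s);
};

The mutex/wait-queue pairs back the slot-allocation pattern visible around source lines 344-434 and 502-570: take the mutex, scan for a free slot, sleep on the wait queue if every slot is busy, mark the slot in use, drop the mutex for the actual squashfs_read_data() call, then retake it to publish the result and wake_up() any waiters.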