1/*
2 * Squashfs - a compressed read only filesystem for Linux
3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007
5 * Phillip Lougher <phillip@lougher.org.uk>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2,
10 * or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 * inode.c
22 */
23
24#include <linux/squashfs_fs.h>
25#include <linux/module.h>
26#include <linux/fs.h>
27#include <linux/squashfs_fs_sb.h>
28#include <linux/squashfs_fs_i.h>
29#include <linux/buffer_head.h>
30#include <linux/vfs.h>
31#include <linux/vmalloc.h>
32#include <linux/smp_lock.h>
33#include <linux/sched.h>
34#include <linux/slab.h>
35
36#include "squashfs.h"
37#include "sqlzma.h"
38
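/*
 * Shared staging buffer used by squashfs_read_data() to assemble a
 * compressed block before it is handed to the LZMA decompressor.
 * Access is serialised by msblk->read_data_mutex; allocation of the
 * buffer itself is not shown in this file.
 */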
39unsigned char *sqread_data;
40
41static void vfs_read_inode(struct inode *i);
42static struct dentry *squashfs_get_parent(struct dentry *child);
43static int squashfs_read_inode(struct inode *i, squashfs_inode_t inode);
44static int squashfs_statfs(struct dentry *, struct kstatfs *);
45static int squashfs_symlink_readpage(struct file *file, struct page *page);
46static long long read_blocklist(struct inode *inode, int index,
47				int readahead_blks, char *block_list,
48				unsigned short **block_p, unsigned int *bsize);
49static int squashfs_readpage(struct file *file, struct page *page);
50static int squashfs_readpage4K(struct file *file, struct page *page);
51static int squashfs_readdir(struct file *, void *, filldir_t);
52static struct dentry *squashfs_lookup(struct inode *, struct dentry *,
53				struct nameidata *);
54static int squashfs_remount(struct super_block *s, int *flags, char *data);
55static void squashfs_put_super(struct super_block *);
static int squashfs_get_sb(struct file_system_type *, int, const char *, void *,
57				struct vfsmount *);
58static struct inode *squashfs_alloc_inode(struct super_block *sb);
59static void squashfs_destroy_inode(struct inode *inode);
60static int init_inodecache(void);
61static void destroy_inodecache(void);
62
63static struct file_system_type squashfs_fs_type = {
64	.owner = THIS_MODULE,
65	.name = "squashfs",
66	.get_sb = squashfs_get_sb,
67	.kill_sb = kill_block_super,
68	.fs_flags = FS_REQUIRES_DEV
69};
70
71static const unsigned char squashfs_filetype_table[] = {
72	DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
73};
74
75static struct super_operations squashfs_super_ops = {
76	.alloc_inode = squashfs_alloc_inode,
77	.destroy_inode = squashfs_destroy_inode,
78	.statfs = squashfs_statfs,
79	.put_super = squashfs_put_super,
80	.remount_fs = squashfs_remount
81};
82
83static struct super_operations squashfs_export_super_ops = {
84	.alloc_inode = squashfs_alloc_inode,
85	.destroy_inode = squashfs_destroy_inode,
86	.statfs = squashfs_statfs,
87	.put_super = squashfs_put_super,
88	.read_inode = vfs_read_inode
89};
90
91static struct export_operations squashfs_export_ops = {
92	.get_parent = squashfs_get_parent
93};
94
95SQSH_EXTERN const struct address_space_operations squashfs_symlink_aops = {
96	.readpage = squashfs_symlink_readpage
97};
98
99SQSH_EXTERN const struct address_space_operations squashfs_aops = {
100	.readpage = squashfs_readpage
101};
102
103SQSH_EXTERN const struct address_space_operations squashfs_aops_4K = {
104	.readpage = squashfs_readpage4K
105};
106
107static const struct file_operations squashfs_dir_ops = {
108	.read = generic_read_dir,
109	.readdir = squashfs_readdir
110};
111
112SQSH_EXTERN struct inode_operations squashfs_dir_inode_ops = {
113	.lookup = squashfs_lookup
114};
115
116
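/*
 * Read the two byte length field that precedes a metadata block,
 * handling the case where it straddles two device blocks and
 * byte-swapping it on opposite-endian filesystems.  If the check-data
 * flag is set, also verify and skip the marker byte that follows.
 * Returns the buffer_head covering the current position, or NULL on
 * I/O or marker failure.
 */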
117static struct buffer_head *get_block_length(struct super_block *s,
118				int *cur_index, int *offset, int *c_byte)
119{
120	struct squashfs_sb_info *msblk = s->s_fs_info;
121	unsigned short temp;
122	struct buffer_head *bh;
123
124	if (!(bh = sb_bread(s, *cur_index)))
125		goto out;
126
127	if (msblk->devblksize - *offset == 1) {
128		if (msblk->swap)
129			((unsigned char *) &temp)[1] = *((unsigned char *)
130				(bh->b_data + *offset));
131		else
132			((unsigned char *) &temp)[0] = *((unsigned char *)
133				(bh->b_data + *offset));
134		brelse(bh);
135		if (!(bh = sb_bread(s, ++(*cur_index))))
136			goto out;
137		if (msblk->swap)
138			((unsigned char *) &temp)[0] = *((unsigned char *)
139				bh->b_data);
140		else
141			((unsigned char *) &temp)[1] = *((unsigned char *)
142				bh->b_data);
143		*c_byte = temp;
144		*offset = 1;
145	} else {
146		if (msblk->swap) {
147			((unsigned char *) &temp)[1] = *((unsigned char *)
148				(bh->b_data + *offset));
149			((unsigned char *) &temp)[0] = *((unsigned char *)
150				(bh->b_data + *offset + 1));
151		} else {
152			((unsigned char *) &temp)[0] = *((unsigned char *)
153				(bh->b_data + *offset));
154			((unsigned char *) &temp)[1] = *((unsigned char *)
155				(bh->b_data + *offset + 1));
156		}
157		*c_byte = temp;
158		*offset += 2;
159	}
160
161	if (SQUASHFS_CHECK_DATA(msblk->sblk.flags)) {
162		if (*offset == msblk->devblksize) {
163			brelse(bh);
164			if (!(bh = sb_bread(s, ++(*cur_index))))
165				goto out;
166			*offset = 0;
167		}
168		if (*((unsigned char *) (bh->b_data + *offset)) !=
169						SQUASHFS_MARKER_BYTE) {
170			ERROR("Metadata block marker corrupt @ %x\n",
171						*cur_index);
172			brelse(bh);
173			goto out;
174		}
175		(*offset)++;
176	}
177	return bh;
178
179out:
180	return NULL;
181}
182
183
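/*
 * Read a block of data or metadata from the filesystem.  A non-zero
 * length encodes the on-disk size and compression flag of a data
 * block; a zero length means a metadata block whose length field must
 * first be read from disk via get_block_length().  Compressed blocks
 * are gathered into sqread_data and inflated into buffer.  Returns the
 * number of bytes placed in buffer, or 0 on failure.
 */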
184SQSH_EXTERN unsigned int squashfs_read_data(struct super_block *s, char *buffer,
185			long long index, unsigned int length,
186			long long *next_index, int srclength)
187{
188	struct squashfs_sb_info *msblk = s->s_fs_info;
189	struct squashfs_super_block *sblk = &msblk->sblk;
190	struct buffer_head *bh[((SQUASHFS_FILE_MAX_SIZE - 1) >>
191			msblk->devblksize_log2) + 2];
192	unsigned int offset = index & ((1 << msblk->devblksize_log2) - 1);
193	unsigned int cur_index = index >> msblk->devblksize_log2;
194	int bytes, avail_bytes, b = 0, k = 0;
195	unsigned int compressed;
196	unsigned int c_byte = length;
197
198	if (c_byte) {
199		bytes = msblk->devblksize - offset;
200		compressed = SQUASHFS_COMPRESSED_BLOCK(c_byte);
201		c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte);
202
203		TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n", index, compressed
204					? "" : "un", (unsigned int) c_byte, srclength);
205
206		if (c_byte > srclength || index < 0 || (index + c_byte) > sblk->bytes_used)
207			goto read_failure;
208
209		if (!(bh[0] = sb_getblk(s, cur_index)))
210			goto block_release;
211
212		for (b = 1; bytes < c_byte; b++) {
213			if (!(bh[b] = sb_getblk(s, ++cur_index)))
214				goto block_release;
215			bytes += msblk->devblksize;
216		}
217		ll_rw_block(READ, b, bh);
218	} else {
219		if (index < 0 || (index + 2) > sblk->bytes_used)
220			goto read_failure;
221
222		if (!(bh[0] = get_block_length(s, &cur_index, &offset,
223								&c_byte)))
224			goto read_failure;
225
226		bytes = msblk->devblksize - offset;
227		compressed = SQUASHFS_COMPRESSED(c_byte);
228		c_byte = SQUASHFS_COMPRESSED_SIZE(c_byte);
229
230		TRACE("Block @ 0x%llx, %scompressed size %d\n", index, compressed
231					? "" : "un", (unsigned int) c_byte);
232
233		if (c_byte > srclength || (index + c_byte) > sblk->bytes_used)
234			goto read_failure;
235
236		for (b = 1; bytes < c_byte; b++) {
237			if (!(bh[b] = sb_getblk(s, ++cur_index)))
238				goto block_release;
239			bytes += msblk->devblksize;
240		}
241		ll_rw_block(READ, b - 1, bh + 1);
242	}
243
244	if (compressed) {
245
246		int rest, start;
247		int len;
248
249		mutex_lock(&msblk->read_data_mutex);
250
		/*
		 * uncompress block
		 */
254		for (k = 0; k < b; k++) {
255			wait_on_buffer(bh[k]);
256			if (!buffer_uptodate(bh[k]))
257				goto release_mutex;
258		}
259
260		avail_bytes = 0;
261		for (k = 0; !avail_bytes && k < b; k++) {
262			avail_bytes = msblk->devblksize - offset;
263			if (c_byte < avail_bytes)
264				avail_bytes = c_byte;
265			if (avail_bytes)
266				break;
267			offset = 0;
268			brelse(bh[k]);
269		}
270		bytes = 0;
271		if (!avail_bytes)
272			goto release_mutex;
273
274		start = k;
275		for (; k < b; k++) {
276			memcpy(sqread_data + bytes, bh[k]->b_data + offset,
277			       avail_bytes);
278			bytes += avail_bytes;
279			offset = 0;
280			brelse(bh[k]);
281			avail_bytes = msblk->devblksize - offset;
282			rest = c_byte - bytes;
283			if (rest < avail_bytes)
284				avail_bytes = rest;
285		}
286
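		/*
		 * Inflate the assembled block.  The patched LzmaUncompress()
		 * helper is expected to update srclength in place to the
		 * number of decompressed bytes, which is what is returned to
		 * the caller below.
		 */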
287		len = LzmaUncompress(buffer, &srclength, sqread_data, bytes);
288		mutex_unlock(&msblk->read_data_mutex);
289		bytes = srclength;
290	} else {
291		int i;
292
293		for(i = 0; i < b; i++) {
294			wait_on_buffer(bh[i]);
295			if(!buffer_uptodate(bh[i]))
296				goto block_release;
297		}
298
299		for (bytes = 0; k < b; k++) {
300			avail_bytes = (c_byte - bytes) > (msblk->devblksize - offset) ?
301					msblk->devblksize - offset :
302					c_byte - bytes;
303			memcpy(buffer + bytes, bh[k]->b_data + offset, avail_bytes);
304			bytes += avail_bytes;
305			offset = 0;
306			brelse(bh[k]);
307		}
308	}
309
310	if (next_index)
311		*next_index = index + c_byte + (length ? 0 :
312				(SQUASHFS_CHECK_DATA(msblk->sblk.flags)
313				 ? 3 : 2));
314	return bytes;
315release_mutex:
316	mutex_unlock(&msblk->read_data_mutex);
317
318block_release:
319	for (; k < b; k++)
320		brelse(bh[k]);
321
322read_failure:
323	ERROR("sb_bread failed reading block 0x%x\n", cur_index);
324	return 0;
325}
326
327
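/*
 * Copy length bytes of metadata starting at [block:offset] into buffer,
 * reading blocks through the small metadata block cache and following
 * on into the next metadata block where necessary.  The position of
 * the byte after the last one read is returned through
 * next_block/next_offset.  Returns the requested length on success, or
 * 0 on failure.
 */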
328SQSH_EXTERN int squashfs_get_cached_block(struct super_block *s, char *buffer,
329				long long block, unsigned int offset,
330				int length, long long *next_block,
331				unsigned int *next_offset)
332{
333	struct squashfs_sb_info *msblk = s->s_fs_info;
334	int n, i, bytes, return_length = length;
335	long long next_index;
336
337	TRACE("Entered squashfs_get_cached_block [%llx:%x]\n", block, offset);
338
339	while ( 1 ) {
340		for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
341			if (msblk->block_cache[i].block == block)
342				break;
343
344		mutex_lock(&msblk->block_cache_mutex);
345
346		if (i == SQUASHFS_CACHED_BLKS) {
347			/* read inode header block */
348			for (i = msblk->next_cache, n = SQUASHFS_CACHED_BLKS;
349					n ; n --, i = (i + 1) %
350					SQUASHFS_CACHED_BLKS)
351				if (msblk->block_cache[i].block !=
352							SQUASHFS_USED_BLK)
353					break;
354
355			if (n == 0) {
356				wait_queue_t wait;
357
358				init_waitqueue_entry(&wait, current);
359				add_wait_queue(&msblk->waitq, &wait);
360				set_current_state(TASK_UNINTERRUPTIBLE);
361 				mutex_unlock(&msblk->block_cache_mutex);
362				schedule();
363				set_current_state(TASK_RUNNING);
364				remove_wait_queue(&msblk->waitq, &wait);
365				continue;
366			}
367			msblk->next_cache = (i + 1) % SQUASHFS_CACHED_BLKS;
368
369			if (msblk->block_cache[i].block ==
370							SQUASHFS_INVALID_BLK) {
371				if (!(msblk->block_cache[i].data =
372						kmalloc(SQUASHFS_METADATA_SIZE,
373						GFP_KERNEL))) {
					ERROR("Failed to allocate cache "
							"block\n");
376					mutex_unlock(&msblk->block_cache_mutex);
377					goto out;
378				}
379			}
380
381			msblk->block_cache[i].block = SQUASHFS_USED_BLK;
382			mutex_unlock(&msblk->block_cache_mutex);
383
384			msblk->block_cache[i].length = squashfs_read_data(s,
385				msblk->block_cache[i].data, block, 0, &next_index, SQUASHFS_METADATA_SIZE);
386			if (msblk->block_cache[i].length == 0) {
387				ERROR("Unable to read cache block [%llx:%x]\n",
388						block, offset);
389				mutex_lock(&msblk->block_cache_mutex);
390				msblk->block_cache[i].block = SQUASHFS_INVALID_BLK;
391				kfree(msblk->block_cache[i].data);
392				wake_up(&msblk->waitq);
393				mutex_unlock(&msblk->block_cache_mutex);
394				goto out;
395			}
396
397			mutex_lock(&msblk->block_cache_mutex);
398			wake_up(&msblk->waitq);
399			msblk->block_cache[i].block = block;
400			msblk->block_cache[i].next_index = next_index;
401			TRACE("Read cache block [%llx:%x]\n", block, offset);
402		}
403
404		if (msblk->block_cache[i].block != block) {
405			mutex_unlock(&msblk->block_cache_mutex);
406			continue;
407		}
408
409		bytes = msblk->block_cache[i].length - offset;
410
411		if (bytes < 1) {
412			mutex_unlock(&msblk->block_cache_mutex);
413			goto out;
414		} else if (bytes >= length) {
415			if (buffer)
416				memcpy(buffer, msblk->block_cache[i].data +
417						offset, length);
418			if (msblk->block_cache[i].length - offset == length) {
419				*next_block = msblk->block_cache[i].next_index;
420				*next_offset = 0;
421			} else {
422				*next_block = block;
423				*next_offset = offset + length;
424			}
425			mutex_unlock(&msblk->block_cache_mutex);
426			goto finish;
427		} else {
428			if (buffer) {
429				memcpy(buffer, msblk->block_cache[i].data +
430						offset, bytes);
431				buffer += bytes;
432			}
433			block = msblk->block_cache[i].next_index;
434			mutex_unlock(&msblk->block_cache_mutex);
435			length -= bytes;
436			offset = 0;
437		}
438	}
439
440finish:
441	return return_length;
442out:
443	return 0;
444}
445
446
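/*
 * Look up the start block and size of a fragment from the in-memory
 * fragment index table and the on-disk fragment table.  Returns 1 on
 * success and 0 on failure.
 */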
447static int get_fragment_location(struct super_block *s, unsigned int fragment,
448				long long *fragment_start_block,
449				unsigned int *fragment_size)
450{
451	struct squashfs_sb_info *msblk = s->s_fs_info;
452	long long start_block =
453		msblk->fragment_index[SQUASHFS_FRAGMENT_INDEX(fragment)];
454	int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
455	struct squashfs_fragment_entry fragment_entry;
456
457	if (msblk->swap) {
458		struct squashfs_fragment_entry sfragment_entry;
459
460		if (!squashfs_get_cached_block(s, (char *) &sfragment_entry,
461					start_block, offset,
462					sizeof(sfragment_entry), &start_block,
463					&offset))
464			goto out;
465		SQUASHFS_SWAP_FRAGMENT_ENTRY(&fragment_entry, &sfragment_entry);
466	} else
467		if (!squashfs_get_cached_block(s, (char *) &fragment_entry,
468					start_block, offset,
469					sizeof(fragment_entry), &start_block,
470					&offset))
471			goto out;
472
473	*fragment_start_block = fragment_entry.start_block;
474	*fragment_size = fragment_entry.size;
475
476	return 1;
477
478out:
479	return 0;
480}
481
482
483SQSH_EXTERN void release_cached_fragment(struct squashfs_sb_info *msblk, struct
484					squashfs_fragment_cache *fragment)
485{
486	mutex_lock(&msblk->fragment_mutex);
487	fragment->locked --;
488	wake_up(&msblk->fragment_wait_queue);
489	mutex_unlock(&msblk->fragment_mutex);
490}
491
492
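/*
 * Return a locked entry from the fragment cache for the fragment
 * starting at start_block, reading and decompressing it if it is not
 * already cached.  If every cache slot is locked, sleep until one is
 * released.  The caller must drop the entry with
 * release_cached_fragment().  Returns NULL on failure.
 */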
493SQSH_EXTERN struct squashfs_fragment_cache *get_cached_fragment(struct super_block
494					*s, long long start_block,
495					int length)
496{
497	int i, n;
498	struct squashfs_sb_info *msblk = s->s_fs_info;
499	struct squashfs_super_block *sblk = &msblk->sblk;
500
501	while ( 1 ) {
502		mutex_lock(&msblk->fragment_mutex);
503
504		for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS &&
505				msblk->fragment[i].block != start_block; i++);
506
507		if (i == SQUASHFS_CACHED_FRAGMENTS) {
508			for (i = msblk->next_fragment, n =
509				SQUASHFS_CACHED_FRAGMENTS; n &&
510				msblk->fragment[i].locked; n--, i = (i + 1) %
511				SQUASHFS_CACHED_FRAGMENTS);
512
513			if (n == 0) {
514				wait_queue_t wait;
515
516				init_waitqueue_entry(&wait, current);
517				add_wait_queue(&msblk->fragment_wait_queue,
518									&wait);
519				set_current_state(TASK_UNINTERRUPTIBLE);
520				mutex_unlock(&msblk->fragment_mutex);
521				schedule();
522				set_current_state(TASK_RUNNING);
523				remove_wait_queue(&msblk->fragment_wait_queue,
524									&wait);
525				continue;
526			}
527			msblk->next_fragment = (msblk->next_fragment + 1) %
528				SQUASHFS_CACHED_FRAGMENTS;
529
530			if (msblk->fragment[i].data == NULL)
531				if (!(msblk->fragment[i].data = SQUASHFS_ALLOC
532						(SQUASHFS_FILE_MAX_SIZE))) {
533					ERROR("Failed to allocate fragment "
534							"cache block\n");
535					mutex_unlock(&msblk->fragment_mutex);
536					goto out;
537				}
538
539			msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
540			msblk->fragment[i].locked = 1;
541			mutex_unlock(&msblk->fragment_mutex);
542
543			if (!(msblk->fragment[i].length = squashfs_read_data(s,
544						msblk->fragment[i].data,
545						start_block, length, NULL, sblk->block_size))) {
546				ERROR("Unable to read fragment cache block "
547							"[%llx]\n", start_block);
548				msblk->fragment[i].locked = 0;
549				smp_mb();
550				goto out;
551			}
552
553			mutex_lock(&msblk->fragment_mutex);
554			msblk->fragment[i].block = start_block;
555			TRACE("New fragment %d, start block %lld, locked %d\n",
556						i, msblk->fragment[i].block,
557						msblk->fragment[i].locked);
558			mutex_unlock(&msblk->fragment_mutex);
559			break;
560		}
561
562		msblk->fragment[i].locked++;
563		mutex_unlock(&msblk->fragment_mutex);
564		TRACE("Got fragment %d, start block %lld, locked %d\n", i,
565						msblk->fragment[i].block,
566						msblk->fragment[i].locked);
567		break;
568	}
569
570	return &msblk->fragment[i];
571
572out:
573	return NULL;
574}
575
576
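/*
 * Initialise the VFS inode fields common to all inode types from the
 * base inode header (inode number, timestamps, mode, uid/gid).
 */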
577static void squashfs_new_inode(struct squashfs_sb_info *msblk, struct inode *i,
578		struct squashfs_base_inode_header *inodeb)
579{
580	i->i_ino = inodeb->inode_number;
581	i->i_mtime.tv_sec = inodeb->mtime;
582	i->i_atime.tv_sec = inodeb->mtime;
583	i->i_ctime.tv_sec = inodeb->mtime;
584	i->i_uid = msblk->uid[inodeb->uid];
585	i->i_mode = inodeb->mode;
586	i->i_size = 0;
587	if (inodeb->guid == SQUASHFS_GUIDS)
588		i->i_gid = i->i_uid;
589	else
590		i->i_gid = msblk->guid[inodeb->guid];
591}
592
593
594static squashfs_inode_t squashfs_inode_lookup(struct super_block *s, int ino)
595{
596	struct squashfs_sb_info *msblk = s->s_fs_info;
597	long long start = msblk->inode_lookup_table[SQUASHFS_LOOKUP_BLOCK(ino - 1)];
598	int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino - 1);
599	squashfs_inode_t inode;
600
601	TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino);
602
603	if (msblk->swap) {
604		squashfs_inode_t sinode;
605
606		if (!squashfs_get_cached_block(s, (char *) &sinode, start, offset,
607					sizeof(sinode), &start, &offset))
608			goto out;
609		SQUASHFS_SWAP_INODE_T((&inode), &sinode);
610	} else if (!squashfs_get_cached_block(s, (char *) &inode, start, offset,
611					sizeof(inode), &start, &offset))
612			goto out;
613
614	TRACE("squashfs_inode_lookup, inode = 0x%llx\n", inode);
615
616	return inode;
617
618out:
619	return SQUASHFS_INVALID_BLK;
620}
621
622
623static void vfs_read_inode(struct inode *i)
624{
625	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
626	squashfs_inode_t inode = squashfs_inode_lookup(i->i_sb, i->i_ino);
627
628	TRACE("Entered vfs_read_inode\n");
629
630	if(inode != SQUASHFS_INVALID_BLK)
631		(msblk->read_inode)(i, inode);
632}
633
634
635static struct dentry *squashfs_get_parent(struct dentry *child)
636{
637	struct inode *i = child->d_inode;
638	struct inode *parent = iget(i->i_sb, SQUASHFS_I(i)->u.s2.parent_inode);
639	struct dentry *rv;
640
641	TRACE("Entered squashfs_get_parent\n");
642
643	if(parent == NULL) {
644		rv = ERR_PTR(-EACCES);
645		goto out;
646	}
647
648	rv = d_alloc_anon(parent);
649	if(rv == NULL)
650		rv = ERR_PTR(-ENOMEM);
651
652out:
653	return rv;
654}
655
656
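/*
 * Obtain the VFS inode for the given on-disk inode location, reading
 * it in via msblk->read_inode() if it is not already in the inode
 * cache.
 */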
657SQSH_EXTERN struct inode *squashfs_iget(struct super_block *s, squashfs_inode_t inode, unsigned int inode_number)
658{
659	struct squashfs_sb_info *msblk = s->s_fs_info;
660	struct inode *i = iget_locked(s, inode_number);
661
662	TRACE("Entered squashfs_iget\n");
663
664	if(i && (i->i_state & I_NEW)) {
665		(msblk->read_inode)(i, inode);
666		unlock_new_inode(i);
667	}
668
669	return i;
670}
671
672
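/*
 * Fill in a VFS inode from its on-disk representation.  The base
 * header is read first to determine the inode type, then the
 * type-specific header is read and the appropriate inode, file and
 * address space operations are installed.  Returns 1 on success, 0 on
 * failure (in which case the inode is marked bad).
 */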
673static int squashfs_read_inode(struct inode *i, squashfs_inode_t inode)
674{
675	struct super_block *s = i->i_sb;
676	struct squashfs_sb_info *msblk = s->s_fs_info;
677	struct squashfs_super_block *sblk = &msblk->sblk;
678	long long block = SQUASHFS_INODE_BLK(inode) +
679		sblk->inode_table_start;
680	unsigned int offset = SQUASHFS_INODE_OFFSET(inode);
681	long long next_block;
682	unsigned int next_offset;
683	union squashfs_inode_header id, sid;
684	struct squashfs_base_inode_header *inodeb = &id.base,
685					  *sinodeb = &sid.base;
686
687	TRACE("Entered squashfs_read_inode\n");
688
689	if (msblk->swap) {
690		if (!squashfs_get_cached_block(s, (char *) sinodeb, block,
691					offset, sizeof(*sinodeb), &next_block,
692					&next_offset))
693			goto failed_read;
694		SQUASHFS_SWAP_BASE_INODE_HEADER(inodeb, sinodeb,
695					sizeof(*sinodeb));
696	} else
697		if (!squashfs_get_cached_block(s, (char *) inodeb, block,
698					offset, sizeof(*inodeb), &next_block,
699					&next_offset))
700			goto failed_read;
701
702	squashfs_new_inode(msblk, i, inodeb);
703
704	switch(inodeb->inode_type) {
705		case SQUASHFS_FILE_TYPE: {
706			unsigned int frag_size;
707			long long frag_blk;
708			struct squashfs_reg_inode_header *inodep = &id.reg;
709			struct squashfs_reg_inode_header *sinodep = &sid.reg;
710
711			if (msblk->swap) {
712				if (!squashfs_get_cached_block(s, (char *)
713						sinodep, block, offset,
714						sizeof(*sinodep), &next_block,
715						&next_offset))
716					goto failed_read;
717				SQUASHFS_SWAP_REG_INODE_HEADER(inodep, sinodep);
718			} else
719				if (!squashfs_get_cached_block(s, (char *)
720						inodep, block, offset,
721						sizeof(*inodep), &next_block,
722						&next_offset))
723					goto failed_read;
724
725			frag_blk = SQUASHFS_INVALID_BLK;
726			if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
727					!get_fragment_location(s,
728					inodep->fragment, &frag_blk, &frag_size))
729				goto failed_read;
730
731			i->i_nlink = 1;
732			i->i_size = inodep->file_size;
733			i->i_fop = &generic_ro_fops;
734			i->i_mode |= S_IFREG;
735			i->i_blocks = ((i->i_size - 1) >> 9) + 1;
736			SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
737			SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
738			SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
739			SQUASHFS_I(i)->start_block = inodep->start_block;
740			SQUASHFS_I(i)->u.s1.block_list_start = next_block;
741			SQUASHFS_I(i)->offset = next_offset;
742			if (sblk->block_size > 4096)
743				i->i_data.a_ops = &squashfs_aops;
744			else
745				i->i_data.a_ops = &squashfs_aops_4K;
746
747			TRACE("File inode %x:%x, start_block %llx, "
748					"block_list_start %llx, offset %x\n",
749					SQUASHFS_INODE_BLK(inode), offset,
750					inodep->start_block, next_block,
751					next_offset);
752			break;
753		}
754		case SQUASHFS_LREG_TYPE: {
755			unsigned int frag_size;
756			long long frag_blk;
757			struct squashfs_lreg_inode_header *inodep = &id.lreg;
758			struct squashfs_lreg_inode_header *sinodep = &sid.lreg;
759
760			if (msblk->swap) {
761				if (!squashfs_get_cached_block(s, (char *)
762						sinodep, block, offset,
763						sizeof(*sinodep), &next_block,
764						&next_offset))
765					goto failed_read;
766				SQUASHFS_SWAP_LREG_INODE_HEADER(inodep, sinodep);
767			} else
768				if (!squashfs_get_cached_block(s, (char *)
769						inodep, block, offset,
770						sizeof(*inodep), &next_block,
771						&next_offset))
772					goto failed_read;
773
774			frag_blk = SQUASHFS_INVALID_BLK;
775			if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
776					!get_fragment_location(s,
777					inodep->fragment, &frag_blk, &frag_size))
778				goto failed_read;
779
780			i->i_nlink = inodep->nlink;
781			i->i_size = inodep->file_size;
782			i->i_fop = &generic_ro_fops;
783			i->i_mode |= S_IFREG;
784			i->i_blocks = ((i->i_size - 1) >> 9) + 1;
785			SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
786			SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
787			SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
788			SQUASHFS_I(i)->start_block = inodep->start_block;
789			SQUASHFS_I(i)->u.s1.block_list_start = next_block;
790			SQUASHFS_I(i)->offset = next_offset;
791			if (sblk->block_size > 4096)
792				i->i_data.a_ops = &squashfs_aops;
793			else
794				i->i_data.a_ops = &squashfs_aops_4K;
795
796			TRACE("File inode %x:%x, start_block %llx, "
797					"block_list_start %llx, offset %x\n",
798					SQUASHFS_INODE_BLK(inode), offset,
799					inodep->start_block, next_block,
800					next_offset);
801			break;
802		}
803		case SQUASHFS_DIR_TYPE: {
804			struct squashfs_dir_inode_header *inodep = &id.dir;
805			struct squashfs_dir_inode_header *sinodep = &sid.dir;
806
807			if (msblk->swap) {
808				if (!squashfs_get_cached_block(s, (char *)
809						sinodep, block, offset,
810						sizeof(*sinodep), &next_block,
811						&next_offset))
812					goto failed_read;
813				SQUASHFS_SWAP_DIR_INODE_HEADER(inodep, sinodep);
814			} else
815				if (!squashfs_get_cached_block(s, (char *)
816						inodep, block, offset,
817						sizeof(*inodep), &next_block,
818						&next_offset))
819					goto failed_read;
820
821			i->i_nlink = inodep->nlink;
822			i->i_size = inodep->file_size;
823			i->i_op = &squashfs_dir_inode_ops;
824			i->i_fop = &squashfs_dir_ops;
825			i->i_mode |= S_IFDIR;
826			SQUASHFS_I(i)->start_block = inodep->start_block;
827			SQUASHFS_I(i)->offset = inodep->offset;
828			SQUASHFS_I(i)->u.s2.directory_index_count = 0;
829			SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
830
831			TRACE("Directory inode %x:%x, start_block %x, offset "
832					"%x\n", SQUASHFS_INODE_BLK(inode),
833					offset, inodep->start_block,
834					inodep->offset);
835			break;
836		}
837		case SQUASHFS_LDIR_TYPE: {
838			struct squashfs_ldir_inode_header *inodep = &id.ldir;
839			struct squashfs_ldir_inode_header *sinodep = &sid.ldir;
840
841			if (msblk->swap) {
842				if (!squashfs_get_cached_block(s, (char *)
843						sinodep, block, offset,
844						sizeof(*sinodep), &next_block,
845						&next_offset))
846					goto failed_read;
847				SQUASHFS_SWAP_LDIR_INODE_HEADER(inodep,
848						sinodep);
849			} else
850				if (!squashfs_get_cached_block(s, (char *)
851						inodep, block, offset,
852						sizeof(*inodep), &next_block,
853						&next_offset))
854					goto failed_read;
855
856			i->i_nlink = inodep->nlink;
857			i->i_size = inodep->file_size;
858			i->i_op = &squashfs_dir_inode_ops;
859			i->i_fop = &squashfs_dir_ops;
860			i->i_mode |= S_IFDIR;
861			SQUASHFS_I(i)->start_block = inodep->start_block;
862			SQUASHFS_I(i)->offset = inodep->offset;
863			SQUASHFS_I(i)->u.s2.directory_index_start = next_block;
864			SQUASHFS_I(i)->u.s2.directory_index_offset =
865								next_offset;
866			SQUASHFS_I(i)->u.s2.directory_index_count =
867								inodep->i_count;
868			SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
869
870			TRACE("Long directory inode %x:%x, start_block %x, "
871					"offset %x\n",
872					SQUASHFS_INODE_BLK(inode), offset,
873					inodep->start_block, inodep->offset);
874			break;
875		}
876		case SQUASHFS_SYMLINK_TYPE: {
877			struct squashfs_symlink_inode_header *inodep =
878								&id.symlink;
879			struct squashfs_symlink_inode_header *sinodep =
880								&sid.symlink;
881
882			if (msblk->swap) {
883				if (!squashfs_get_cached_block(s, (char *)
884						sinodep, block, offset,
885						sizeof(*sinodep), &next_block,
886						&next_offset))
887					goto failed_read;
888				SQUASHFS_SWAP_SYMLINK_INODE_HEADER(inodep,
889								sinodep);
890			} else
891				if (!squashfs_get_cached_block(s, (char *)
892						inodep, block, offset,
893						sizeof(*inodep), &next_block,
894						&next_offset))
895					goto failed_read;
896
897			i->i_nlink = inodep->nlink;
898			i->i_size = inodep->symlink_size;
899			i->i_op = &page_symlink_inode_operations;
900			i->i_data.a_ops = &squashfs_symlink_aops;
901			i->i_mode |= S_IFLNK;
902			SQUASHFS_I(i)->start_block = next_block;
903			SQUASHFS_I(i)->offset = next_offset;
904
905			TRACE("Symbolic link inode %x:%x, start_block %llx, "
906					"offset %x\n",
907					SQUASHFS_INODE_BLK(inode), offset,
908					next_block, next_offset);
909			break;
910		 }
911		 case SQUASHFS_BLKDEV_TYPE:
912		 case SQUASHFS_CHRDEV_TYPE: {
913			struct squashfs_dev_inode_header *inodep = &id.dev;
914			struct squashfs_dev_inode_header *sinodep = &sid.dev;
915
916			if (msblk->swap) {
917				if (!squashfs_get_cached_block(s, (char *)
918						sinodep, block, offset,
919						sizeof(*sinodep), &next_block,
920						&next_offset))
921					goto failed_read;
922				SQUASHFS_SWAP_DEV_INODE_HEADER(inodep, sinodep);
923			} else
924				if (!squashfs_get_cached_block(s, (char *)
925						inodep, block, offset,
926						sizeof(*inodep), &next_block,
927						&next_offset))
928					goto failed_read;
929
930			i->i_nlink = inodep->nlink;
931			i->i_mode |= (inodeb->inode_type ==
932					SQUASHFS_CHRDEV_TYPE) ?  S_IFCHR :
933					S_IFBLK;
934			init_special_inode(i, i->i_mode,
935					old_decode_dev(inodep->rdev));
936
937			TRACE("Device inode %x:%x, rdev %x\n",
938					SQUASHFS_INODE_BLK(inode), offset,
939					inodep->rdev);
940			break;
941		 }
942		 case SQUASHFS_FIFO_TYPE:
943		 case SQUASHFS_SOCKET_TYPE: {
944			struct squashfs_ipc_inode_header *inodep = &id.ipc;
945			struct squashfs_ipc_inode_header *sinodep = &sid.ipc;
946
947			if (msblk->swap) {
948				if (!squashfs_get_cached_block(s, (char *)
949						sinodep, block, offset,
950						sizeof(*sinodep), &next_block,
951						&next_offset))
952					goto failed_read;
953				SQUASHFS_SWAP_IPC_INODE_HEADER(inodep, sinodep);
954			} else
955				if (!squashfs_get_cached_block(s, (char *)
956						inodep, block, offset,
957						sizeof(*inodep), &next_block,
958						&next_offset))
959					goto failed_read;
960
961			i->i_nlink = inodep->nlink;
962			i->i_mode |= (inodeb->inode_type == SQUASHFS_FIFO_TYPE)
963							? S_IFIFO : S_IFSOCK;
964			init_special_inode(i, i->i_mode, 0);
965			break;
966		 }
967		 default:
968			ERROR("Unknown inode type %d in squashfs_iget!\n",
969					inodeb->inode_type);
970			goto failed_read1;
971	}
972
973	return 1;
974
975failed_read:
976	ERROR("Unable to read inode [%llx:%x]\n", block, offset);
977
978failed_read1:
979	make_bad_inode(i);
980	return 0;
981}
982
983
984static int read_inode_lookup_table(struct super_block *s)
985{
986	struct squashfs_sb_info *msblk = s->s_fs_info;
987	struct squashfs_super_block *sblk = &msblk->sblk;
988	unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(sblk->inodes);
989
990	TRACE("In read_inode_lookup_table, length %d\n", length);
991
992	/* Allocate inode lookup table */
993	if (!(msblk->inode_lookup_table = kmalloc(length, GFP_KERNEL))) {
994		ERROR("Failed to allocate inode lookup table\n");
995		return 0;
996	}
997
998	if (!squashfs_read_data(s, (char *) msblk->inode_lookup_table,
999			sblk->lookup_table_start, length |
1000			SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length)) {
1001		ERROR("unable to read inode lookup table\n");
1002		return 0;
1003	}
1004
1005	if (msblk->swap) {
1006		int i;
1007		long long block;
1008
1009		for (i = 0; i < SQUASHFS_LOOKUP_BLOCKS(sblk->inodes); i++) {
1010			SQUASHFS_SWAP_LOOKUP_BLOCKS((&block),
1011						&msblk->inode_lookup_table[i], 1);
1012			msblk->inode_lookup_table[i] = block;
1013		}
1014	}
1015
1016	return 1;
1017}
1018
1019
1020static int read_fragment_index_table(struct super_block *s)
1021{
1022	struct squashfs_sb_info *msblk = s->s_fs_info;
1023	struct squashfs_super_block *sblk = &msblk->sblk;
1024	unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(sblk->fragments);
1025
1026	if(length == 0)
1027		return 1;
1028
1029	/* Allocate fragment index table */
1030	if (!(msblk->fragment_index = kmalloc(length, GFP_KERNEL))) {
1031		ERROR("Failed to allocate fragment index table\n");
1032		return 0;
1033	}
1034
1035	if (!squashfs_read_data(s, (char *) msblk->fragment_index,
1036			sblk->fragment_table_start, length |
1037			SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length)) {
1038		ERROR("unable to read fragment index table\n");
1039		return 0;
1040	}
1041
1042	if (msblk->swap) {
1043		int i;
1044		long long fragment;
1045
1046		for (i = 0; i < SQUASHFS_FRAGMENT_INDEXES(sblk->fragments); i++) {
1047			SQUASHFS_SWAP_FRAGMENT_INDEXES((&fragment),
1048						&msblk->fragment_index[i], 1);
1049			msblk->fragment_index[i] = fragment;
1050		}
1051	}
1052
1053	return 1;
1054}
1055
1056
1057static int supported_squashfs_filesystem(struct squashfs_sb_info *msblk, int silent)
1058{
1059	struct squashfs_super_block *sblk = &msblk->sblk;
1060
1061	msblk->read_inode = squashfs_read_inode;
1062	msblk->read_blocklist = read_blocklist;
1063	msblk->read_fragment_index_table = read_fragment_index_table;
1064
1065	if (sblk->s_major == 1) {
1066		if (!squashfs_1_0_supported(msblk)) {
1067			SERROR("Major/Minor mismatch, Squashfs 1.0 filesystems "
1068				"are unsupported\n");
1069			SERROR("Please recompile with "
1070				"Squashfs 1.0 support enabled\n");
1071			return 0;
1072		}
1073	} else if (sblk->s_major == 2) {
1074		if (!squashfs_2_0_supported(msblk)) {
1075			SERROR("Major/Minor mismatch, Squashfs 2.0 filesystems "
1076				"are unsupported\n");
1077			SERROR("Please recompile with "
1078				"Squashfs 2.0 support enabled\n");
1079			return 0;
1080		}
1081	} else if(sblk->s_major != SQUASHFS_MAJOR || sblk->s_minor >
1082			SQUASHFS_MINOR) {
1083		SERROR("Major/Minor mismatch, trying to mount newer %d.%d "
1084				"filesystem\n", sblk->s_major, sblk->s_minor);
1085		SERROR("Please update your kernel\n");
1086		return 0;
1087	}
1088
1089	return 1;
1090}
1091
1092
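/*
 * Read the superblock, verify the magic and version, and set up the
 * per-mount state: block cache, read_page buffer, uid/gid tables,
 * fragment cache and, for version 3 filesystems that provide one, the
 * inode lookup table used for NFS exporting.  Finally read the root
 * inode and allocate the root dentry.
 */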
1093static int squashfs_fill_super(struct super_block *s, void *data, int silent)
1094{
1095	struct squashfs_sb_info *msblk;
1096	struct squashfs_super_block *sblk;
1097	int i;
1098	char b[BDEVNAME_SIZE];
1099	struct inode *root;
1100	struct squashfs_super_block ssblk;
1101
1102	TRACE("Entered squashfs_read_superblock\n");
1103
1104	if (!(s->s_fs_info = kmalloc(sizeof(struct squashfs_sb_info),
1105						GFP_KERNEL))) {
1106		ERROR("Failed to allocate superblock\n");
1107		goto failure;
1108	}
1109	memset(s->s_fs_info, 0, sizeof(struct squashfs_sb_info));
1110	msblk = s->s_fs_info;
1111	sblk = &msblk->sblk;
1112
1113	msblk->devblksize = sb_min_blocksize(s, BLOCK_SIZE);
1114	msblk->devblksize_log2 = ffz(~msblk->devblksize);
1115
1116	mutex_init(&msblk->read_data_mutex);
1117	mutex_init(&msblk->read_page_mutex);
1118	mutex_init(&msblk->block_cache_mutex);
1119	mutex_init(&msblk->fragment_mutex);
1120	mutex_init(&msblk->meta_index_mutex);
1121
1122	init_waitqueue_head(&msblk->waitq);
1123	init_waitqueue_head(&msblk->fragment_wait_queue);
1124
1125	sblk->bytes_used = sizeof(struct squashfs_super_block);
1126	if (!squashfs_read_data(s, (char *) sblk, SQUASHFS_START,
1127					sizeof(struct squashfs_super_block) |
1128					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, sizeof(struct squashfs_super_block))) {
1129		SERROR("unable to read superblock\n");
1130		goto failed_mount;
1131	}
1132
1133	/* Check it is a SQUASHFS superblock */
1134	s->s_magic = sblk->s_magic;
1135	msblk->swap = 0;
1136	switch (sblk->s_magic) {
1137	case SQUASHFS_MAGIC_SWAP:
1138	case SQUASHFS_MAGIC_LZMA_SWAP:
1139		WARNING("Mounting a different endian SQUASHFS "
1140			"filesystem on %s\n", bdevname(s->s_bdev, b));
1141
1142		SQUASHFS_SWAP_SUPER_BLOCK(&ssblk, sblk);
1143		memcpy(sblk, &ssblk, sizeof(struct squashfs_super_block));
1144		msblk->swap = 1;
1145	case SQUASHFS_MAGIC:
1146	case SQUASHFS_MAGIC_LZMA:
1147		break;
1148	default:
1149		SERROR("Can't find a SQUASHFS superblock on %s\n",
1150		       bdevname(s->s_bdev, b));
1151		goto failed_mount;
1152	}
1153
1154	/* Check the MAJOR & MINOR versions */
1155	if(!supported_squashfs_filesystem(msblk, silent))
1156		goto failed_mount;
1157
1158	/* Check the filesystem does not extend beyond the end of the
1159	   block device */
1160	if(sblk->bytes_used < 0 || sblk->bytes_used > i_size_read(s->s_bdev->bd_inode))
1161		goto failed_mount;
1162
1163	/* Check the root inode for sanity */
1164	if (SQUASHFS_INODE_OFFSET(sblk->root_inode) > SQUASHFS_METADATA_SIZE)
1165		goto failed_mount;
1166
1167	TRACE("Found valid superblock on %s\n", bdevname(s->s_bdev, b));
1168	TRACE("Inodes are %scompressed\n",
1169					SQUASHFS_UNCOMPRESSED_INODES
1170					(sblk->flags) ? "un" : "");
1171	TRACE("Data is %scompressed\n",
1172					SQUASHFS_UNCOMPRESSED_DATA(sblk->flags)
1173					? "un" : "");
1174	TRACE("Check data is %s present in the filesystem\n",
1175					SQUASHFS_CHECK_DATA(sblk->flags) ?
1176					"" : "not");
1177	TRACE("Filesystem size %lld bytes\n", sblk->bytes_used);
1178	TRACE("Block size %d\n", sblk->block_size);
1179	TRACE("Number of inodes %d\n", sblk->inodes);
1180	if (sblk->s_major > 1)
1181		TRACE("Number of fragments %d\n", sblk->fragments);
1182	TRACE("Number of uids %d\n", sblk->no_uids);
1183	TRACE("Number of gids %d\n", sblk->no_guids);
1184	TRACE("sblk->inode_table_start %llx\n", sblk->inode_table_start);
1185	TRACE("sblk->directory_table_start %llx\n", sblk->directory_table_start);
1186	if (sblk->s_major > 1)
1187		TRACE("sblk->fragment_table_start %llx\n",
1188					sblk->fragment_table_start);
1189	TRACE("sblk->uid_start %llx\n", sblk->uid_start);
1190
1191	s->s_flags |= MS_RDONLY;
1192	s->s_op = &squashfs_super_ops;
1193
1194	/* Init inode_table block pointer array */
1195	if (!(msblk->block_cache = kmalloc(sizeof(struct squashfs_cache) *
1196					SQUASHFS_CACHED_BLKS, GFP_KERNEL))) {
1197		ERROR("Failed to allocate block cache\n");
1198		goto failed_mount;
1199	}
1200
1201	for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
1202		msblk->block_cache[i].block = SQUASHFS_INVALID_BLK;
1203
1204	msblk->next_cache = 0;
1205
1206	/* Allocate read_page block */
1207	if (!(msblk->read_page = kmalloc(sblk->block_size, GFP_KERNEL))) {
1208		ERROR("Failed to allocate read_page block\n");
1209		goto failed_mount;
1210	}
1211
1212	/* Allocate uid and gid tables */
1213	if (!(msblk->uid = kmalloc((sblk->no_uids + sblk->no_guids) *
1214					sizeof(unsigned int), GFP_KERNEL))) {
1215		ERROR("Failed to allocate uid/gid table\n");
1216		goto failed_mount;
1217	}
1218	msblk->guid = msblk->uid + sblk->no_uids;
1219
1220	if (msblk->swap) {
1221		unsigned int suid[sblk->no_uids + sblk->no_guids];
1222
1223		if (!squashfs_read_data(s, (char *) &suid, sblk->uid_start,
1224					((sblk->no_uids + sblk->no_guids) *
1225					 sizeof(unsigned int)) |
1226					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, (sblk->no_uids + sblk->no_guids) * sizeof(unsigned int))) {
1227			ERROR("unable to read uid/gid table\n");
1228			goto failed_mount;
1229		}
1230
1231		SQUASHFS_SWAP_DATA(msblk->uid, suid, (sblk->no_uids +
1232			sblk->no_guids), (sizeof(unsigned int) * 8));
1233	} else
1234		if (!squashfs_read_data(s, (char *) msblk->uid, sblk->uid_start,
1235					((sblk->no_uids + sblk->no_guids) *
1236					 sizeof(unsigned int)) |
1237					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, (sblk->no_uids + sblk->no_guids) * sizeof(unsigned int))) {
1238			ERROR("unable to read uid/gid table\n");
1239			goto failed_mount;
1240		}
1241
1242
1243	if (sblk->s_major == 1 && squashfs_1_0_supported(msblk))
1244		goto allocate_root;
1245
1246	if (!(msblk->fragment = kmalloc(sizeof(struct squashfs_fragment_cache) *
1247				SQUASHFS_CACHED_FRAGMENTS, GFP_KERNEL))) {
1248		ERROR("Failed to allocate fragment block cache\n");
1249		goto failed_mount;
1250	}
1251
1252	for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++) {
1253		msblk->fragment[i].locked = 0;
1254		msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
1255		msblk->fragment[i].data = NULL;
1256	}
1257
1258	msblk->next_fragment = 0;
1259
1260	/* Allocate and read fragment index table */
1261	if (msblk->read_fragment_index_table(s) == 0)
1262		goto failed_mount;
1263
1264	if(sblk->s_major < 3 || sblk->lookup_table_start == SQUASHFS_INVALID_BLK)
1265		goto allocate_root;
1266
1267	/* Allocate and read inode lookup table */
1268	if (read_inode_lookup_table(s) == 0)
1269		goto failed_mount;
1270
1271	s->s_op = &squashfs_export_super_ops;
1272	s->s_export_op = &squashfs_export_ops;
1273
1274allocate_root:
1275	root = new_inode(s);
1276	if ((msblk->read_inode)(root, sblk->root_inode) == 0)
1277		goto failed_mount;
1278	insert_inode_hash(root);
1279
1280	if ((s->s_root = d_alloc_root(root)) == NULL) {
1281		ERROR("Root inode create failed\n");
1282		iput(root);
1283		goto failed_mount;
1284	}
1285
1286	TRACE("Leaving squashfs_read_super\n");
1287	return 0;
1288
1289failed_mount:
1290	if(msblk->inode_lookup_table)
1291		kfree(msblk->inode_lookup_table);
1292	if (msblk->fragment_index)
1293		kfree(msblk->fragment_index);
1294	if(msblk->fragment)	{
1295		for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++)
1296			if(msblk->fragment[i].data)
1297				SQUASHFS_FREE(msblk->fragment[i].data);
1298		kfree(msblk->fragment);
1299	}
1300	if(msblk->uid)
1301		kfree(msblk->uid);
1302	if(msblk->read_page)
1303		kfree(msblk->read_page);
1304	if(msblk->block_cache) {
		for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
			if (msblk->block_cache[i].data)
				kfree(msblk->block_cache[i].data);
1308		kfree(msblk->block_cache);
1309	}
1310	if(msblk->fragment_index_2)
1311		kfree(msblk->fragment_index_2);
1312	if(s->s_fs_info)
1313		kfree(s->s_fs_info);
1314	s->s_fs_info = NULL;
1315	return -EINVAL;
1316
1317failure:
1318	return -ENOMEM;
1319}
1320
1321
1322static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf)
1323{
1324	struct squashfs_sb_info *msblk = dentry->d_sb->s_fs_info;
1325	struct squashfs_super_block *sblk = &msblk->sblk;
1326
1327	TRACE("Entered squashfs_statfs\n");
1328
1329	buf->f_type = sblk->s_magic;
1330	buf->f_bsize = sblk->block_size;
1331	buf->f_blocks = ((sblk->bytes_used - 1) >> sblk->block_log) + 1;
1332	buf->f_bfree = buf->f_bavail = 0;
1333	buf->f_files = sblk->inodes;
1334	buf->f_ffree = 0;
1335	buf->f_namelen = SQUASHFS_NAME_LEN;
1336
1337	return 0;
1338}
1339
1340
1341static int squashfs_symlink_readpage(struct file *file, struct page *page)
1342{
1343	struct inode *inode = page->mapping->host;
1344	int index = page->index << PAGE_CACHE_SHIFT, length, bytes;
1345	long long block = SQUASHFS_I(inode)->start_block;
1346	int offset = SQUASHFS_I(inode)->offset;
1347	void *pageaddr = kmap(page);
1348
1349	TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
1350				"%llx, offset %x\n", page->index,
1351				SQUASHFS_I(inode)->start_block,
1352				SQUASHFS_I(inode)->offset);
1353
1354	for (length = 0; length < index; length += bytes) {
1355		if (!(bytes = squashfs_get_cached_block(inode->i_sb, NULL,
1356				block, offset, PAGE_CACHE_SIZE, &block,
1357				&offset))) {
1358			ERROR("Unable to read symbolic link [%llx:%x]\n", block,
1359					offset);
1360			goto skip_read;
1361		}
1362	}
1363
1364	if (length != index) {
1365		ERROR("(squashfs_symlink_readpage) length != index\n");
1366		bytes = 0;
1367		goto skip_read;
1368	}
1369
1370	bytes = (i_size_read(inode) - length) > PAGE_CACHE_SIZE ? PAGE_CACHE_SIZE :
1371					i_size_read(inode) - length;
1372
1373	if (!(bytes = squashfs_get_cached_block(inode->i_sb, pageaddr, block,
1374					offset, bytes, &block, &offset)))
1375		ERROR("Unable to read symbolic link [%llx:%x]\n", block, offset);
1376
1377skip_read:
1378	memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
1379	kunmap(page);
1380	flush_dcache_page(page);
1381	SetPageUptodate(page);
1382	unlock_page(page);
1383
1384	return 0;
1385}
1386
1387
1388struct meta_index *locate_meta_index(struct inode *inode, int index, int offset)
1389{
1390	struct meta_index *meta = NULL;
1391	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1392	int i;
1393
1394	mutex_lock(&msblk->meta_index_mutex);
1395
1396	TRACE("locate_meta_index: index %d, offset %d\n", index, offset);
1397
1398	if(msblk->meta_index == NULL)
1399		goto not_allocated;
1400
1401	for (i = 0; i < SQUASHFS_META_NUMBER; i ++)
1402		if (msblk->meta_index[i].inode_number == inode->i_ino &&
1403				msblk->meta_index[i].offset >= offset &&
1404				msblk->meta_index[i].offset <= index &&
1405				msblk->meta_index[i].locked == 0) {
1406			TRACE("locate_meta_index: entry %d, offset %d\n", i,
1407					msblk->meta_index[i].offset);
1408			meta = &msblk->meta_index[i];
1409			offset = meta->offset;
1410		}
1411
1412	if (meta)
1413		meta->locked = 1;
1414
1415not_allocated:
1416	mutex_unlock(&msblk->meta_index_mutex);
1417
1418	return meta;
1419}
1420
1421
1422struct meta_index *empty_meta_index(struct inode *inode, int offset, int skip)
1423{
1424	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1425	struct meta_index *meta = NULL;
1426	int i;
1427
1428	mutex_lock(&msblk->meta_index_mutex);
1429
1430	TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);
1431
1432	if(msblk->meta_index == NULL) {
1433		if (!(msblk->meta_index = kmalloc(sizeof(struct meta_index) *
1434					SQUASHFS_META_NUMBER, GFP_KERNEL))) {
1435			ERROR("Failed to allocate meta_index\n");
1436			goto failed;
1437		}
1438		for(i = 0; i < SQUASHFS_META_NUMBER; i++) {
1439			msblk->meta_index[i].inode_number = 0;
1440			msblk->meta_index[i].locked = 0;
1441		}
1442		msblk->next_meta_index = 0;
1443	}
1444
1445	for(i = SQUASHFS_META_NUMBER; i &&
1446			msblk->meta_index[msblk->next_meta_index].locked; i --)
1447		msblk->next_meta_index = (msblk->next_meta_index + 1) %
1448			SQUASHFS_META_NUMBER;
1449
1450	if(i == 0) {
1451		TRACE("empty_meta_index: failed!\n");
1452		goto failed;
1453	}
1454
1455	TRACE("empty_meta_index: returned meta entry %d, %p\n",
1456			msblk->next_meta_index,
1457			&msblk->meta_index[msblk->next_meta_index]);
1458
1459	meta = &msblk->meta_index[msblk->next_meta_index];
1460	msblk->next_meta_index = (msblk->next_meta_index + 1) %
1461			SQUASHFS_META_NUMBER;
1462
1463	meta->inode_number = inode->i_ino;
1464	meta->offset = offset;
1465	meta->skip = skip;
1466	meta->entries = 0;
1467	meta->locked = 1;
1468
1469failed:
1470	mutex_unlock(&msblk->meta_index_mutex);
1471	return meta;
1472}
1473
1474
1475void release_meta_index(struct inode *inode, struct meta_index *meta)
1476{
1477	meta->locked = 0;
1478	smp_mb();
1479}
1480
1481
1482static int read_block_index(struct super_block *s, int blocks, char *block_list,
1483		long long *start_block, int *offset)
1484{
1485	struct squashfs_sb_info *msblk = s->s_fs_info;
1486	unsigned int *block_listp;
1487	int block = 0;
1488
1489	if (msblk->swap) {
1490		char sblock_list[blocks << 2];
1491
1492		if (!squashfs_get_cached_block(s, sblock_list, *start_block,
1493				*offset, blocks << 2, start_block, offset)) {
1494			ERROR("Unable to read block list [%llx:%x]\n",
1495				*start_block, *offset);
1496			goto failure;
1497		}
1498		SQUASHFS_SWAP_INTS(((unsigned int *)block_list),
1499				((unsigned int *)sblock_list), blocks);
1500	} else
1501		if (!squashfs_get_cached_block(s, block_list, *start_block,
1502				*offset, blocks << 2, start_block, offset)) {
1503			ERROR("Unable to read block list [%llx:%x]\n",
1504				*start_block, *offset);
1505			goto failure;
1506		}
1507
1508	for (block_listp = (unsigned int *) block_list; blocks;
1509				block_listp++, blocks --)
1510		block += SQUASHFS_COMPRESSED_SIZE_BLOCK(*block_listp);
1511
1512	return block;
1513
1514failure:
1515	return -1;
1516}
1517
1518
1519#define SIZE 256
1520
1521static inline int calculate_skip(int blocks) {
1522	int skip = (blocks - 1) / ((SQUASHFS_SLOTS * SQUASHFS_META_ENTRIES + 1) * SQUASHFS_META_INDEXES);
1523	return skip >= 7 ? 7 : skip + 1;
1524}
1525
1526
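/*
 * Build up and consult the per-inode meta index cache so that a seek
 * to a large file offset does not have to walk the whole block list
 * from the start.  On return, *index_block/*index_offset locate the
 * remaining block list entries and *data_block the corresponding
 * position of the data.  Returns the block index already accounted
 * for, or -1 on failure.
 */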
1527static int get_meta_index(struct inode *inode, int index,
1528		long long *index_block, int *index_offset,
1529		long long *data_block, char *block_list)
1530{
1531	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1532	struct squashfs_super_block *sblk = &msblk->sblk;
1533	int skip = calculate_skip(i_size_read(inode) >> sblk->block_log);
1534	int offset = 0;
1535	struct meta_index *meta;
1536	struct meta_entry *meta_entry;
1537	long long cur_index_block = SQUASHFS_I(inode)->u.s1.block_list_start;
1538	int cur_offset = SQUASHFS_I(inode)->offset;
1539	long long cur_data_block = SQUASHFS_I(inode)->start_block;
1540	int i;
1541
1542	index /= SQUASHFS_META_INDEXES * skip;
1543
1544	while ( offset < index ) {
1545		meta = locate_meta_index(inode, index, offset + 1);
1546
1547		if (meta == NULL) {
1548			if ((meta = empty_meta_index(inode, offset + 1,
1549							skip)) == NULL)
1550				goto all_done;
1551		} else {
1552			if(meta->entries == 0)
1553				goto failed;
1554			offset = index < meta->offset + meta->entries ? index :
1555				meta->offset + meta->entries - 1;
1556			meta_entry = &meta->meta_entry[offset - meta->offset];
1557			cur_index_block = meta_entry->index_block + sblk->inode_table_start;
1558			cur_offset = meta_entry->offset;
1559			cur_data_block = meta_entry->data_block;
1560			TRACE("get_meta_index: offset %d, meta->offset %d, "
1561				"meta->entries %d\n", offset, meta->offset,
1562				meta->entries);
1563			TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
1564				" data_block 0x%llx\n", cur_index_block,
1565				cur_offset, cur_data_block);
1566		}
1567
1568		for (i = meta->offset + meta->entries; i <= index &&
1569				i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
1570			int blocks = skip * SQUASHFS_META_INDEXES;
1571
1572			while (blocks) {
1573				int block = blocks > (SIZE >> 2) ? (SIZE >> 2) :
1574					blocks;
1575				int res = read_block_index(inode->i_sb, block,
1576					block_list, &cur_index_block,
1577					&cur_offset);
1578
1579				if (res == -1)
1580					goto failed;
1581
1582				cur_data_block += res;
1583				blocks -= block;
1584			}
1585
1586			meta_entry = &meta->meta_entry[i - meta->offset];
1587			meta_entry->index_block = cur_index_block - sblk->inode_table_start;
1588			meta_entry->offset = cur_offset;
1589			meta_entry->data_block = cur_data_block;
1590			meta->entries ++;
1591			offset ++;
1592		}
1593
1594		TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
1595				meta->offset, meta->entries);
1596
1597		release_meta_index(inode, meta);
1598	}
1599
1600all_done:
1601	*index_block = cur_index_block;
1602	*index_offset = cur_offset;
1603	*data_block = cur_data_block;
1604
1605	return offset * SQUASHFS_META_INDEXES * skip;
1606
1607failed:
1608	release_meta_index(inode, meta);
1609	return -1;
1610}
1611
1612
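/*
 * Locate the on-disk position of the block holding the given block
 * index within a regular file, using get_meta_index() to skip ahead
 * and then walking the remaining block list entries.  Returns the
 * filesystem location of the block and stores its on-disk length
 * (with compression flag) in *bsize; returns 0 on failure.
 */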
1613static long long read_blocklist(struct inode *inode, int index,
1614				int readahead_blks, char *block_list,
1615				unsigned short **block_p, unsigned int *bsize)
1616{
1617	long long block_ptr;
1618	int offset;
1619	long long block;
1620	int res = get_meta_index(inode, index, &block_ptr, &offset, &block,
1621		block_list);
1622
1623	TRACE("read_blocklist: res %d, index %d, block_ptr 0x%llx, offset"
1624		       " 0x%x, block 0x%llx\n", res, index, block_ptr, offset,
1625		       block);
1626
1627	if(res == -1)
1628		goto failure;
1629
1630	index -= res;
1631
1632	while ( index ) {
1633		int blocks = index > (SIZE >> 2) ? (SIZE >> 2) : index;
1634		int res = read_block_index(inode->i_sb, blocks, block_list,
1635			&block_ptr, &offset);
1636		if (res == -1)
1637			goto failure;
1638		block += res;
1639		index -= blocks;
1640	}
1641
1642	if (read_block_index(inode->i_sb, 1, block_list,
1643			&block_ptr, &offset) == -1)
1644		goto failure;
1645	*bsize = *((unsigned int *) block_list);
1646
1647	return block;
1648
1649failure:
1650	return 0;
1651}
1652
1653
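/*
 * Read a page of a regular file whose block size is larger than 4K.
 * The whole filesystem block (or tail-end fragment) is decompressed
 * into a single buffer under read_page_mutex and then pushed out to
 * every page it covers, not just the page being read.
 */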
1654static int squashfs_readpage(struct file *file, struct page *page)
1655{
1656	struct inode *inode = page->mapping->host;
1657	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1658	struct squashfs_super_block *sblk = &msblk->sblk;
1659	unsigned char *block_list;
1660	long long block;
1661	unsigned int bsize, i = 0, bytes = 0, byte_offset = 0;
1662	int index = page->index >> (sblk->block_log - PAGE_CACHE_SHIFT);
1663 	void *pageaddr;
1664	struct squashfs_fragment_cache *fragment = NULL;
1665	char *data_ptr = msblk->read_page;
1666
1667	int mask = (1 << (sblk->block_log - PAGE_CACHE_SHIFT)) - 1;
1668	int start_index = page->index & ~mask;
1669	int end_index = start_index | mask;
1670
1671	TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
1672					page->index,
1673					SQUASHFS_I(inode)->start_block);
1674
1675	if (!(block_list = kmalloc(SIZE, GFP_KERNEL))) {
1676		ERROR("Failed to allocate block_list\n");
1677		goto skip_read;
1678	}
1679
1680	if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
1681					PAGE_CACHE_SHIFT))
1682		goto skip_read;
1683
1684	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
1685					|| index < (i_size_read(inode) >>
1686					sblk->block_log)) {
1687		if ((block = (msblk->read_blocklist)(inode, index, 1,
1688					block_list, NULL, &bsize)) == 0)
1689			goto skip_read;
1690
1691		mutex_lock(&msblk->read_page_mutex);
1692
1693		if (!(bytes = squashfs_read_data(inode->i_sb, msblk->read_page,
1694					block, bsize, NULL, sblk->block_size))) {
1695			ERROR("Unable to read page, block %llx, size %x\n", block,
1696					bsize);
1697			mutex_unlock(&msblk->read_page_mutex);
1698			goto skip_read;
1699		}
1700	} else {
1701		if ((fragment = get_cached_fragment(inode->i_sb,
1702					SQUASHFS_I(inode)->
1703					u.s1.fragment_start_block,
1704					SQUASHFS_I(inode)->u.s1.fragment_size))
1705					== NULL) {
1706			ERROR("Unable to read page, block %llx, size %x\n",
1707					SQUASHFS_I(inode)->
1708					u.s1.fragment_start_block,
1709					(int) SQUASHFS_I(inode)->
1710					u.s1.fragment_size);
1711			goto skip_read;
1712		}
1713		bytes = SQUASHFS_I(inode)->u.s1.fragment_offset +
1714					(i_size_read(inode) & (sblk->block_size
1715					- 1));
1716		byte_offset = SQUASHFS_I(inode)->u.s1.fragment_offset;
1717		data_ptr = fragment->data;
1718	}
1719
1720	for (i = start_index; i <= end_index && byte_offset < bytes;
1721					i++, byte_offset += PAGE_CACHE_SIZE) {
1722		struct page *push_page;
1723		int avail = (bytes - byte_offset) > PAGE_CACHE_SIZE ?
1724					PAGE_CACHE_SIZE : bytes - byte_offset;
1725
1726		TRACE("bytes %d, i %d, byte_offset %d, available_bytes %d\n",
1727					bytes, i, byte_offset, avail);
1728
1729		push_page = (i == page->index) ? page :
1730			grab_cache_page_nowait(page->mapping, i);
1731
1732		if (!push_page)
1733			continue;
1734
1735		if (PageUptodate(push_page))
1736			goto skip_page;
1737
1738 		pageaddr = kmap_atomic(push_page, KM_USER0);
1739		memcpy(pageaddr, data_ptr + byte_offset, avail);
1740		memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
1741		kunmap_atomic(pageaddr, KM_USER0);
1742		flush_dcache_page(push_page);
1743		SetPageUptodate(push_page);
1744skip_page:
1745		unlock_page(push_page);
1746		if(i != page->index)
1747			page_cache_release(push_page);
1748	}
1749
1750	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
1751					|| index < (i_size_read(inode) >>
1752					sblk->block_log))
1753		mutex_unlock(&msblk->read_page_mutex);
1754	else
1755		release_cached_fragment(msblk, fragment);
1756
1757	kfree(block_list);
1758	return 0;
1759
1760skip_read:
1761	pageaddr = kmap_atomic(page, KM_USER0);
1762	memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
1763	kunmap_atomic(pageaddr, KM_USER0);
1764	flush_dcache_page(page);
1765	SetPageUptodate(page);
1766	unlock_page(page);
1767
1768	kfree(block_list);
1769	return 0;
1770}
1771
1772
1773static int squashfs_readpage4K(struct file *file, struct page *page)
1774{
1775	struct inode *inode = page->mapping->host;
1776	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1777	struct squashfs_super_block *sblk = &msblk->sblk;
1778	unsigned char *block_list;
1779	long long block;
1780	unsigned int bsize, bytes = 0;
1781 	void *pageaddr;
1782
1783	TRACE("Entered squashfs_readpage4K, page index %lx, start block %llx\n",
1784					page->index,
1785					SQUASHFS_I(inode)->start_block);
1786
1787	if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
1788					PAGE_CACHE_SHIFT)) {
1789		block_list = NULL;
1790		goto skip_read;
1791	}
1792
1793	if (!(block_list = kmalloc(SIZE, GFP_KERNEL))) {
1794		ERROR("Failed to allocate block_list\n");
1795		goto skip_read;
1796	}
1797
1798	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
1799					|| page->index < (i_size_read(inode) >>
1800					sblk->block_log)) {
1801		block = (msblk->read_blocklist)(inode, page->index, 1,
1802					block_list, NULL, &bsize);
1803		if(block == 0)
1804			goto skip_read;
1805
1806		mutex_lock(&msblk->read_page_mutex);
1807		bytes = squashfs_read_data(inode->i_sb, msblk->read_page, block,
1808					bsize, NULL, sblk->block_size);
1809		if (bytes) {
1810			pageaddr = kmap_atomic(page, KM_USER0);
1811			memcpy(pageaddr, msblk->read_page, bytes);
1812			kunmap_atomic(pageaddr, KM_USER0);
1813		} else
1814			ERROR("Unable to read page, block %llx, size %x\n",
1815					block, bsize);
1816		mutex_unlock(&msblk->read_page_mutex);
1817	} else {
1818		struct squashfs_fragment_cache *fragment =
1819			get_cached_fragment(inode->i_sb,
1820					SQUASHFS_I(inode)->
1821					u.s1.fragment_start_block,
					SQUASHFS_I(inode)->u.s1.fragment_size);
1823		if (fragment) {
1824			bytes = i_size_read(inode) & (sblk->block_size - 1);
1825			pageaddr = kmap_atomic(page, KM_USER0);
1826			memcpy(pageaddr, fragment->data + SQUASHFS_I(inode)->
1827					u.s1.fragment_offset, bytes);
1828			kunmap_atomic(pageaddr, KM_USER0);
1829			release_cached_fragment(msblk, fragment);
1830		} else
1831			ERROR("Unable to read page, block %llx, size %x\n",
1832					SQUASHFS_I(inode)->
1833					u.s1.fragment_start_block, (int)
					SQUASHFS_I(inode)->u.s1.fragment_size);
1835	}
1836
1837skip_read:
1838	pageaddr = kmap_atomic(page, KM_USER0);
1839	memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
1840	kunmap_atomic(pageaddr, KM_USER0);
1841	flush_dcache_page(page);
1842	SetPageUptodate(page);
1843	unlock_page(page);
1844
1845	kfree(block_list);
1846	return 0;
1847}
1848
1849
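/*
 * Use the directory index to skip the linear directory scan forward to the
 * metadata block containing the entry at file position f_pos.  On return
 * *next_block/*next_offset point at that block and the function returns the
 * file position (offset by 3 for the synthetic "." and ".." entries) from
 * which the caller should continue reading.
 */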
1850static int get_dir_index_using_offset(struct super_block *s, long long
1851				*next_block, unsigned int *next_offset,
1852				long long index_start,
1853				unsigned int index_offset, int i_count,
1854				long long f_pos)
1855{
1856	struct squashfs_sb_info *msblk = s->s_fs_info;
1857	struct squashfs_super_block *sblk = &msblk->sblk;
1858	int i, length = 0;
1859	struct squashfs_dir_index index;
1860
1861	TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %d\n",
1862					i_count, (unsigned int) f_pos);
1863
	f_pos -= 3;
1865	if (f_pos == 0)
1866		goto finish;
1867
1868	for (i = 0; i < i_count; i++) {
1869		if (msblk->swap) {
1870			struct squashfs_dir_index sindex;
1871			squashfs_get_cached_block(s, (char *) &sindex,
1872					index_start, index_offset,
1873					sizeof(sindex), &index_start,
1874					&index_offset);
1875			SQUASHFS_SWAP_DIR_INDEX(&index, &sindex);
1876		} else
1877			squashfs_get_cached_block(s, (char *) &index,
1878					index_start, index_offset,
1879					sizeof(index), &index_start,
1880					&index_offset);
1881
1882		if (index.index > f_pos)
1883			break;
1884
1885		squashfs_get_cached_block(s, NULL, index_start, index_offset,
1886					index.size + 1, &index_start,
1887					&index_offset);
1888
1889		length = index.index;
1890		*next_block = index.start_block + sblk->directory_table_start;
1891	}
1892
1893	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
1894
1895finish:
1896	return length + 3;
1897}
1898
1899
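/*
 * As get_dir_index_using_offset(), but the directory index is searched by
 * name rather than by file position, for use by lookup.
 */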
1900static int get_dir_index_using_name(struct super_block *s, long long
1901				*next_block, unsigned int *next_offset,
1902				long long index_start,
1903				unsigned int index_offset, int i_count,
1904				const char *name, int size)
1905{
1906	struct squashfs_sb_info *msblk = s->s_fs_info;
1907	struct squashfs_super_block *sblk = &msblk->sblk;
1908	int i, length = 0;
1909	struct squashfs_dir_index *index;
1910	char *str;
1911
1912	TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
1913
1914	if (!(str = kmalloc(sizeof(struct squashfs_dir_index) +
1915		(SQUASHFS_NAME_LEN + 1) * 2, GFP_KERNEL))) {
1916		ERROR("Failed to allocate squashfs_dir_index\n");
1917		goto failure;
1918	}
1919
1920	index = (struct squashfs_dir_index *) (str + SQUASHFS_NAME_LEN + 1);
1921	strncpy(str, name, size);
1922	str[size] = '\0';
1923
1924	for (i = 0; i < i_count; i++) {
1925		if (msblk->swap) {
1926			struct squashfs_dir_index sindex;
1927			squashfs_get_cached_block(s, (char *) &sindex,
1928					index_start, index_offset,
1929					sizeof(sindex), &index_start,
1930					&index_offset);
1931			SQUASHFS_SWAP_DIR_INDEX(index, &sindex);
1932		} else
1933			squashfs_get_cached_block(s, (char *) index,
1934					index_start, index_offset,
1935					sizeof(struct squashfs_dir_index),
1936					&index_start, &index_offset);
1937
1938		squashfs_get_cached_block(s, index->name, index_start,
1939					index_offset, index->size + 1,
1940					&index_start, &index_offset);
1941
1942		index->name[index->size + 1] = '\0';
1943
1944		if (strcmp(index->name, str) > 0)
1945			break;
1946
1947		length = index->index;
1948		*next_block = index->start_block + sblk->directory_table_start;
1949	}
1950
1951	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
1952	kfree(str);
1953failure:
1954	return length + 3;
1955}
1956
1957
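/*
 * readdir: emit the synthetic "." and ".." entries, then walk the directory
 * headers and entries starting from the position selected by the directory
 * index, calling filldir for every entry at or beyond file->f_pos.
 */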
1958static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
1959{
1960	struct inode *i = file->f_dentry->d_inode;
1961	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
1962	struct squashfs_super_block *sblk = &msblk->sblk;
1963	long long next_block = SQUASHFS_I(i)->start_block +
1964		sblk->directory_table_start;
1965	int next_offset = SQUASHFS_I(i)->offset, length = 0,
1966		dir_count;
1967	struct squashfs_dir_header dirh;
1968	struct squashfs_dir_entry *dire;
1969
1970	TRACE("Entered squashfs_readdir [%llx:%x]\n", next_block, next_offset);
1971
1972	if (!(dire = kmalloc(sizeof(struct squashfs_dir_entry) +
1973		SQUASHFS_NAME_LEN + 1, GFP_KERNEL))) {
1974		ERROR("Failed to allocate squashfs_dir_entry\n");
1975		goto finish;
1976	}
1977
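	/*
	 * "." and ".." are not stored in the directory table, so synthesise
	 * them here; together they occupy file positions 0 to 2.
	 */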
	while (file->f_pos < 3) {
1979		char *name;
1980		int size, i_ino;
1981
		if (file->f_pos == 0) {
1983			name = ".";
1984			size = 1;
1985			i_ino = i->i_ino;
1986		} else {
1987			name = "..";
1988			size = 2;
1989			i_ino = SQUASHFS_I(i)->u.s2.parent_inode;
1990		}
1991		TRACE("Calling filldir(%x, %s, %d, %d, %d, %d)\n",
1992				(unsigned int) dirent, name, size, (int)
1993				file->f_pos, i_ino,
1994				squashfs_filetype_table[1]);
1995
1996		if (filldir(dirent, name, size,
1997				file->f_pos, i_ino,
1998				squashfs_filetype_table[1]) < 0) {
1999				TRACE("Filldir returned less than 0\n");
2000				goto finish;
2001		}
2002		file->f_pos += size;
2003	}
2004
2005	length = get_dir_index_using_offset(i->i_sb, &next_block, &next_offset,
2006				SQUASHFS_I(i)->u.s2.directory_index_start,
2007				SQUASHFS_I(i)->u.s2.directory_index_offset,
2008				SQUASHFS_I(i)->u.s2.directory_index_count,
2009				file->f_pos);
2010
2011	while (length < i_size_read(i)) {
2012		/* read directory header */
2013		if (msblk->swap) {
2014			struct squashfs_dir_header sdirh;
2015
2016			if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
2017					next_block, next_offset, sizeof(sdirh),
2018					&next_block, &next_offset))
2019				goto failed_read;
2020
2021			length += sizeof(sdirh);
2022			SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
2023		} else {
2024			if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
2025					next_block, next_offset, sizeof(dirh),
2026					&next_block, &next_offset))
2027				goto failed_read;
2028
2029			length += sizeof(dirh);
2030		}
2031
2032		dir_count = dirh.count + 1;
2033		while (dir_count--) {
2034			if (msblk->swap) {
2035				struct squashfs_dir_entry sdire;
2036				if (!squashfs_get_cached_block(i->i_sb, (char *)
2037						&sdire, next_block, next_offset,
2038						sizeof(sdire), &next_block,
2039						&next_offset))
2040					goto failed_read;
2041
2042				length += sizeof(sdire);
2043				SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
2044			} else {
2045				if (!squashfs_get_cached_block(i->i_sb, (char *)
2046						dire, next_block, next_offset,
2047						sizeof(*dire), &next_block,
2048						&next_offset))
2049					goto failed_read;
2050
2051				length += sizeof(*dire);
2052			}
2053
2054			if (!squashfs_get_cached_block(i->i_sb, dire->name,
2055						next_block, next_offset,
2056						dire->size + 1, &next_block,
2057						&next_offset))
2058				goto failed_read;
2059
2060			length += dire->size + 1;
2061
2062			if (file->f_pos >= length)
2063				continue;
2064
2065			dire->name[dire->size + 1] = '\0';
2066
2067			TRACE("Calling filldir(%x, %s, %d, %d, %x:%x, %d, %d)\n",
2068					(unsigned int) dirent, dire->name,
2069					dire->size + 1, (int) file->f_pos,
2070					dirh.start_block, dire->offset,
2071					dirh.inode_number + dire->inode_number,
2072					squashfs_filetype_table[dire->type]);
2073
2074			if (filldir(dirent, dire->name, dire->size + 1,
2075					file->f_pos,
2076					dirh.inode_number + dire->inode_number,
2077					squashfs_filetype_table[dire->type])
2078					< 0) {
2079				TRACE("Filldir returned less than 0\n");
2080				goto finish;
2081			}
2082			file->f_pos = length;
2083		}
2084	}
2085
2086finish:
2087	kfree(dire);
2088	return 0;
2089
2090failed_read:
2091	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
2092		next_offset);
2093	kfree(dire);
2094	return 0;
2095}
2096
2097
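/*
 * Look up a name in a directory.  The directory index narrows the search to
 * a single metadata block, which is then scanned linearly; since entries are
 * sorted, the scan stops as soon as an entry's first character sorts after
 * the first character of the requested name.
 */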
2098static struct dentry *squashfs_lookup(struct inode *i, struct dentry *dentry,
2099				struct nameidata *nd)
2100{
2101	const unsigned char *name = dentry->d_name.name;
2102	int len = dentry->d_name.len;
2103	struct inode *inode = NULL;
2104	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
2105	struct squashfs_super_block *sblk = &msblk->sblk;
2106	long long next_block = SQUASHFS_I(i)->start_block +
2107				sblk->directory_table_start;
2108	int next_offset = SQUASHFS_I(i)->offset, length = 0,
2109				dir_count;
2110	struct squashfs_dir_header dirh;
2111	struct squashfs_dir_entry *dire;
2112
2113	TRACE("Entered squashfs_lookup [%llx:%x]\n", next_block, next_offset);
2114
2115	if (!(dire = kmalloc(sizeof(struct squashfs_dir_entry) +
2116		SQUASHFS_NAME_LEN + 1, GFP_KERNEL))) {
2117		ERROR("Failed to allocate squashfs_dir_entry\n");
2118		goto exit_lookup;
2119	}
2120
2121	if (len > SQUASHFS_NAME_LEN)
2122		goto exit_lookup;
2123
2124	length = get_dir_index_using_name(i->i_sb, &next_block, &next_offset,
2125				SQUASHFS_I(i)->u.s2.directory_index_start,
2126				SQUASHFS_I(i)->u.s2.directory_index_offset,
2127				SQUASHFS_I(i)->u.s2.directory_index_count, name,
2128				len);
2129
2130	while (length < i_size_read(i)) {
2131		/* read directory header */
2132		if (msblk->swap) {
2133			struct squashfs_dir_header sdirh;
2134			if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
2135					next_block, next_offset, sizeof(sdirh),
2136					&next_block, &next_offset))
2137				goto failed_read;
2138
2139			length += sizeof(sdirh);
2140			SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
2141		} else {
2142			if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
2143					next_block, next_offset, sizeof(dirh),
2144					&next_block, &next_offset))
2145				goto failed_read;
2146
2147			length += sizeof(dirh);
2148		}
2149
2150		dir_count = dirh.count + 1;
2151		while (dir_count--) {
2152			if (msblk->swap) {
2153				struct squashfs_dir_entry sdire;
2154				if (!squashfs_get_cached_block(i->i_sb, (char *)
						&sdire, next_block, next_offset,
2156						sizeof(sdire), &next_block,
2157						&next_offset))
2158					goto failed_read;
2159
2160				length += sizeof(sdire);
2161				SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
2162			} else {
2163				if (!squashfs_get_cached_block(i->i_sb, (char *)
						dire, next_block, next_offset,
2165						sizeof(*dire), &next_block,
2166						&next_offset))
2167					goto failed_read;
2168
2169				length += sizeof(*dire);
2170			}
2171
2172			if (!squashfs_get_cached_block(i->i_sb, dire->name,
2173					next_block, next_offset, dire->size + 1,
2174					&next_block, &next_offset))
2175				goto failed_read;
2176
2177			length += dire->size + 1;
2178
2179			if (name[0] < dire->name[0])
2180				goto exit_lookup;
2181
2182			if ((len == dire->size + 1) && !strncmp(name, dire->name, len)) {
2183				squashfs_inode_t ino = SQUASHFS_MKINODE(dirh.start_block,
2184								dire->offset);
2185
2186				TRACE("calling squashfs_iget for directory "
2187					"entry %s, inode %x:%x, %d\n", name,
2188					dirh.start_block, dire->offset,
2189					dirh.inode_number + dire->inode_number);
2190
				inode = squashfs_iget(i->i_sb, ino,
					dirh.inode_number + dire->inode_number);
2192
2193				goto exit_lookup;
2194			}
2195		}
2196	}
2197
2198exit_lookup:
2199	kfree(dire);
2200	if (inode)
2201		return d_splice_alias(inode, dentry);
2202	d_add(dentry, inode);
2203	return ERR_PTR(0);
2204
2205failed_read:
2206	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
2207		next_offset);
2208	goto exit_lookup;
2209}
2210
2211
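/* Squashfs is read-only, so remount simply forces MS_RDONLY back on. */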
2212static int squashfs_remount(struct super_block *s, int *flags, char *data)
2213{
2214	*flags |= MS_RDONLY;
2215	return 0;
2216}
2217
2218
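/*
 * Free all per-superblock state on unmount: the metadata block cache, the
 * fragment cache, the read_page buffer, the uid table, the fragment index
 * tables and the meta index.
 */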
2219static void squashfs_put_super(struct super_block *s)
2220{
2221	int i;
2222
2223	if (s->s_fs_info) {
2224		struct squashfs_sb_info *sbi = s->s_fs_info;
2225		if (sbi->block_cache)
2226			for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
2227				if (sbi->block_cache[i].block !=
2228							SQUASHFS_INVALID_BLK)
2229					kfree(sbi->block_cache[i].data);
2230		if (sbi->fragment)
2231			for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++)
2232				SQUASHFS_FREE(sbi->fragment[i].data);
2233		kfree(sbi->fragment);
2234		kfree(sbi->block_cache);
2235		kfree(sbi->read_page);
2236		kfree(sbi->uid);
2237		kfree(sbi->fragment_index);
2238		kfree(sbi->fragment_index_2);
2239		kfree(sbi->meta_index);
2240		kfree(s->s_fs_info);
2241		s->s_fs_info = NULL;
2242	}
2243}
2244
2245
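/* Mount entry point: defer to the generic block-device superblock helper. */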
2246static int squashfs_get_sb(struct file_system_type *fs_type, int flags,
2247				const char *dev_name, void *data,
2248				struct vfsmount *mnt)
2249{
2250	return get_sb_bdev(fs_type, flags, dev_name, data, squashfs_fill_super,
2251				mnt);
2252}
2253
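/*
 * Module initialisation: create the inode slab cache, allocate the global
 * read buffer and register the filesystem, undoing the earlier steps if any
 * of them fails.
 */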
2254static int __init init_squashfs_fs(void)
2255{
2256	int err = init_inodecache();
2257	if (err)
2258		goto out;
2259
2260	printk(KERN_INFO "squashfs: version 3.2-r2 (2007/01/15) "
2261		"Phillip Lougher\n");
2262
	sqread_data = kmalloc(SQUASHFS_FILE_MAX_SIZE, GFP_KERNEL);
	if (!sqread_data) {
		err = -ENOMEM;
		destroy_inodecache();
		goto out;
	}

	if ((err = register_filesystem(&squashfs_fs_type))) {
		kfree(sqread_data);
		destroy_inodecache();
	}
2269
2270out:
2271	return err;
2272}
2273
2274
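/*
 * Module cleanup: free the global read buffer, unregister the filesystem and
 * destroy the inode slab cache.
 */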
2275static void __exit exit_squashfs_fs(void)
2276{
	kfree(sqread_data);
2279
2280	unregister_filesystem(&squashfs_fs_type);
2281	destroy_inodecache();
2282}
2283
2284
static struct kmem_cache *squashfs_inode_cachep;
2286
2287
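/* Allocate a squashfs_inode_info from the dedicated slab cache. */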
2288static struct inode *squashfs_alloc_inode(struct super_block *sb)
2289{
2290	struct squashfs_inode_info *ei;
2291	ei = kmem_cache_alloc(squashfs_inode_cachep, GFP_KERNEL);
2292	if (!ei)
2293		return NULL;
2294	return &ei->vfs_inode;
2295}
2296
2297
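/* Return a squashfs_inode_info to the slab cache. */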
2298static void squashfs_destroy_inode(struct inode *inode)
2299{
2300	kmem_cache_free(squashfs_inode_cachep, SQUASHFS_I(inode));
2301}
2302
2303
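/* Slab constructor: initialise the embedded VFS inode once per object. */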
static void init_once(void *foo, struct kmem_cache *cachep,
				unsigned long flags)
2305{
2306	struct squashfs_inode_info *ei = foo;
2307
2308	inode_init_once(&ei->vfs_inode);
2309}
2310
2311
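/* Create the slab cache used to allocate squashfs inodes. */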
2312static int __init init_inodecache(void)
2313{
2314	squashfs_inode_cachep = kmem_cache_create("squashfs_inode_cache",
2315	     sizeof(struct squashfs_inode_info),
2316	     0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
2317	     init_once, NULL);
2318	if (squashfs_inode_cachep == NULL)
2319		return -ENOMEM;
2320	return 0;
2321}
2322
2323
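/* Destroy the squashfs inode slab cache. */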
2324static void destroy_inodecache(void)
2325{
2326	kmem_cache_destroy(squashfs_inode_cachep);
2327}
2328
2329
2330module_init(init_squashfs_fs);
2331module_exit(exit_squashfs_fs);
2332MODULE_DESCRIPTION("squashfs 3.2-r2, a compressed read-only filesystem");
2333MODULE_AUTHOR("Phillip Lougher <phillip@lougher.org.uk>");
2334MODULE_LICENSE("GPL");
2335