1/*
2 * Create a squashfs filesystem.  This is a highly compressed read only
3 * filesystem.
4 *
5 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
6 * Phillip Lougher <phillip@lougher.demon.co.uk>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2,
11 * or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * mksquashfs.c
23 */
24
25#define FALSE 0
26#define TRUE 1
27
28#include <pwd.h>
29#include <grp.h>
30#include <time.h>
31#include <unistd.h>
32#include <stdio.h>
33#include <stddef.h>
34#include <sys/time.h>
35#include <sys/types.h>
36#include <sys/stat.h>
37#include <fcntl.h>
38#include <errno.h>
39#include <dirent.h>
40#include <string.h>
41#include <stdlib.h>
42#include <signal.h>
43#include <setjmp.h>
44#include <sys/ioctl.h>
45#include <sys/types.h>
46#include <sys/mman.h>
47#include <pthread.h>
48#include <math.h>
49#include <regex.h>
50#include <fnmatch.h>
51#include <sys/wait.h>
52
53#ifndef linux
54#define __BYTE_ORDER BYTE_ORDER
55#define __BIG_ENDIAN BIG_ENDIAN
56#define __LITTLE_ENDIAN LITTLE_ENDIAN
57#include <sys/sysctl.h>
58#else
59#include <endian.h>
60#include <sys/sysinfo.h>
61#endif
62
63#ifdef SQUASHFS_TRACE
64#define TRACE(s, args...) \
65		do { \
66			if(progress_enabled) \
67				printf("\n"); \
68			printf("mksquashfs: "s, ## args); \
69		} while(0)
70#else
71#define TRACE(s, args...)
72#endif
73
74#define INFO(s, args...) \
75		do {\
76			 if(!silent)\
77				printf("mksquashfs: "s, ## args);\
78		} while(0)
79
80#define ERROR(s, args...) \
81		do {\
82			pthread_mutex_lock(&progress_mutex); \
83			if(progress_enabled) \
84				fprintf(stderr, "\n"); \
85			fprintf(stderr, s, ## args);\
86			pthread_mutex_unlock(&progress_mutex); \
87		} while(0)
88
89#define EXIT_MKSQUASHFS() \
90		do {\
91			if(restore)\
92				restorefs();\
93			if(delete && destination_file && !block_device)\
94				unlink(destination_file);\
95			exit(1);\
96		} while(0)
97
98#define BAD_ERROR(s, args...) \
99		do {\
100			pthread_mutex_lock(&progress_mutex); \
101			if(progress_enabled) \
102				fprintf(stderr, "\n"); \
103			fprintf(stderr, "FATAL ERROR:" s, ##args);\
104			pthread_mutex_unlock(&progress_mutex); \
105			EXIT_MKSQUASHFS();\
106		} while(0)
107
108#include "squashfs_fs.h"
109#include "squashfs_swap.h"
110#include "mksquashfs.h"
111#include "sort.h"
112#include "pseudo.h"
113#include "compressor.h"
114#include "xattr.h"
115
116int delete = FALSE;
117int fd;
118int cur_uncompressed = 0, estimated_uncompressed = 0;
119int columns;
120
121/* filesystem flags for building */
122int comp_opts = FALSE;
123int no_xattrs = XATTR_DEF, noX = 0;
124int duplicate_checking = 1, noF = 0, no_fragments = 0, always_use_fragments = 0;
125int noI = 0, noD = 0;
126int silent = TRUE;
127long long global_uid = -1, global_gid = -1;
128int exportable = TRUE;
129int progress = TRUE;
130int progress_enabled = FALSE;
131int sparse_files = TRUE;
132int old_exclude = TRUE;
133int use_regex = FALSE;
134int first_freelist = TRUE;
135
136/* superblock attributes */
137int block_size = SQUASHFS_FILE_SIZE, block_log;
138unsigned int id_count = 0;
139int file_count = 0, sym_count = 0, dev_count = 0, dir_count = 0, fifo_count = 0,
140	sock_count = 0;
141
142/* write position within data section */
143long long bytes = 0, total_bytes = 0;
144
145/* in memory directory table - possibly compressed */
146char *directory_table = NULL;
147unsigned int directory_bytes = 0, directory_size = 0, total_directory_bytes = 0;
148
149/* cached directory table */
150char *directory_data_cache = NULL;
151unsigned int directory_cache_bytes = 0, directory_cache_size = 0;
152
153/* in memory inode table - possibly compressed */
154char *inode_table = NULL;
155unsigned int inode_bytes = 0, inode_size = 0, total_inode_bytes = 0;
156
157/* cached inode table */
158char *data_cache = NULL;
159unsigned int cache_bytes = 0, cache_size = 0, inode_count = 0;
160
161/* inode lookup table */
162squashfs_inode *inode_lookup_table = NULL;
163
164/* in memory directory data */
165#define I_COUNT_SIZE		128
166#define DIR_ENTRIES		32
167#define INODE_HASH_SIZE		65536
168#define INODE_HASH_MASK		(INODE_HASH_SIZE - 1)
169#define INODE_HASH(dev, ino)	(ino & INODE_HASH_MASK)
170
171struct cached_dir_index {
172	struct squashfs_dir_index	index;
173	char				*name;
174};
175
176struct directory {
177	unsigned int		start_block;
178	unsigned int		size;
179	unsigned char		*buff;
180	unsigned char		*p;
181	unsigned int		entry_count;
182	unsigned char		*entry_count_p;
183	unsigned int		i_count;
184	unsigned int		i_size;
185	struct cached_dir_index	*index;
186	unsigned char		*index_count_p;
187	unsigned int		inode_number;
188};
189
190struct inode_info *inode_info[INODE_HASH_SIZE];
191
192/* hash tables used to do fast duplicate searches in duplicate check */
193struct file_info *dupl[65536];
194int dup_files = 0;
195
196/* exclude file handling */
197/* list of exclude dirs/files */
198struct exclude_info {
199	dev_t			st_dev;
200	ino_t			st_ino;
201};
202
203#define EXCLUDE_SIZE 8192
204int exclude = 0;
205struct exclude_info *exclude_paths = NULL;
206int old_excluded(char *filename, struct stat *buf);
207
208struct path_entry {
209	char *name;
210	regex_t *preg;
211	struct pathname *paths;
212};
213
214struct pathname {
215	int names;
216	struct path_entry *name;
217};
218
219struct pathnames {
220	int count;
221	struct pathname *path[0];
222};
223#define PATHS_ALLOC_SIZE 10
224
225struct pathnames *paths = NULL;
226struct pathname *path = NULL;
227struct pathname *stickypath = NULL;
228int excluded(struct pathnames *paths, char *name, struct pathnames **new);
229
230/* fragment block data structures */
231int fragments = 0;
232struct file_buffer *fragment_data = NULL;
233int fragment_size = 0;
234
235struct fragment {
236	unsigned int		index;
237	int			offset;
238	int			size;
239};
240
241#define FRAG_SIZE 32768
242#define FRAG_INDEX (1LL << 32)
243
244struct squashfs_fragment_entry *fragment_table = NULL;
245int fragments_outstanding = 0;
246
247/* current inode number for directories and non directories */
248unsigned int dir_inode_no = 1;
249unsigned int inode_no = 0;
250unsigned int root_inode_number = 0;
251
252/* list of source dirs/files */
253int source = 0;
254char **source_path;
255
256/* list of root directory entries read from original filesystem */
257int old_root_entries = 0;
258struct old_root_entry_info {
259	char			*name;
260	struct inode_info	inode;
261};
262struct old_root_entry_info *old_root_entry;
263
264/* in memory file info */
265struct file_info {
266	long long		file_size;
267	long long		bytes;
268	unsigned short		checksum;
269	unsigned short		fragment_checksum;
270	long long		start;
271	unsigned int		*block_list;
272	struct file_info	*next;
273	struct fragment		*fragment;
274	char			checksum_flag;
275};
276
277/* count of how many times SIGINT or SIGQUIT has been sent */
278int interrupted = 0;
279
280/* flag if we're restoring existing filesystem */
281int restoring = 0;
282
283/* restore orignal filesystem state if appending to existing filesystem is
284 * cancelled */
285jmp_buf env;
286char *sdata_cache, *sdirectory_data_cache, *sdirectory_compressed;
287
288long long sbytes, stotal_bytes;
289
290unsigned int sinode_bytes, scache_bytes, sdirectory_bytes,
291	sdirectory_cache_bytes, sdirectory_compressed_bytes,
292	stotal_inode_bytes, stotal_directory_bytes,
293	sinode_count = 0, sfile_count, ssym_count, sdev_count,
294	sdir_count, sfifo_count, ssock_count, sdup_files;
295int sfragments;
296int restore = 0;
297int threads;
298
299/* flag whether destination file is a block device */
300int block_device = 0;
301
302/* flag indicating whether files are sorted using sort list(s) */
303int sorted = 0;
304
305/* save destination file name for deleting on error */
306char *destination_file = NULL;
307
308/* recovery file for abnormal exit on appending */
309char recovery_file[1024] = "";
310int recover = TRUE;
311
312/* struct describing a cache entry passed between threads */
313struct file_buffer {
314	struct cache *cache;
315	int keep;
316	long long file_size;
317	long long index;
318	long long block;
319	long long sequence;
320	int size;
321	int c_byte;
322	int used;
323	int	fragment;
324	int error;
325	struct file_buffer *hash_next;
326	struct file_buffer *hash_prev;
327	struct file_buffer *free_next;
328	struct file_buffer *free_prev;
329	struct file_buffer *next;
330	char data[0];
331};
332
333
334/* struct describing queues used to pass data between threads */
335struct queue {
336	int			size;
337	int			readp;
338	int			writep;
339	pthread_mutex_t		mutex;
340	pthread_cond_t		empty;
341	pthread_cond_t		full;
342	void			**data;
343};
344
345
346/* in memory uid tables */
347#define ID_ENTRIES 256
348#define ID_HASH(id) (id & (ID_ENTRIES - 1))
349#define ISA_UID 1
350#define ISA_GID 2
351struct id {
352	unsigned int id;
353	int	index;
354	char	flags;
355	struct id *next;
356};
357struct id *id_hash_table[ID_ENTRIES];
358struct id *id_table[SQUASHFS_IDS], *sid_table[SQUASHFS_IDS];
359unsigned int uid_count = 0, guid_count = 0;
360unsigned int sid_count = 0, suid_count = 0, sguid_count = 0;
361
362struct cache *reader_buffer, *writer_buffer, *fragment_buffer;
363struct queue *to_reader, *from_reader, *to_writer, *from_writer, *from_deflate,
364	*to_frag;
365pthread_t *thread, *deflator_thread, *frag_deflator_thread, progress_thread;
366pthread_mutex_t	fragment_mutex;
367pthread_cond_t fragment_waiting;
368pthread_mutex_t	pos_mutex;
369pthread_mutex_t progress_mutex;
370pthread_cond_t progress_wait;
371int rotate = 0;
372struct pseudo *pseudo = NULL;
373
374/* user options that control parallelisation */
375int processors = -1;
376/* default size of output buffer in Mbytes */
377#define WRITER_BUFFER_DEFAULT 512
378/* default size of input buffer in Mbytes */
379#define READER_BUFFER_DEFAULT 64
380/* default size of fragment buffer in Mbytes */
381#define FRAGMENT_BUFFER_DEFAULT 64
382int writer_buffer_size;
383
384/* compression operations */
385static struct compressor *comp;
386int compressor_opts_parsed = 0;
387void *stream = NULL;
388
389/* xattr stats */
390unsigned int xattr_bytes = 0, total_xattr_bytes = 0;
391
392char *read_from_disk(long long start, unsigned int avail_bytes);
393void add_old_root_entry(char *name, squashfs_inode inode, int inode_number,
394	int type);
395extern struct compressor  *read_super(int fd, struct squashfs_super_block *sBlk,
396	char *source);
397extern long long read_filesystem(char *root_name, int fd,
398	struct squashfs_super_block *sBlk, char **cinode_table, char **data_cache,
399	char **cdirectory_table, char **directory_data_cache,
400	unsigned int *last_directory_block, unsigned int *inode_dir_offset,
401	unsigned int *inode_dir_file_size, unsigned int *root_inode_size,
402	unsigned int *inode_dir_start_block, int *file_count, int *sym_count,
403	int *dev_count, int *dir_count, int *fifo_count, int *sock_count,
404	long long *uncompressed_file, unsigned int *uncompressed_inode,
405	unsigned int *uncompressed_directory,
406	unsigned int *inode_dir_inode_number,
407	unsigned int *inode_dir_parent_inode,
408	void (push_directory_entry)(char *, squashfs_inode, int, int),
409	struct squashfs_fragment_entry **fragment_table,
410	squashfs_inode **inode_lookup_table);
411extern int read_sort_file(char *filename, int source, char *source_path[]);
412extern void sort_files_and_write(struct dir_info *dir);
413struct file_info *duplicate(long long file_size, long long bytes,
414	unsigned int **block_list, long long *start, struct fragment **fragment,
415	struct file_buffer *file_buffer, int blocks, unsigned short checksum,
416	unsigned short fragment_checksum, int checksum_flag);
417struct dir_info *dir_scan1(char *, struct pathnames *, int (_readdir)(char *,
418	char *, struct dir_info *));
419struct dir_info *dir_scan2(struct dir_info *dir, struct pseudo *pseudo);
420void dir_scan3(squashfs_inode *inode, struct dir_info *dir_info);
421struct file_info *add_non_dup(long long file_size, long long bytes,
422	unsigned int *block_list, long long start, struct fragment *fragment,
423	unsigned short checksum, unsigned short fragment_checksum,
424	int checksum_flag);
425extern int generate_file_priorities(struct dir_info *dir, int priority,
426	struct stat *buf);
427extern struct priority_entry *priority_list[65536];
428void progress_bar(long long current, long long max, int columns);
429long long generic_write_table(int, void *, int, void *, int);
430void restorefs();
431
432
433struct queue *queue_init(int size)
434{
435	struct queue *queue = malloc(sizeof(struct queue));
436
437	if(queue == NULL)
438		goto failed;
439
440	queue->data = malloc(sizeof(void *) * (size + 1));
441	if(queue->data == NULL) {
442		free(queue);
443		goto failed;
444	}
445
446	queue->size = size + 1;
447	queue->readp = queue->writep = 0;
448	pthread_mutex_init(&queue->mutex, NULL);
449	pthread_cond_init(&queue->empty, NULL);
450	pthread_cond_init(&queue->full, NULL);
451
452	return queue;
453
454failed:
455	BAD_ERROR("Out of memory in queue_init\n");
456}
457
458
459void queue_put(struct queue *queue, void *data)
460{
461	int nextp;
462
463	pthread_mutex_lock(&queue->mutex);
464
465	while((nextp = (queue->writep + 1) % queue->size) == queue->readp)
466		pthread_cond_wait(&queue->full, &queue->mutex);
467
468	queue->data[queue->writep] = data;
469	queue->writep = nextp;
470	pthread_cond_signal(&queue->empty);
471	pthread_mutex_unlock(&queue->mutex);
472}
473
474
475void *queue_get(struct queue *queue)
476{
477	void *data;
478	pthread_mutex_lock(&queue->mutex);
479
480	while(queue->readp == queue->writep)
481		pthread_cond_wait(&queue->empty, &queue->mutex);
482
483	data = queue->data[queue->readp];
484	queue->readp = (queue->readp + 1) % queue->size;
485	pthread_cond_signal(&queue->full);
486	pthread_mutex_unlock(&queue->mutex);
487
488	return data;
489}
490
491
492/* Cache status struct.  Caches are used to keep
493  track of memory buffers passed between different threads */
494struct cache {
495	int	max_buffers;
496	int	count;
497	int	buffer_size;
498	pthread_mutex_t	mutex;
499	pthread_cond_t wait_for_free;
500	struct file_buffer *free_list;
501	struct file_buffer *hash_table[65536];
502};
503
504
505#define INSERT_LIST(NAME, TYPE) \
506void insert_##NAME##_list(TYPE **list, TYPE *entry) { \
507	if(*list) { \
508		entry->NAME##_next = *list; \
509		entry->NAME##_prev = (*list)->NAME##_prev; \
510		(*list)->NAME##_prev->NAME##_next = entry; \
511		(*list)->NAME##_prev = entry; \
512	} else { \
513		*list = entry; \
514		entry->NAME##_prev = entry->NAME##_next = entry; \
515	} \
516}
517
518
519#define REMOVE_LIST(NAME, TYPE) \
520void remove_##NAME##_list(TYPE **list, TYPE *entry) { \
521	if(entry->NAME##_prev == entry && entry->NAME##_next == entry) { \
522		/* only this entry in the list */ \
523		*list = NULL; \
524	} else if(entry->NAME##_prev != NULL && entry->NAME##_next != NULL) { \
525		/* more than one entry in the list */ \
526		entry->NAME##_next->NAME##_prev = entry->NAME##_prev; \
527		entry->NAME##_prev->NAME##_next = entry->NAME##_next; \
528		if(*list == entry) \
529			*list = entry->NAME##_next; \
530	} \
531	entry->NAME##_prev = entry->NAME##_next = NULL; \
532}
533
534
535#define CALCULATE_HASH(start)	(start & 0xffff) \
536
537
538/* Called with the cache mutex held */
539void insert_hash_table(struct cache *cache, struct file_buffer *entry)
540{
541	int hash = CALCULATE_HASH(entry->index);
542
543	entry->hash_next = cache->hash_table[hash];
544	cache->hash_table[hash] = entry;
545	entry->hash_prev = NULL;
546	if(entry->hash_next)
547		entry->hash_next->hash_prev = entry;
548}
549
550
551/* Called with the cache mutex held */
552void remove_hash_table(struct cache *cache, struct file_buffer *entry)
553{
554	if(entry->hash_prev)
555		entry->hash_prev->hash_next = entry->hash_next;
556	else
557		cache->hash_table[CALCULATE_HASH(entry->index)] =
558			entry->hash_next;
559	if(entry->hash_next)
560		entry->hash_next->hash_prev = entry->hash_prev;
561
562	entry->hash_prev = entry->hash_next = NULL;
563}
564
565
566/* Called with the cache mutex held */
567INSERT_LIST(free, struct file_buffer)
568
569/* Called with the cache mutex held */
570REMOVE_LIST(free, struct file_buffer)
571
572
573struct cache *cache_init(int buffer_size, int max_buffers)
574{
575	struct cache *cache = malloc(sizeof(struct cache));
576
577	if(cache == NULL)
578		BAD_ERROR("Out of memory in cache_init\n");
579
580	cache->max_buffers = max_buffers;
581	cache->buffer_size = buffer_size;
582	cache->count = 0;
583	cache->free_list = NULL;
584	memset(cache->hash_table, 0, sizeof(struct file_buffer *) * 65536);
585	pthread_mutex_init(&cache->mutex, NULL);
586	pthread_cond_init(&cache->wait_for_free, NULL);
587
588	return cache;
589}
590
591
592struct file_buffer *cache_lookup(struct cache *cache, long long index)
593{
594	/* Lookup block in the cache, if found return with usage count
595 	 * incremented, if not found return NULL */
596	int hash = CALCULATE_HASH(index);
597	struct file_buffer *entry;
598
599	pthread_mutex_lock(&cache->mutex);
600
601	for(entry = cache->hash_table[hash]; entry; entry = entry->hash_next)
602		if(entry->index == index)
603			break;
604
605	if(entry) {
606		/* found the block in the cache, increment used count and
607 		 * if necessary remove from free list so it won't disappear
608 		 */
609		entry->used ++;
610		remove_free_list(&cache->free_list, entry);
611	}
612
613	pthread_mutex_unlock(&cache->mutex);
614
615	return entry;
616}
617
618
619struct file_buffer *cache_get(struct cache *cache, long long index, int keep)
620{
621	/* Get a free block out of the cache indexed on index. */
622	struct file_buffer *entry;
623
624	pthread_mutex_lock(&cache->mutex);
625
626	while(1) {
627		/* first try to get a block from the free list */
628		if(first_freelist && cache->free_list) {
629			/* a block on the free_list is a "keep" block */
630			entry = cache->free_list;
631			remove_free_list(&cache->free_list, entry);
632			remove_hash_table(cache, entry);
633			break;
634		} else if(cache->count < cache->max_buffers) {
635			/* next try to allocate new block */
636			entry = malloc(sizeof(struct file_buffer) +
637				cache->buffer_size);
638			if(entry == NULL)
639				goto failed;
640			entry->cache = cache;
641			entry->free_prev = entry->free_next = NULL;
642			cache->count ++;
643			break;
644		} else if(!first_freelist && cache->free_list) {
645			/* a block on the free_list is a "keep" block */
646			entry = cache->free_list;
647			remove_free_list(&cache->free_list, entry);
648			remove_hash_table(cache, entry);
649			break;
650		} else
651			/* wait for a block */
652			pthread_cond_wait(&cache->wait_for_free, &cache->mutex);
653	}
654
655	/* initialise block and if a keep block insert into the hash table */
656	entry->used = 1;
657	entry->error = FALSE;
658	entry->keep = keep;
659	if(keep) {
660		entry->index = index;
661		insert_hash_table(cache, entry);
662	}
663	pthread_mutex_unlock(&cache->mutex);
664
665	return entry;
666
667failed:
668	pthread_mutex_unlock(&cache->mutex);
669	BAD_ERROR("Out of memory in cache_get\n");
670}
671
672
673void cache_rehash(struct file_buffer *entry, long long index)
674{
675	struct cache *cache = entry->cache;
676
677	pthread_mutex_lock(&cache->mutex);
678	if(entry->keep)
679		remove_hash_table(cache, entry);
680	entry->keep = TRUE;
681	entry->index = index;
682	insert_hash_table(cache, entry);
683	pthread_mutex_unlock(&cache->mutex);
684}
685
686
687void cache_block_put(struct file_buffer *entry)
688{
689	struct cache *cache;
690
691	/* finished with this cache entry, once the usage count reaches zero it
692 	 * can be reused and if a keep block put onto the free list.  As keep
693 	 * blocks remain accessible via the hash table they can be found
694 	 * getting a new lease of life before they are reused. */
695
696	if(entry == NULL)
697		return;
698
699	cache = entry->cache;
700
701	pthread_mutex_lock(&cache->mutex);
702
703	entry->used --;
704	if(entry->used == 0) {
705		if(entry->keep)
706			insert_free_list(&cache->free_list, entry);
707		else {
708			free(entry);
709			cache->count --;
710		}
711
712		/* One or more threads may be waiting on this block */
713		pthread_cond_signal(&cache->wait_for_free);
714	}
715
716	pthread_mutex_unlock(&cache->mutex);
717}
718
719
720#define MKINODE(A)	((squashfs_inode)(((squashfs_inode) inode_bytes << 16) \
721			+ (((char *)A) - data_cache)))
722
723
724inline void inc_progress_bar()
725{
726	cur_uncompressed ++;
727}
728
729
730inline void update_progress_bar()
731{
732	pthread_mutex_lock(&progress_mutex);
733	pthread_cond_signal(&progress_wait);
734	pthread_mutex_unlock(&progress_mutex);
735}
736
737
738inline void waitforthread(int i)
739{
740	TRACE("Waiting for thread %d\n", i);
741	while(thread[i] != 0)
742		sched_yield();
743}
744
745
746void restorefs()
747{
748	int i;
749
750	if(thread == NULL || thread[0] == 0)
751		return;
752
753	if(restoring++)
754		/*
755		 * Recursive failure when trying to restore filesystem!
756		 * Nothing to do except to exit, otherwise we'll just appear
757		 * to hang.  The user should be able to restore from the
758		 * recovery file (which is why it was added, in case of
759		 * catastrophic failure in Mksquashfs)
760		 */
761		exit(1);
762
763	ERROR("Exiting - restoring original filesystem!\n\n");
764
765	for(i = 0; i < 2 + processors * 2; i++)
766		if(thread[i])
767			pthread_kill(thread[i], SIGUSR1);
768	for(i = 0; i < 2 + processors * 2; i++)
769		waitforthread(i);
770	TRACE("All threads in signal handler\n");
771	bytes = sbytes;
772	memcpy(data_cache, sdata_cache, cache_bytes = scache_bytes);
773	memcpy(directory_data_cache, sdirectory_data_cache,
774		sdirectory_cache_bytes);
775	directory_cache_bytes = sdirectory_cache_bytes;
776	inode_bytes = sinode_bytes;
777	directory_bytes = sdirectory_bytes;
778 	memcpy(directory_table + directory_bytes, sdirectory_compressed,
779		sdirectory_compressed_bytes);
780 	directory_bytes += sdirectory_compressed_bytes;
781	total_bytes = stotal_bytes;
782	total_inode_bytes = stotal_inode_bytes;
783	total_directory_bytes = stotal_directory_bytes;
784	inode_count = sinode_count;
785	file_count = sfile_count;
786	sym_count = ssym_count;
787	dev_count = sdev_count;
788	dir_count = sdir_count;
789	fifo_count = sfifo_count;
790	sock_count = ssock_count;
791	dup_files = sdup_files;
792	fragments = sfragments;
793	fragment_size = 0;
794	id_count = sid_count;
795	restore_xattrs();
796	longjmp(env, 1);
797}
798
799
800void sighandler()
801{
802	if(++interrupted > 2)
803		return;
804	if(interrupted == 2)
805		restorefs();
806	else {
807		ERROR("Interrupting will restore original filesystem!\n");
808		ERROR("Interrupt again to quit\n");
809	}
810}
811
812
813void sighandler2()
814{
815	EXIT_MKSQUASHFS();
816}
817
818
819void sigusr1_handler()
820{
821	int i;
822	sigset_t sigmask;
823	pthread_t thread_id = pthread_self();
824
825	for(i = 0; i < (2 + processors * 2) && thread[i] != thread_id; i++);
826	thread[i] = (pthread_t) 0;
827
828	TRACE("Thread %d(%p) in sigusr1_handler\n", i, &thread_id);
829
830	sigemptyset(&sigmask);
831	sigaddset(&sigmask, SIGINT);
832	sigaddset(&sigmask, SIGQUIT);
833	sigaddset(&sigmask, SIGUSR1);
834	while(1) {
835		sigsuspend(&sigmask);
836		TRACE("After wait in sigusr1_handler :(\n");
837	}
838}
839
840
841void sigwinch_handler()
842{
843	struct winsize winsize;
844
845	if(ioctl(1, TIOCGWINSZ, &winsize) == -1) {
846		if(isatty(STDOUT_FILENO))
847			printf("TIOCGWINSZ ioctl failed, defaulting to 80 "
848				"columns\n");
849		columns = 80;
850	} else
851		columns = winsize.ws_col;
852}
853
854
855void sigalrm_handler()
856{
857	rotate = (rotate + 1) % 4;
858}
859
860
861int mangle2(void *strm, char *d, char *s, int size,
862	int block_size, int uncompressed, int data_block)
863{
864	int error, c_byte = 0;
865
866	if(!uncompressed) {
867		c_byte = compressor_compress(comp, strm, d, s, size, block_size,
868			 &error);
869		if(c_byte == -1)
870			BAD_ERROR("mangle2:: %s compress failed with error "
871				"code %d\n", comp->name, error);
872	}
873
874	if(c_byte == 0 || c_byte >= size) {
875		memcpy(d, s, size);
876		return size | (data_block ? SQUASHFS_COMPRESSED_BIT_BLOCK :
877			SQUASHFS_COMPRESSED_BIT);
878	}
879
880	return c_byte;
881}
882
883
884int mangle(char *d, char *s, int size, int block_size,
885	int uncompressed, int data_block)
886{
887	return mangle2(stream, d, s, size, block_size, uncompressed,
888		data_block);
889}
890
891
892void *get_inode(int req_size)
893{
894	int data_space;
895	unsigned short c_byte;
896
897	while(cache_bytes >= SQUASHFS_METADATA_SIZE) {
898		if((inode_size - inode_bytes) <
899				((SQUASHFS_METADATA_SIZE << 1)) + 2) {
900			void *it = realloc(inode_table, inode_size +
901				(SQUASHFS_METADATA_SIZE << 1) + 2);
902			if(it == NULL) {
903				goto failed;
904			}
905			inode_table = it;
906			inode_size += (SQUASHFS_METADATA_SIZE << 1) + 2;
907		}
908
909		c_byte = mangle(inode_table + inode_bytes + BLOCK_OFFSET,
910			data_cache, SQUASHFS_METADATA_SIZE,
911			SQUASHFS_METADATA_SIZE, noI, 0);
912		TRACE("Inode block @ 0x%x, size %d\n", inode_bytes, c_byte);
913		SQUASHFS_SWAP_SHORTS(&c_byte, inode_table + inode_bytes, 1);
914		inode_bytes += SQUASHFS_COMPRESSED_SIZE(c_byte) + BLOCK_OFFSET;
915		total_inode_bytes += SQUASHFS_METADATA_SIZE + BLOCK_OFFSET;
916		memmove(data_cache, data_cache + SQUASHFS_METADATA_SIZE,
917			cache_bytes - SQUASHFS_METADATA_SIZE);
918		cache_bytes -= SQUASHFS_METADATA_SIZE;
919	}
920
921	data_space = (cache_size - cache_bytes);
922	if(data_space < req_size) {
923			int realloc_size = cache_size == 0 ?
924				((req_size + SQUASHFS_METADATA_SIZE) &
925				~(SQUASHFS_METADATA_SIZE - 1)) : req_size -
926				data_space;
927
928			void *dc = realloc(data_cache, cache_size +
929				realloc_size);
930			if(dc == NULL) {
931				goto failed;
932			}
933			cache_size += realloc_size;
934			data_cache = dc;
935	}
936
937	cache_bytes += req_size;
938
939	return data_cache + cache_bytes - req_size;
940
941failed:
942	BAD_ERROR("Out of memory in inode table reallocation!\n");
943}
944
945
946int read_bytes(int fd, void *buff, int bytes)
947{
948	int res, count;
949
950	for(count = 0; count < bytes; count += res) {
951		res = read(fd, buff + count, bytes - count);
952		if(res < 1) {
953			if(res == 0)
954				goto bytes_read;
955			else if(errno != EINTR) {
956				ERROR("Read failed because %s\n",
957						strerror(errno));
958				return -1;
959			} else
960				res = 0;
961		}
962	}
963
964bytes_read:
965	return count;
966}
967
968
969int read_fs_bytes(int fd, long long byte, int bytes, void *buff)
970{
971	off_t off = byte;
972
973	TRACE("read_fs_bytes: reading from position 0x%llx, bytes %d\n",
974		byte, bytes);
975
976	pthread_mutex_lock(&pos_mutex);
977	if(lseek(fd, off, SEEK_SET) == -1) {
978		ERROR("Lseek on destination failed because %s\n",
979			strerror(errno));
980		goto failed;
981	}
982
983	if(read_bytes(fd, buff, bytes) < bytes) {
984		ERROR("Read on destination failed\n");
985		goto failed;
986	}
987
988	pthread_mutex_unlock(&pos_mutex);
989	return 1;
990
991failed:
992	pthread_mutex_unlock(&pos_mutex);
993	return 0;
994}
995
996
997int write_bytes(int fd, void *buff, int bytes)
998{
999	int res, count;
1000
1001	for(count = 0; count < bytes; count += res) {
1002		res = write(fd, buff + count, bytes - count);
1003		if(res == -1) {
1004			if(errno != EINTR) {
1005				ERROR("Write failed because %s\n",
1006						strerror(errno));
1007				return -1;
1008			}
1009			res = 0;
1010		}
1011	}
1012
1013	return 0;
1014}
1015
1016
1017void write_destination(int fd, long long byte, int bytes, void *buff)
1018{
1019	off_t off = byte;
1020
1021	if(!restoring)
1022		pthread_mutex_lock(&pos_mutex);
1023
1024	if(lseek(fd, off, SEEK_SET) == -1)
1025		BAD_ERROR("Lseek on destination failed because %s\n",
1026			strerror(errno));
1027
1028	if(write_bytes(fd, buff, bytes) == -1)
1029		BAD_ERROR("Write on destination failed\n");
1030
1031	if(!restoring)
1032		pthread_mutex_unlock(&pos_mutex);
1033}
1034
1035
1036long long write_inodes()
1037{
1038	unsigned short c_byte;
1039	int avail_bytes;
1040	char *datap = data_cache;
1041	long long start_bytes = bytes;
1042
1043	while(cache_bytes) {
1044		if(inode_size - inode_bytes <
1045				((SQUASHFS_METADATA_SIZE << 1) + 2)) {
1046			void *it = realloc(inode_table, inode_size +
1047				((SQUASHFS_METADATA_SIZE << 1) + 2));
1048			if(it == NULL) {
1049				BAD_ERROR("Out of memory in inode table "
1050					"reallocation!\n");
1051			}
1052			inode_size += (SQUASHFS_METADATA_SIZE << 1) + 2;
1053			inode_table = it;
1054		}
1055		avail_bytes = cache_bytes > SQUASHFS_METADATA_SIZE ?
1056			SQUASHFS_METADATA_SIZE : cache_bytes;
1057		c_byte = mangle(inode_table + inode_bytes + BLOCK_OFFSET, datap,
1058			avail_bytes, SQUASHFS_METADATA_SIZE, noI, 0);
1059		TRACE("Inode block @ 0x%x, size %d\n", inode_bytes, c_byte);
1060		SQUASHFS_SWAP_SHORTS(&c_byte, inode_table + inode_bytes, 1);
1061		inode_bytes += SQUASHFS_COMPRESSED_SIZE(c_byte) + BLOCK_OFFSET;
1062		total_inode_bytes += avail_bytes + BLOCK_OFFSET;
1063		datap += avail_bytes;
1064		cache_bytes -= avail_bytes;
1065	}
1066
1067	write_destination(fd, bytes, inode_bytes,  inode_table);
1068	bytes += inode_bytes;
1069
1070	return start_bytes;
1071}
1072
1073
1074long long write_directories()
1075{
1076	unsigned short c_byte;
1077	int avail_bytes;
1078	char *directoryp = directory_data_cache;
1079	long long start_bytes = bytes;
1080
1081	while(directory_cache_bytes) {
1082		if(directory_size - directory_bytes <
1083				((SQUASHFS_METADATA_SIZE << 1) + 2)) {
1084			void *dt = realloc(directory_table,
1085				directory_size + ((SQUASHFS_METADATA_SIZE << 1)
1086				+ 2));
1087			if(dt == NULL) {
1088				BAD_ERROR("Out of memory in directory table "
1089					"reallocation!\n");
1090			}
1091			directory_size += (SQUASHFS_METADATA_SIZE << 1) + 2;
1092			directory_table = dt;
1093		}
1094		avail_bytes = directory_cache_bytes > SQUASHFS_METADATA_SIZE ?
1095			SQUASHFS_METADATA_SIZE : directory_cache_bytes;
1096		c_byte = mangle(directory_table + directory_bytes +
1097			BLOCK_OFFSET, directoryp, avail_bytes,
1098			SQUASHFS_METADATA_SIZE, noI, 0);
1099		TRACE("Directory block @ 0x%x, size %d\n", directory_bytes,
1100			c_byte);
1101		SQUASHFS_SWAP_SHORTS(&c_byte,
1102			directory_table + directory_bytes, 1);
1103		directory_bytes += SQUASHFS_COMPRESSED_SIZE(c_byte) +
1104			BLOCK_OFFSET;
1105		total_directory_bytes += avail_bytes + BLOCK_OFFSET;
1106		directoryp += avail_bytes;
1107		directory_cache_bytes -= avail_bytes;
1108	}
1109	write_destination(fd, bytes, directory_bytes, directory_table);
1110	bytes += directory_bytes;
1111
1112	return start_bytes;
1113}
1114
1115
1116long long write_id_table()
1117{
1118	unsigned int id_bytes = SQUASHFS_ID_BYTES(id_count);
1119	unsigned int p[id_count];
1120	int i;
1121
1122	TRACE("write_id_table: ids %d, id_bytes %d\n", id_count, id_bytes);
1123	for(i = 0; i < id_count; i++) {
1124		TRACE("write_id_table: id index %d, id %d", i, id_table[i]->id);
1125		SQUASHFS_SWAP_INTS(&id_table[i]->id, p + i, 1);
1126	}
1127
1128	return generic_write_table(id_bytes, p, 0, NULL, noI);
1129}
1130
1131
1132struct id *get_id(unsigned int id)
1133{
1134	int hash = ID_HASH(id);
1135	struct id *entry = id_hash_table[hash];
1136
1137	for(; entry; entry = entry->next)
1138		if(entry->id == id)
1139			break;
1140
1141	return entry;
1142}
1143
1144
1145struct id *create_id(unsigned int id)
1146{
1147	int hash = ID_HASH(id);
1148	struct id *entry = malloc(sizeof(struct id));
1149	if(entry == NULL)
1150		BAD_ERROR("Out of memory in create_id\n");
1151	entry->id = id;
1152	entry->index = id_count ++;
1153	entry->flags = 0;
1154	entry->next = id_hash_table[hash];
1155	id_hash_table[hash] = entry;
1156	id_table[entry->index] = entry;
1157	return entry;
1158}
1159
1160
1161unsigned int get_uid(unsigned int uid)
1162{
1163	struct id *entry = get_id(uid);
1164
1165	if(entry == NULL) {
1166		if(id_count == SQUASHFS_IDS)
1167			BAD_ERROR("Out of uids!\n");
1168		entry = create_id(uid);
1169	}
1170
1171	if((entry->flags & ISA_UID) == 0) {
1172		entry->flags |= ISA_UID;
1173		uid_count ++;
1174	}
1175
1176	return entry->index;
1177}
1178
1179
1180unsigned int get_guid(unsigned int guid)
1181{
1182	struct id *entry = get_id(guid);
1183
1184	if(entry == NULL) {
1185		if(id_count == SQUASHFS_IDS)
1186			BAD_ERROR("Out of gids!\n");
1187		entry = create_id(guid);
1188	}
1189
1190	if((entry->flags & ISA_GID) == 0) {
1191		entry->flags |= ISA_GID;
1192		guid_count ++;
1193	}
1194
1195	return entry->index;
1196}
1197
1198
1199int create_inode(squashfs_inode *i_no, struct dir_info *dir_info,
1200	struct dir_ent *dir_ent, int type, long long byte_size,
1201	long long start_block, unsigned int offset, unsigned int *block_list,
1202	struct fragment *fragment, struct directory *dir_in, long long sparse)
1203{
1204	struct stat *buf = &dir_ent->inode->buf;
1205	union squashfs_inode_header inode_header;
1206	struct squashfs_base_inode_header *base = &inode_header.base;
1207	void *inode;
1208	char *filename = dir_ent->pathname;
1209	int nlink = dir_ent->inode->nlink;
1210	int inode_number = type == SQUASHFS_DIR_TYPE ?
1211		dir_ent->inode->inode_number :
1212		dir_ent->inode->inode_number + dir_inode_no;
1213	int xattr = read_xattrs(dir_ent);
1214
1215	switch(type) {
1216	case SQUASHFS_FILE_TYPE:
1217		if(dir_ent->inode->nlink > 1 ||
1218				byte_size >= (1LL << 32) ||
1219				start_block >= (1LL << 32) ||
1220				sparse || IS_XATTR(xattr))
1221			type = SQUASHFS_LREG_TYPE;
1222		break;
1223	case SQUASHFS_DIR_TYPE:
1224		if(dir_info->dir_is_ldir || IS_XATTR(xattr))
1225			type = SQUASHFS_LDIR_TYPE;
1226		break;
1227	case SQUASHFS_SYMLINK_TYPE:
1228		if(IS_XATTR(xattr))
1229			type = SQUASHFS_LSYMLINK_TYPE;
1230		break;
1231	case SQUASHFS_BLKDEV_TYPE:
1232		if(IS_XATTR(xattr))
1233			type = SQUASHFS_LBLKDEV_TYPE;
1234		break;
1235	case SQUASHFS_CHRDEV_TYPE:
1236		if(IS_XATTR(xattr))
1237			type = SQUASHFS_LCHRDEV_TYPE;
1238		break;
1239	case SQUASHFS_FIFO_TYPE:
1240		if(IS_XATTR(xattr))
1241			type = SQUASHFS_LFIFO_TYPE;
1242		break;
1243	case SQUASHFS_SOCKET_TYPE:
1244		if(IS_XATTR(xattr))
1245			type = SQUASHFS_LSOCKET_TYPE;
1246		break;
1247	}
1248
1249	base->mode = SQUASHFS_MODE(buf->st_mode);
1250	base->uid = get_uid((unsigned int) global_uid == -1 ?
1251		buf->st_uid : global_uid);
1252	base->inode_type = type;
1253	base->guid = get_guid((unsigned int) global_gid == -1 ?
1254		buf->st_gid : global_gid);
1255	base->mtime = buf->st_mtime;
1256	base->inode_number = inode_number;
1257
1258	if(type == SQUASHFS_FILE_TYPE) {
1259		int i;
1260		struct squashfs_reg_inode_header *reg = &inode_header.reg;
1261		size_t off = offsetof(struct squashfs_reg_inode_header, block_list);
1262
1263		inode = get_inode(sizeof(*reg) + offset * sizeof(unsigned int));
1264		reg->file_size = byte_size;
1265		reg->start_block = start_block;
1266		reg->fragment = fragment->index;
1267		reg->offset = fragment->offset;
1268		SQUASHFS_SWAP_REG_INODE_HEADER(reg, inode);
1269		SQUASHFS_SWAP_INTS(block_list, inode + off, offset);
1270		TRACE("File inode, file_size %lld, start_block 0x%llx, blocks "
1271			"%d, fragment %d, offset %d, size %d\n", byte_size,
1272			start_block, offset, fragment->index, fragment->offset,
1273			fragment->size);
1274		for(i = 0; i < offset; i++)
1275			TRACE("Block %d, size %d\n", i, block_list[i]);
1276	}
1277	else if(type == SQUASHFS_LREG_TYPE) {
1278		int i;
1279		struct squashfs_lreg_inode_header *reg = &inode_header.lreg;
1280		size_t off = offsetof(struct squashfs_lreg_inode_header, block_list);
1281
1282		inode = get_inode(sizeof(*reg) + offset * sizeof(unsigned int));
1283		reg->nlink = nlink;
1284		reg->file_size = byte_size;
1285		reg->start_block = start_block;
1286		reg->fragment = fragment->index;
1287		reg->offset = fragment->offset;
1288		if(sparse && sparse >= byte_size)
1289			sparse = byte_size - 1;
1290		reg->sparse = sparse;
1291		reg->xattr = xattr;
1292		SQUASHFS_SWAP_LREG_INODE_HEADER(reg, inode);
1293		SQUASHFS_SWAP_INTS(block_list, inode + off, offset);
1294		TRACE("Long file inode, file_size %lld, start_block 0x%llx, "
1295			"blocks %d, fragment %d, offset %d, size %d, nlink %d"
1296			"\n", byte_size, start_block, offset, fragment->index,
1297			fragment->offset, fragment->size, nlink);
1298		for(i = 0; i < offset; i++)
1299			TRACE("Block %d, size %d\n", i, block_list[i]);
1300	}
1301	else if(type == SQUASHFS_LDIR_TYPE) {
1302		int i;
1303		unsigned char *p;
1304		struct squashfs_ldir_inode_header *dir = &inode_header.ldir;
1305		struct cached_dir_index *index = dir_in->index;
1306		unsigned int i_count = dir_in->i_count;
1307		unsigned int i_size = dir_in->i_size;
1308
1309		if(byte_size >= 1 << 27)
1310			BAD_ERROR("directory greater than 2^27-1 bytes!\n");
1311
1312		inode = get_inode(sizeof(*dir) + i_size);
1313		dir->inode_type = SQUASHFS_LDIR_TYPE;
1314		dir->nlink = dir_ent->dir->directory_count + 2;
1315		dir->file_size = byte_size;
1316		dir->offset = offset;
1317		dir->start_block = start_block;
1318		dir->i_count = i_count;
1319		dir->parent_inode = dir_ent->our_dir ?
1320			dir_ent->our_dir->dir_ent->inode->inode_number :
1321			dir_inode_no + inode_no;
1322		dir->xattr = xattr;
1323
1324		SQUASHFS_SWAP_LDIR_INODE_HEADER(dir, inode);
1325		p = inode + offsetof(struct squashfs_ldir_inode_header, index);
1326		for(i = 0; i < i_count; i++) {
1327			SQUASHFS_SWAP_DIR_INDEX(&index[i].index, p);
1328			p += offsetof(struct squashfs_dir_index, name);
1329			memcpy(p, index[i].name, index[i].index.size + 1);
1330			p += index[i].index.size + 1;
1331		}
1332		TRACE("Long directory inode, file_size %lld, start_block "
1333			"0x%llx, offset 0x%x, nlink %d\n", byte_size,
1334			start_block, offset, dir_ent->dir->directory_count + 2);
1335	}
1336	else if(type == SQUASHFS_DIR_TYPE) {
1337		struct squashfs_dir_inode_header *dir = &inode_header.dir;
1338
1339		inode = get_inode(sizeof(*dir));
1340		dir->nlink = dir_ent->dir->directory_count + 2;
1341		dir->file_size = byte_size;
1342		dir->offset = offset;
1343		dir->start_block = start_block;
1344		dir->parent_inode = dir_ent->our_dir ?
1345			dir_ent->our_dir->dir_ent->inode->inode_number :
1346			dir_inode_no + inode_no;
1347		SQUASHFS_SWAP_DIR_INODE_HEADER(dir, inode);
1348		TRACE("Directory inode, file_size %lld, start_block 0x%llx, "
1349			"offset 0x%x, nlink %d\n", byte_size, start_block,
1350			offset, dir_ent->dir->directory_count + 2);
1351	}
1352	else if(type == SQUASHFS_CHRDEV_TYPE || type == SQUASHFS_BLKDEV_TYPE) {
1353		struct squashfs_dev_inode_header *dev = &inode_header.dev;
1354		unsigned int major = major(buf->st_rdev);
1355		unsigned int minor = minor(buf->st_rdev);
1356
1357		if(major > 0xfff) {
1358			ERROR("Major %d out of range in device node %s, "
1359				"truncating to %d\n", major, filename,
1360				major & 0xfff);
1361			major &= 0xfff;
1362		}
1363		if(minor > 0xfffff) {
1364			ERROR("Minor %d out of range in device node %s, "
1365				"truncating to %d\n", minor, filename,
1366				minor & 0xfffff);
1367			minor &= 0xfffff;
1368		}
1369		inode = get_inode(sizeof(*dev));
1370		dev->nlink = nlink;
1371		dev->rdev = (major << 8) | (minor & 0xff) |
1372				((minor & ~0xff) << 12);
1373		SQUASHFS_SWAP_DEV_INODE_HEADER(dev, inode);
1374		TRACE("Device inode, rdev 0x%x, nlink %d\n", dev->rdev, nlink);
1375	}
1376	else if(type == SQUASHFS_LCHRDEV_TYPE || type == SQUASHFS_LBLKDEV_TYPE) {
1377		struct squashfs_ldev_inode_header *dev = &inode_header.ldev;
1378		unsigned int major = major(buf->st_rdev);
1379		unsigned int minor = minor(buf->st_rdev);
1380
1381		if(major > 0xfff) {
1382			ERROR("Major %d out of range in device node %s, "
1383				"truncating to %d\n", major, filename,
1384				major & 0xfff);
1385			major &= 0xfff;
1386		}
1387		if(minor > 0xfffff) {
1388			ERROR("Minor %d out of range in device node %s, "
1389				"truncating to %d\n", minor, filename,
1390				minor & 0xfffff);
1391			minor &= 0xfffff;
1392		}
1393		inode = get_inode(sizeof(*dev));
1394		dev->nlink = nlink;
1395		dev->rdev = (major << 8) | (minor & 0xff) |
1396				((minor & ~0xff) << 12);
1397		dev->xattr = xattr;
1398		SQUASHFS_SWAP_LDEV_INODE_HEADER(dev, inode);
1399		TRACE("Device inode, rdev 0x%x, nlink %d\n", dev->rdev, nlink);
1400	}
1401	else if(type == SQUASHFS_SYMLINK_TYPE) {
1402		struct squashfs_symlink_inode_header *symlink = &inode_header.symlink;
1403		int byte;
1404		char buff[65536];
1405		size_t off = offsetof(struct squashfs_symlink_inode_header, symlink);
1406
1407		byte = readlink(filename, buff, 65536);
1408		if(byte == -1) {
1409			ERROR("Failed to read symlink %s, creating empty "
1410				"symlink\n", filename);
1411			byte = 0;
1412		}
1413
1414		if(byte == 65536) {
1415			ERROR("Symlink %s is greater than 65536 bytes! "
1416				"Creating empty symlink\n", filename);
1417			byte = 0;
1418		}
1419
1420		inode = get_inode(sizeof(*symlink) + byte);
1421		symlink->nlink = nlink;
1422		symlink->symlink_size = byte;
1423		SQUASHFS_SWAP_SYMLINK_INODE_HEADER(symlink, inode);
1424		strncpy(inode + off, buff, byte);
1425		TRACE("Symbolic link inode, symlink_size %d, nlink %d\n", byte,
1426			nlink);
1427	}
1428	else if(type == SQUASHFS_LSYMLINK_TYPE) {
1429		struct squashfs_symlink_inode_header *symlink = &inode_header.symlink;
1430		int byte;
1431		char buff[65536];
1432		size_t off = offsetof(struct squashfs_symlink_inode_header, symlink);
1433
1434		byte = readlink(filename, buff, 65536);
1435		if(byte == -1) {
1436			ERROR("Failed to read symlink %s, creating empty "
1437				"symlink\n", filename);
1438			byte = 0;
1439		}
1440
1441		if(byte == 65536) {
1442			ERROR("Symlink %s is greater than 65536 bytes! "
1443				"Creating empty symlink\n", filename);
1444			byte = 0;
1445		}
1446
1447		inode = get_inode(sizeof(*symlink) + byte +
1448						sizeof(unsigned int));
1449		symlink->nlink = nlink;
1450		symlink->symlink_size = byte;
1451		SQUASHFS_SWAP_SYMLINK_INODE_HEADER(symlink, inode);
1452		strncpy(inode + off, buff, byte);
1453		SQUASHFS_SWAP_INTS(&xattr, inode + off + byte, 1);
1454		TRACE("Symbolic link inode, symlink_size %d, nlink %d\n", byte,
1455			nlink);
1456	}
1457	else if(type == SQUASHFS_FIFO_TYPE || type == SQUASHFS_SOCKET_TYPE) {
1458		struct squashfs_ipc_inode_header *ipc = &inode_header.ipc;
1459
1460		inode = get_inode(sizeof(*ipc));
1461		ipc->nlink = nlink;
1462		SQUASHFS_SWAP_IPC_INODE_HEADER(ipc, inode);
1463		TRACE("ipc inode, type %s, nlink %d\n", type ==
1464			SQUASHFS_FIFO_TYPE ? "fifo" : "socket", nlink);
1465	}
1466	else if(type == SQUASHFS_LFIFO_TYPE || type == SQUASHFS_LSOCKET_TYPE) {
1467		struct squashfs_lipc_inode_header *ipc = &inode_header.lipc;
1468
1469		inode = get_inode(sizeof(*ipc));
1470		ipc->nlink = nlink;
1471		ipc->xattr = xattr;
1472		SQUASHFS_SWAP_LIPC_INODE_HEADER(ipc, inode);
1473		TRACE("ipc inode, type %s, nlink %d\n", type ==
1474			SQUASHFS_FIFO_TYPE ? "fifo" : "socket", nlink);
1475	} else
1476		BAD_ERROR("Unrecognised inode %d in create_inode\n", type);
1477
1478	*i_no = MKINODE(inode);
1479	inode_count ++;
1480
1481	TRACE("Created inode 0x%llx, type %d, uid %d, guid %d\n", *i_no, type,
1482		base->uid, base->guid);
1483
1484	return TRUE;
1485}
1486
1487
1488void scan3_init_dir(struct directory *dir)
1489{
1490	dir->buff = malloc(SQUASHFS_METADATA_SIZE);
1491	if(dir->buff == NULL) {
1492		BAD_ERROR("Out of memory allocating directory buffer\n");
1493	}
1494
1495	dir->size = SQUASHFS_METADATA_SIZE;
1496	dir->p = dir->index_count_p = dir->buff;
1497	dir->entry_count = 256;
1498	dir->entry_count_p = NULL;
1499	dir->index = NULL;
1500	dir->i_count = dir->i_size = 0;
1501}
1502
1503
1504void add_dir(squashfs_inode inode, unsigned int inode_number, char *name,
1505	int type, struct directory *dir)
1506{
1507	unsigned char *buff;
1508	struct squashfs_dir_entry idir;
1509	unsigned int start_block = inode >> 16;
1510	unsigned int offset = inode & 0xffff;
1511	unsigned int size = strlen(name);
1512	size_t name_off = offsetof(struct squashfs_dir_entry, name);
1513
1514	if(size > SQUASHFS_NAME_LEN) {
1515		size = SQUASHFS_NAME_LEN;
1516		ERROR("Filename is greater than %d characters, truncating! ..."
1517			"\n", SQUASHFS_NAME_LEN);
1518	}
1519
1520	if(dir->p + sizeof(struct squashfs_dir_entry) + size +
1521			sizeof(struct squashfs_dir_header)
1522			>= dir->buff + dir->size) {
1523		buff = realloc(dir->buff, dir->size += SQUASHFS_METADATA_SIZE);
1524		if(buff == NULL)  {
1525			BAD_ERROR("Out of memory reallocating directory buffer"
1526				"\n");
1527		}
1528
1529		dir->p = (dir->p - dir->buff) + buff;
1530		if(dir->entry_count_p)
1531			dir->entry_count_p = (dir->entry_count_p - dir->buff +
1532			buff);
1533		dir->index_count_p = dir->index_count_p - dir->buff + buff;
1534		dir->buff = buff;
1535	}
1536
1537	if(dir->entry_count == 256 || start_block != dir->start_block ||
1538			((dir->entry_count_p != NULL) &&
1539			((dir->p + sizeof(struct squashfs_dir_entry) + size -
1540			dir->index_count_p) > SQUASHFS_METADATA_SIZE)) ||
1541			((long long) inode_number - dir->inode_number) > 32767
1542			|| ((long long) inode_number - dir->inode_number)
1543			< -32768) {
1544		if(dir->entry_count_p) {
1545			struct squashfs_dir_header dir_header;
1546
1547			if((dir->p + sizeof(struct squashfs_dir_entry) + size -
1548					dir->index_count_p) >
1549					SQUASHFS_METADATA_SIZE) {
1550				if(dir->i_count % I_COUNT_SIZE == 0) {
1551					dir->index = realloc(dir->index,
1552						(dir->i_count + I_COUNT_SIZE) *
1553						sizeof(struct cached_dir_index));
1554					if(dir->index == NULL)
1555						BAD_ERROR("Out of memory in "
1556							"directory index table "
1557							"reallocation!\n");
1558				}
1559				dir->index[dir->i_count].index.index =
1560					dir->p - dir->buff;
1561				dir->index[dir->i_count].index.size = size - 1;
1562				dir->index[dir->i_count++].name = name;
1563				dir->i_size += sizeof(struct squashfs_dir_index)
1564					+ size;
1565				dir->index_count_p = dir->p;
1566			}
1567
1568			dir_header.count = dir->entry_count - 1;
1569			dir_header.start_block = dir->start_block;
1570			dir_header.inode_number = dir->inode_number;
1571			SQUASHFS_SWAP_DIR_HEADER(&dir_header,
1572				dir->entry_count_p);
1573
1574		}
1575
1576
1577		dir->entry_count_p = dir->p;
1578		dir->start_block = start_block;
1579		dir->entry_count = 0;
1580		dir->inode_number = inode_number;
1581		dir->p += sizeof(struct squashfs_dir_header);
1582	}
1583
1584	idir.offset = offset;
1585	idir.type = type;
1586	idir.size = size - 1;
1587	idir.inode_number = ((long long) inode_number - dir->inode_number);
1588	SQUASHFS_SWAP_DIR_ENTRY(&idir, dir->p);
1589	strncpy((char *) dir->p + name_off, name, size);
1590	dir->p += sizeof(struct squashfs_dir_entry) + size;
1591	dir->entry_count ++;
1592}
1593
1594
1595void write_dir(squashfs_inode *inode, struct dir_info *dir_info,
1596	struct directory *dir)
1597{
1598	unsigned int dir_size = dir->p - dir->buff;
1599	int data_space = directory_cache_size - directory_cache_bytes;
1600	unsigned int directory_block, directory_offset, i_count, index;
1601	unsigned short c_byte;
1602
1603	if(data_space < dir_size) {
1604		int realloc_size = directory_cache_size == 0 ?
1605			((dir_size + SQUASHFS_METADATA_SIZE) &
1606			~(SQUASHFS_METADATA_SIZE - 1)) : dir_size - data_space;
1607
1608		void *dc = realloc(directory_data_cache,
1609			directory_cache_size + realloc_size);
1610		if(dc == NULL) {
1611			goto failed;
1612		}
1613		directory_cache_size += realloc_size;
1614		directory_data_cache = dc;
1615	}
1616
1617	if(dir_size) {
1618		struct squashfs_dir_header dir_header;
1619
1620		dir_header.count = dir->entry_count - 1;
1621		dir_header.start_block = dir->start_block;
1622		dir_header.inode_number = dir->inode_number;
1623		SQUASHFS_SWAP_DIR_HEADER(&dir_header, dir->entry_count_p);
1624		memcpy(directory_data_cache + directory_cache_bytes, dir->buff,
1625			dir_size);
1626	}
1627	directory_offset = directory_cache_bytes;
1628	directory_block = directory_bytes;
1629	directory_cache_bytes += dir_size;
1630	i_count = 0;
1631	index = SQUASHFS_METADATA_SIZE - directory_offset;
1632
1633	while(1) {
1634		while(i_count < dir->i_count &&
1635				dir->index[i_count].index.index < index)
1636			dir->index[i_count++].index.start_block =
1637				directory_bytes;
1638		index += SQUASHFS_METADATA_SIZE;
1639
1640		if(directory_cache_bytes < SQUASHFS_METADATA_SIZE)
1641			break;
1642
1643		if((directory_size - directory_bytes) <
1644					((SQUASHFS_METADATA_SIZE << 1) + 2)) {
1645			void *dt = realloc(directory_table,
1646				directory_size + (SQUASHFS_METADATA_SIZE << 1)
1647				+ 2);
1648			if(dt == NULL) {
1649				goto failed;
1650			}
1651			directory_size += SQUASHFS_METADATA_SIZE << 1;
1652			directory_table = dt;
1653		}
1654
1655		c_byte = mangle(directory_table + directory_bytes +
1656				BLOCK_OFFSET, directory_data_cache,
1657				SQUASHFS_METADATA_SIZE, SQUASHFS_METADATA_SIZE,
1658				noI, 0);
1659		TRACE("Directory block @ 0x%x, size %d\n", directory_bytes,
1660			c_byte);
1661		SQUASHFS_SWAP_SHORTS(&c_byte,
1662			directory_table + directory_bytes, 1);
1663		directory_bytes += SQUASHFS_COMPRESSED_SIZE(c_byte) +
1664			BLOCK_OFFSET;
1665		total_directory_bytes += SQUASHFS_METADATA_SIZE + BLOCK_OFFSET;
1666		memmove(directory_data_cache, directory_data_cache +
1667			SQUASHFS_METADATA_SIZE, directory_cache_bytes -
1668			SQUASHFS_METADATA_SIZE);
1669		directory_cache_bytes -= SQUASHFS_METADATA_SIZE;
1670	}
1671
1672	create_inode(inode, dir_info, dir_info->dir_ent, SQUASHFS_DIR_TYPE,
1673		dir_size + 3, directory_block, directory_offset, NULL, NULL,
1674		dir, 0);
1675
1676#ifdef SQUASHFS_TRACE
1677	{
1678		unsigned char *dirp;
1679		int count;
1680
1681		TRACE("Directory contents of inode 0x%llx\n", *inode);
1682		dirp = dir->buff;
1683		while(dirp < dir->p) {
1684			char buffer[SQUASHFS_NAME_LEN + 1];
1685			struct squashfs_dir_entry idir, *idirp;
1686			struct squashfs_dir_header dirh;
1687			SQUASHFS_SWAP_DIR_HEADER((struct squashfs_dir_header *) dirp,
1688				&dirh);
1689			count = dirh.count + 1;
1690			dirp += sizeof(struct squashfs_dir_header);
1691
1692			TRACE("\tStart block 0x%x, count %d\n",
1693				dirh.start_block, count);
1694
1695			while(count--) {
1696				idirp = (struct squashfs_dir_entry *) dirp;
1697				SQUASHFS_SWAP_DIR_ENTRY(idirp, &idir);
1698				strncpy(buffer, idirp->name, idir.size + 1);
1699				buffer[idir.size + 1] = '\0';
1700				TRACE("\t\tname %s, inode offset 0x%x, type "
1701					"%d\n", buffer, idir.offset, idir.type);
1702				dirp += sizeof(struct squashfs_dir_entry) + idir.size +
1703					1;
1704			}
1705		}
1706	}
1707#endif
1708	dir_count ++;
1709
1710	return;
1711
1712failed:
1713	BAD_ERROR("Out of memory in directory table reallocation!\n");
1714}
1715
1716
1717struct file_buffer *get_fragment(struct fragment *fragment)
1718{
1719	struct squashfs_fragment_entry *disk_fragment;
1720	int res, size;
1721	long long start_block;
1722	struct file_buffer *buffer, *compressed_buffer;
1723
1724	if(fragment->index == SQUASHFS_INVALID_FRAG)
1725		return NULL;
1726
1727	buffer = cache_lookup(fragment_buffer, fragment->index);
1728	if(buffer)
1729		return buffer;
1730
1731	compressed_buffer = cache_lookup(writer_buffer, fragment->index +
1732		FRAG_INDEX);
1733
1734	buffer = cache_get(fragment_buffer, fragment->index, 1);
1735
1736	pthread_mutex_lock(&fragment_mutex);
1737	disk_fragment = &fragment_table[fragment->index];
1738	size = SQUASHFS_COMPRESSED_SIZE_BLOCK(disk_fragment->size);
1739	start_block = disk_fragment->start_block;
1740	pthread_mutex_unlock(&fragment_mutex);
1741
1742	if(SQUASHFS_COMPRESSED_BLOCK(disk_fragment->size)) {
1743		int error;
1744		char *data;
1745
1746		if(compressed_buffer)
1747			data = compressed_buffer->data;
1748		else
1749			data = read_from_disk(start_block, size);
1750
1751		res = compressor_uncompress(comp, buffer->data, data, size,
1752			block_size, &error);
1753		if(res == -1)
1754			BAD_ERROR("%s uncompress failed with error code %d\n",
1755				comp->name, error);
1756	} else if(compressed_buffer)
1757		memcpy(buffer->data, compressed_buffer->data, size);
1758	else {
1759		res = read_fs_bytes(fd, start_block, size, buffer->data);
1760		if(res == 0)
1761			EXIT_MKSQUASHFS();
1762	}
1763
1764	cache_block_put(compressed_buffer);
1765
1766	return buffer;
1767}
1768
1769
1770struct frag_locked {
1771	struct file_buffer *buffer;
1772	int c_byte;
1773	int fragment;
1774	struct frag_locked *fragment_prev;
1775	struct frag_locked *fragment_next;
1776};
1777
1778int fragments_locked = FALSE;
1779struct frag_locked *frag_locked_list = NULL;
1780
1781INSERT_LIST(fragment, struct frag_locked)
1782REMOVE_LIST(fragment, struct frag_locked)
1783
1784int lock_fragments()
1785{
1786	int count;
1787	pthread_mutex_lock(&fragment_mutex);
1788	fragments_locked = TRUE;
1789	count = fragments_outstanding;
1790	pthread_mutex_unlock(&fragment_mutex);
1791	return count;
1792}
1793
1794
1795void unlock_fragments()
1796{
1797	struct frag_locked *entry;
1798	int compressed_size;
1799
1800	pthread_mutex_lock(&fragment_mutex);
1801	while(frag_locked_list) {
1802		entry = frag_locked_list;
1803		remove_fragment_list(&frag_locked_list, entry);
1804		compressed_size = SQUASHFS_COMPRESSED_SIZE_BLOCK(entry->c_byte);
1805		fragment_table[entry->fragment].size = entry->c_byte;
1806		fragment_table[entry->fragment].start_block = bytes;
1807		entry->buffer->block = bytes;
1808		bytes += compressed_size;
1809		fragments_outstanding --;
1810		queue_put(to_writer, entry->buffer);
1811		TRACE("fragment_locked writing fragment %d, compressed size %d"
1812			"\n", entry->fragment, compressed_size);
1813		free(entry);
1814	}
1815	fragments_locked = FALSE;
1816	pthread_mutex_unlock(&fragment_mutex);
1817}
1818
1819
1820void add_pending_fragment(struct file_buffer *write_buffer, int c_byte,
1821	int fragment)
1822{
1823	struct frag_locked *entry = malloc(sizeof(struct frag_locked));
1824	if(entry == NULL)
1825		BAD_ERROR("Out of memory in add_pending fragment\n");
1826	entry->buffer = write_buffer;
1827	entry->c_byte = c_byte;
1828	entry->fragment = fragment;
1829	entry->fragment_prev = entry->fragment_next = NULL;
1830	pthread_mutex_lock(&fragment_mutex);
1831	insert_fragment_list(&frag_locked_list, entry);
1832	pthread_mutex_unlock(&fragment_mutex);
1833}
1834
1835
1836void write_fragment()
1837{
1838	if(fragment_size == 0)
1839		return;
1840
1841	pthread_mutex_lock(&fragment_mutex);
1842	if(fragments % FRAG_SIZE == 0) {
1843		void *ft = realloc(fragment_table, (fragments +
1844			FRAG_SIZE) * sizeof(struct squashfs_fragment_entry));
1845		if(ft == NULL) {
1846			pthread_mutex_unlock(&fragment_mutex);
1847			BAD_ERROR("Out of memory in fragment table\n");
1848		}
1849		fragment_table = ft;
1850	}
1851	fragment_data->size = fragment_size;
1852	fragment_data->block = fragments;
1853	fragment_table[fragments].unused = 0;
1854	fragments_outstanding ++;
1855	queue_put(to_frag, fragment_data);
1856	fragments ++;
1857	fragment_size = 0;
1858	pthread_mutex_unlock(&fragment_mutex);
1859}
1860
1861
1862static struct fragment empty_fragment = {SQUASHFS_INVALID_FRAG, 0, 0};
1863struct fragment *get_and_fill_fragment(struct file_buffer *file_buffer)
1864{
1865	struct fragment *ffrg;
1866
1867
1868	if(file_buffer == NULL || file_buffer->size == 0)
1869		return &empty_fragment;
1870
1871	if(fragment_size + file_buffer->size > block_size)
1872		write_fragment();
1873
1874	ffrg = malloc(sizeof(struct fragment));
1875	if(ffrg == NULL)
1876		BAD_ERROR("Out of memory in fragment block allocation!\n");
1877
1878	if(fragment_size == 0)
1879		fragment_data = cache_get(fragment_buffer, fragments, 1);
1880
1881	ffrg->index = fragments;
1882	ffrg->offset = fragment_size;
1883	ffrg->size = file_buffer->size;
1884	memcpy(fragment_data->data + fragment_size, file_buffer->data,
1885		file_buffer->size);
1886	fragment_size += file_buffer->size;
1887
1888	return ffrg;
1889}
1890
1891
1892long long generic_write_table(int length, void *buffer, int length2,
1893	void *buffer2, int uncompressed)
1894{
1895	int meta_blocks = (length + SQUASHFS_METADATA_SIZE - 1) /
1896		SQUASHFS_METADATA_SIZE;
1897	long long list[meta_blocks], start_bytes;
1898	int compressed_size, i;
1899	unsigned short c_byte;
1900	char cbuffer[(SQUASHFS_METADATA_SIZE << 2) + 2];
1901
1902#ifdef SQUASHFS_TRACE
1903	long long obytes = bytes;
1904	int olength = length;
1905#endif
1906
1907	for(i = 0; i < meta_blocks; i++) {
1908		int avail_bytes = length > SQUASHFS_METADATA_SIZE ?
1909			SQUASHFS_METADATA_SIZE : length;
1910		c_byte = mangle(cbuffer + BLOCK_OFFSET, buffer + i *
1911			SQUASHFS_METADATA_SIZE , avail_bytes,
1912			SQUASHFS_METADATA_SIZE, uncompressed, 0);
1913		SQUASHFS_SWAP_SHORTS(&c_byte, cbuffer, 1);
1914		list[i] = bytes;
1915		compressed_size = SQUASHFS_COMPRESSED_SIZE(c_byte) +
1916			BLOCK_OFFSET;
1917		TRACE("block %d @ 0x%llx, compressed size %d\n", i, bytes,
1918			compressed_size);
1919		write_destination(fd, bytes, compressed_size, cbuffer);
1920		bytes += compressed_size;
1921		total_bytes += avail_bytes;
1922		length -= avail_bytes;
1923	}
1924
1925	start_bytes = bytes;
1926	if(length2) {
1927		write_destination(fd, bytes, length2, buffer2);
1928		bytes += length2;
1929		total_bytes += length2;
1930	}
1931
1932	SQUASHFS_INSWAP_LONG_LONGS(list, meta_blocks);
1933	write_destination(fd, bytes, sizeof(list), list);
1934	bytes += sizeof(list);
1935	total_bytes += sizeof(list);
1936
1937	TRACE("generic_write_table: total uncompressed %d compressed %lld\n",
1938		olength, bytes - obytes);
1939
1940	return start_bytes;
1941}
1942
1943
1944long long write_fragment_table()
1945{
1946	unsigned int frag_bytes = SQUASHFS_FRAGMENT_BYTES(fragments);
1947	struct squashfs_fragment_entry p[fragments];
1948	int i;
1949
1950	TRACE("write_fragment_table: fragments %d, frag_bytes %d\n", fragments,
1951		frag_bytes);
1952	for(i = 0; i < fragments; i++) {
1953		TRACE("write_fragment_table: fragment %d, start_block 0x%llx, "
1954			"size %d\n", i, fragment_table[i].start_block,
1955			fragment_table[i].size);
1956		SQUASHFS_SWAP_FRAGMENT_ENTRY(&fragment_table[i], p + i);
1957	}
1958
1959	return generic_write_table(frag_bytes, p, 0, NULL, noF);
1960}
1961
1962
1963char read_from_file_buffer[SQUASHFS_FILE_MAX_SIZE];
1964char *read_from_disk(long long start, unsigned int avail_bytes)
1965{
1966	int res;
1967
1968	res = read_fs_bytes(fd, start, avail_bytes, read_from_file_buffer);
1969	if(res == 0)
1970		EXIT_MKSQUASHFS();
1971
1972	return read_from_file_buffer;
1973}
1974
1975
1976char read_from_file_buffer2[SQUASHFS_FILE_MAX_SIZE];
1977char *read_from_disk2(long long start, unsigned int avail_bytes)
1978{
1979	int res;
1980
1981	res = read_fs_bytes(fd, start, avail_bytes, read_from_file_buffer2);
1982	if(res == 0)
1983		EXIT_MKSQUASHFS();
1984
1985	return read_from_file_buffer2;
1986}
1987
1988
1989/*
1990 * Compute 16 bit BSD checksum over the data
1991 */
1992unsigned short get_checksum(char *buff, int bytes, unsigned short chksum)
1993{
1994	unsigned char *b = (unsigned char *) buff;
1995
1996	while(bytes --) {
1997		chksum = (chksum & 1) ? (chksum >> 1) | 0x8000 : chksum >> 1;
1998		chksum += *b++;
1999	}
2000
2001	return chksum;
2002}
2003
2004
2005unsigned short get_checksum_disk(long long start, long long l,
2006	unsigned int *blocks)
2007{
2008	unsigned short chksum = 0;
2009	unsigned int bytes;
2010	struct file_buffer *write_buffer;
2011	int i;
2012
2013	for(i = 0; l; i++)  {
2014		bytes = SQUASHFS_COMPRESSED_SIZE_BLOCK(blocks[i]);
2015		if(bytes == 0) /* sparse block */
2016			continue;
2017		write_buffer = cache_lookup(writer_buffer, start);
2018		if(write_buffer) {
2019			chksum = get_checksum(write_buffer->data, bytes,
2020				chksum);
2021			cache_block_put(write_buffer);
2022		} else
2023			chksum = get_checksum(read_from_disk(start, bytes),
2024				bytes, chksum);
2025		l -= bytes;
2026		start += bytes;
2027	}
2028
2029	return chksum;
2030}
2031
2032
2033unsigned short get_checksum_mem(char *buff, int bytes)
2034{
2035	return get_checksum(buff, bytes, 0);
2036}
2037
2038
2039unsigned short get_checksum_mem_buffer(struct file_buffer *file_buffer)
2040{
2041	if(file_buffer == NULL)
2042		return 0;
2043	else
2044		return get_checksum(file_buffer->data, file_buffer->size, 0);
2045}
2046
2047
2048#define DUP_HASH(a) (a & 0xffff)
2049void add_file(long long start, long long file_size, long long file_bytes,
2050	unsigned int *block_listp, int blocks, unsigned int fragment,
2051	int offset, int bytes)
2052{
2053	struct fragment *frg;
2054	unsigned int *block_list = block_listp;
2055	struct file_info *dupl_ptr = dupl[DUP_HASH(file_size)];
2056
2057	if(!duplicate_checking || file_size == 0)
2058		return;
2059
2060	for(; dupl_ptr; dupl_ptr = dupl_ptr->next) {
2061		if(file_size != dupl_ptr->file_size)
2062			continue;
2063		if(blocks != 0 && start != dupl_ptr->start)
2064			continue;
2065		if(fragment != dupl_ptr->fragment->index)
2066			continue;
2067		if(fragment != SQUASHFS_INVALID_FRAG && (offset !=
2068				dupl_ptr->fragment->offset || bytes !=
2069				dupl_ptr->fragment->size))
2070			continue;
2071		return;
2072	}
2073
2074	frg = malloc(sizeof(struct fragment));
2075	if(frg == NULL)
2076		BAD_ERROR("Out of memory in fragment block allocation!\n");
2077
2078	frg->index = fragment;
2079	frg->offset = offset;
2080	frg->size = bytes;
2081
2082	add_non_dup(file_size, file_bytes, block_list, start, frg, 0, 0, FALSE);
2083}
2084
2085
2086int pre_duplicate(long long file_size)
2087{
2088	struct file_info *dupl_ptr = dupl[DUP_HASH(file_size)];
2089
2090	for(; dupl_ptr; dupl_ptr = dupl_ptr->next)
2091		if(dupl_ptr->file_size == file_size)
2092			return TRUE;
2093
2094	return FALSE;
2095}
2096
2097
2098int pre_duplicate_frag(long long file_size, unsigned short checksum)
2099{
2100	struct file_info *dupl_ptr = dupl[DUP_HASH(file_size)];
2101
2102	for(; dupl_ptr; dupl_ptr = dupl_ptr->next)
2103		if(file_size == dupl_ptr->file_size && file_size ==
2104				dupl_ptr->fragment->size) {
2105			if(dupl_ptr->checksum_flag == FALSE) {
2106				struct file_buffer *frag_buffer =
2107					get_fragment(dupl_ptr->fragment);
2108				dupl_ptr->checksum =
2109					get_checksum_disk(dupl_ptr->start,
2110					dupl_ptr->bytes, dupl_ptr->block_list);
2111				dupl_ptr->fragment_checksum =
2112					get_checksum_mem(frag_buffer->data +
2113					dupl_ptr->fragment->offset, file_size);
2114				cache_block_put(frag_buffer);
2115				dupl_ptr->checksum_flag = TRUE;
2116			}
2117			if(dupl_ptr->fragment_checksum == checksum)
2118				return TRUE;
2119		}
2120
2121	return FALSE;
2122}
2123
2124
2125struct file_info *add_non_dup(long long file_size, long long bytes,
2126	unsigned int *block_list, long long start, struct fragment *fragment,
2127	unsigned short checksum, unsigned short fragment_checksum,
2128	int checksum_flag)
2129{
2130	struct file_info *dupl_ptr = malloc(sizeof(struct file_info));
2131
2132	if(dupl_ptr == NULL) {
2133		BAD_ERROR("Out of memory in dup_files allocation!\n");
2134	}
2135
2136	dupl_ptr->file_size = file_size;
2137	dupl_ptr->bytes = bytes;
2138	dupl_ptr->block_list = block_list;
2139	dupl_ptr->start = start;
2140	dupl_ptr->fragment = fragment;
2141	dupl_ptr->checksum = checksum;
2142	dupl_ptr->fragment_checksum = fragment_checksum;
2143	dupl_ptr->checksum_flag = checksum_flag;
2144	dupl_ptr->next = dupl[DUP_HASH(file_size)];
2145	dupl[DUP_HASH(file_size)] = dupl_ptr;
2146	dup_files ++;
2147
2148	return dupl_ptr;
2149}
2150
2151
2152struct file_info *duplicate(long long file_size, long long bytes,
2153	unsigned int **block_list, long long *start, struct fragment **fragment,
2154	struct file_buffer *file_buffer, int blocks, unsigned short checksum,
2155	unsigned short fragment_checksum, int checksum_flag)
2156{
2157	struct file_info *dupl_ptr = dupl[DUP_HASH(file_size)];
2158	int frag_bytes = file_buffer ? file_buffer->size : 0;
2159
2160	for(; dupl_ptr; dupl_ptr = dupl_ptr->next)
2161		if(file_size == dupl_ptr->file_size && bytes == dupl_ptr->bytes
2162				 && frag_bytes == dupl_ptr->fragment->size) {
2163			long long target_start, dup_start = dupl_ptr->start;
2164			int block;
2165
2166			if(memcmp(*block_list, dupl_ptr->block_list, blocks *
2167					sizeof(unsigned int)) != 0)
2168				continue;
2169
2170			if(checksum_flag == FALSE) {
2171				checksum = get_checksum_disk(*start, bytes,
2172					*block_list);
2173				fragment_checksum =
2174					get_checksum_mem_buffer(file_buffer);
2175				checksum_flag = TRUE;
2176			}
2177
2178			if(dupl_ptr->checksum_flag == FALSE) {
2179				struct file_buffer *frag_buffer =
2180					get_fragment(dupl_ptr->fragment);
2181				dupl_ptr->checksum =
2182					get_checksum_disk(dupl_ptr->start,
2183					dupl_ptr->bytes, dupl_ptr->block_list);
2184				dupl_ptr->fragment_checksum =
2185					get_checksum_mem(frag_buffer->data +
2186					dupl_ptr->fragment->offset, frag_bytes);
2187				cache_block_put(frag_buffer);
2188				dupl_ptr->checksum_flag = TRUE;
2189			}
2190
2191			if(checksum != dupl_ptr->checksum ||
2192					fragment_checksum !=
2193					dupl_ptr->fragment_checksum)
2194				continue;
2195
2196			target_start = *start;
2197			for(block = 0; block < blocks; block ++) {
2198				int size = SQUASHFS_COMPRESSED_SIZE_BLOCK
2199					((*block_list)[block]);
2200				struct file_buffer *target_buffer = NULL;
2201				struct file_buffer *dup_buffer = NULL;
2202				char *target_data, *dup_data;
2203				int res;
2204
2205				if(size == 0)
2206					continue;
2207				target_buffer = cache_lookup(writer_buffer,
2208					target_start);
2209				if(target_buffer)
2210					target_data = target_buffer->data;
2211				else
2212					target_data =
2213						read_from_disk(target_start,
2214						size);
2215
2216				dup_buffer = cache_lookup(writer_buffer,
2217					dup_start);
2218				if(dup_buffer)
2219					dup_data = dup_buffer->data;
2220				else
2221					dup_data = read_from_disk2(dup_start,
2222						size);
2223
2224				res = memcmp(target_data, dup_data, size);
2225				cache_block_put(target_buffer);
2226				cache_block_put(dup_buffer);
2227				if(res != 0)
2228					break;
2229				target_start += size;
2230				dup_start += size;
2231			}
2232			if(block == blocks) {
2233				struct file_buffer *frag_buffer =
2234					get_fragment(dupl_ptr->fragment);
2235
2236				if(frag_bytes == 0 ||
2237						memcmp(file_buffer->data,
2238						frag_buffer->data +
2239						dupl_ptr->fragment->offset,
2240						frag_bytes) == 0) {
2241					TRACE("Found duplicate file, start "
2242						"0x%llx, size %lld, checksum "
2243						"0x%x, fragment %d, size %d, "
2244						"offset %d, checksum 0x%x\n",
2245						dupl_ptr->start,
2246						dupl_ptr->bytes,
2247						dupl_ptr->checksum,
2248						dupl_ptr->fragment->index,
2249						frag_bytes,
2250						dupl_ptr->fragment->offset,
2251						fragment_checksum);
2252					*block_list = dupl_ptr->block_list;
2253					*start = dupl_ptr->start;
2254					*fragment = dupl_ptr->fragment;
2255					cache_block_put(frag_buffer);
2256					return 0;
2257				}
2258				cache_block_put(frag_buffer);
2259			}
2260		}
2261
2262
2263	return add_non_dup(file_size, bytes, *block_list, *start, *fragment,
2264		checksum, fragment_checksum, checksum_flag);
2265}
2266
2267
2268static int seq = 0;
2269void reader_read_process(struct dir_ent *dir_ent)
2270{
2271	struct file_buffer *prev_buffer = NULL, *file_buffer;
2272	int status, res, byte, count = 0;
2273	int file = get_pseudo_file(dir_ent->inode->pseudo_id)->fd;
2274	int child = get_pseudo_file(dir_ent->inode->pseudo_id)->child;
2275	long long bytes = 0;
2276
2277	while(1) {
2278		file_buffer = cache_get(reader_buffer, 0, 0);
2279		file_buffer->sequence = seq ++;
2280
2281		byte = read_bytes(file, file_buffer->data, block_size);
2282		if(byte == -1)
2283			goto read_err;
2284
2285		file_buffer->size = byte;
2286		file_buffer->file_size = -1;
2287		file_buffer->block = count ++;
2288		file_buffer->error = FALSE;
2289		file_buffer->fragment = FALSE;
2290		bytes += byte;
2291
2292		if(byte == 0)
2293			break;
2294
2295		/*
2296		 * Update estimated_uncompressed block count.  This is done
2297		 * on every block rather than waiting for all blocks to be
2298		 * read incase write_file_process() is running in parallel
2299		 * with this.  Otherwise cur uncompressed block count may
2300		 * get ahead of the total uncompressed block count.
2301		 */
2302		estimated_uncompressed ++;
2303
2304		if(prev_buffer)
2305			queue_put(from_reader, prev_buffer);
2306		prev_buffer = file_buffer;
2307	}
2308
2309	/*
2310 	 * Update inode file size now that the size of the dynamic pseudo file
2311	 * is known.  This is needed for the -info option.
2312	 */
2313	dir_ent->inode->buf.st_size = bytes;
2314
2315	res = waitpid(child, &status, 0);
2316	if(res == -1 || !WIFEXITED(status) || WEXITSTATUS(status) != 0)
2317		goto read_err;
2318
2319	if(prev_buffer == NULL)
2320		prev_buffer = file_buffer;
2321	else {
2322		cache_block_put(file_buffer);
2323		seq --;
2324	}
2325	prev_buffer->file_size = bytes;
2326	prev_buffer->fragment = !no_fragments &&
2327		(count == 2 || always_use_fragments) && (byte < block_size);
2328	queue_put(from_reader, prev_buffer);
2329
2330	return;
2331
2332read_err:
2333	if(prev_buffer) {
2334		cache_block_put(file_buffer);
2335		seq --;
2336		file_buffer = prev_buffer;
2337	}
2338	file_buffer->error = TRUE;
2339	queue_put(from_deflate, file_buffer);
2340}
2341
2342
2343void reader_read_file(struct dir_ent *dir_ent)
2344{
2345	struct stat *buf = &dir_ent->inode->buf, buf2;
2346	struct file_buffer *file_buffer;
2347	int blocks, byte, count, expected, file, frag_block;
2348	long long bytes, read_size;
2349
2350	if(dir_ent->inode->read)
2351		return;
2352
2353	dir_ent->inode->read = TRUE;
2354again:
2355	bytes = 0;
2356	count = 0;
2357	file_buffer = NULL;
2358	read_size = buf->st_size;
2359	blocks = (read_size + block_size - 1) >> block_log;
2360	frag_block = !no_fragments && (always_use_fragments ||
2361		(read_size < block_size)) ? read_size >> block_log : -1;
2362
2363	file = open(dir_ent->pathname, O_RDONLY);
2364	if(file == -1) {
2365		file_buffer = cache_get(reader_buffer, 0, 0);
2366		file_buffer->sequence = seq ++;
2367		goto read_err;
2368	}
2369
2370	do {
2371		expected = read_size - ((long long) count * block_size) >
2372			block_size ? block_size :
2373			read_size - ((long long) count * block_size);
2374
2375		if(file_buffer)
2376			queue_put(from_reader, file_buffer);
2377		file_buffer = cache_get(reader_buffer, 0, 0);
2378		file_buffer->sequence = seq ++;
2379
2380		/*
2381		 * Always try to read block_size bytes from the file rather
2382		 * than expected bytes (which will be less than the block_size
2383		 * at the file tail) to check that the file hasn't grown
2384		 * since being stated.  If it is longer (or shorter) than
2385		 * expected, then restat, and try again.  Note the special
2386		 * case where the file is an exact multiple of the block_size
2387		 * is dealt with later.
2388		 */
2389		byte = file_buffer->size = read_bytes(file, file_buffer->data,
2390			block_size);
2391
2392		file_buffer->file_size = read_size;
2393
2394		if(byte == -1)
2395			goto read_err;
2396
2397		if(byte != expected)
2398			goto restat;
2399
2400		file_buffer->block = count;
2401		file_buffer->error = FALSE;
2402		file_buffer->fragment = (file_buffer->block == frag_block);
2403
2404		bytes += byte;
2405		count ++;
2406	} while(count < blocks);
2407
2408	if(read_size != bytes)
2409		goto restat;
2410
2411	if(expected == block_size) {
2412		/*
2413		 * Special case where we've not tried to read past the end of
2414		 * the file.  We expect to get EOF, i.e. the file isn't larger
2415		 * than we expect.
2416		 */
2417		char buffer;
2418		int res;
2419
2420		res = read_bytes(file, &buffer, 1);
2421		if(res == -1)
2422			goto read_err;
2423
2424		if(res != 0)
2425			goto restat;
2426	}
2427
2428	queue_put(from_reader, file_buffer);
2429
2430	close(file);
2431
2432	return;
2433
2434restat:
2435	fstat(file, &buf2);
2436	close(file);
2437	if(read_size != buf2.st_size) {
2438		memcpy(buf, &buf2, sizeof(struct stat));
2439		file_buffer->error = 2;
2440		queue_put(from_deflate, file_buffer);
2441		goto again;
2442	}
2443read_err:
2444	file_buffer->error = TRUE;
2445	queue_put(from_deflate, file_buffer);
2446}
2447
2448
2449void reader_scan(struct dir_info *dir) {
2450	int i;
2451
2452	for(i = 0; i < dir->count; i++) {
2453		struct dir_ent *dir_ent = dir->list[i];
2454		struct stat *buf = &dir_ent->inode->buf;
2455		if(dir_ent->inode->root_entry)
2456			continue;
2457
2458		if(IS_PSEUDO_PROCESS(dir_ent->inode)) {
2459			reader_read_process(dir_ent);
2460			continue;
2461		}
2462
2463		switch(buf->st_mode & S_IFMT) {
2464			case S_IFREG:
2465				reader_read_file(dir_ent);
2466				break;
2467			case S_IFDIR:
2468				reader_scan(dir_ent->dir);
2469				break;
2470		}
2471	}
2472}
2473
2474
2475void *reader(void *arg)
2476{
2477	int oldstate;
2478
2479	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
2480	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldstate);
2481
2482	if(!sorted)
2483		reader_scan(queue_get(to_reader));
2484	else {
2485		int i;
2486		struct priority_entry *entry;
2487
2488		queue_get(to_reader);
2489		for(i = 65535; i >= 0; i--)
2490			for(entry = priority_list[i]; entry;
2491							entry = entry->next)
2492				reader_read_file(entry->dir);
2493	}
2494
2495	thread[0] = 0;
2496
2497	pthread_exit(NULL);
2498}
2499
2500
2501void *writer(void *arg)
2502{
2503	int write_error = FALSE;
2504	int oldstate;
2505
2506	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
2507	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldstate);
2508
2509	while(1) {
2510		struct file_buffer *file_buffer = queue_get(to_writer);
2511		off_t off;
2512
2513		if(file_buffer == NULL) {
2514			queue_put(from_writer,
2515				write_error ? &write_error : NULL);
2516			continue;
2517		}
2518
2519		off = file_buffer->block;
2520
2521		pthread_mutex_lock(&pos_mutex);
2522
2523		if(!write_error && lseek(fd, off, SEEK_SET) == -1) {
2524			ERROR("Lseek on destination failed because %s\n",
2525				strerror(errno));
2526			write_error = TRUE;
2527		}
2528
2529		if(!write_error && write_bytes(fd, file_buffer->data,
2530				file_buffer->size) == -1) {
2531			ERROR("Write on destination failed because %s\n",
2532				strerror(errno));
2533			write_error = TRUE;
2534		}
2535		pthread_mutex_unlock(&pos_mutex);
2536
2537		cache_block_put(file_buffer);
2538	}
2539}
2540
2541
2542int all_zero(struct file_buffer *file_buffer)
2543{
2544	int i;
2545	long entries = file_buffer->size / sizeof(long);
2546	long *p = (long *) file_buffer->data;
2547
2548	for(i = 0; i < entries && p[i] == 0; i++);
2549
2550	if(i == entries) {
2551		for(i = file_buffer->size & ~(sizeof(long) - 1);
2552			i < file_buffer->size && file_buffer->data[i] == 0;
2553			i++);
2554
2555		return i == file_buffer->size;
2556	}
2557
2558	return 0;
2559}
2560
2561
2562void *deflator(void *arg)
2563{
2564	void *stream = NULL;
2565	int res, oldstate;
2566
2567	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
2568	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldstate);
2569
2570	res = compressor_init(comp, &stream, block_size, 1);
2571	if(res)
2572		BAD_ERROR("deflator:: compressor_init failed\n");
2573
2574	while(1) {
2575		struct file_buffer *file_buffer = queue_get(from_reader);
2576		struct file_buffer *write_buffer;
2577
2578		if(sparse_files && all_zero(file_buffer)) {
2579			file_buffer->c_byte = 0;
2580			queue_put(from_deflate, file_buffer);
2581		} else if(file_buffer->fragment) {
2582			file_buffer->c_byte = file_buffer->size;
2583			queue_put(from_deflate, file_buffer);
2584		} else {
2585			write_buffer = cache_get(writer_buffer, 0, 0);
2586			write_buffer->c_byte = mangle2(stream,
2587				write_buffer->data, file_buffer->data,
2588				file_buffer->size, block_size, noD, 1);
2589			write_buffer->sequence = file_buffer->sequence;
2590			write_buffer->file_size = file_buffer->file_size;
2591			write_buffer->block = file_buffer->block;
2592			write_buffer->size = SQUASHFS_COMPRESSED_SIZE_BLOCK
2593				(write_buffer->c_byte);
2594			write_buffer->fragment = FALSE;
2595			write_buffer->error = FALSE;
2596			cache_block_put(file_buffer);
2597			queue_put(from_deflate, write_buffer);
2598		}
2599	}
2600}
2601
2602
2603void *frag_deflator(void *arg)
2604{
2605	void *stream = NULL;
2606	int res, oldstate;
2607
2608	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
2609	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldstate);
2610
2611	res = compressor_init(comp, &stream, block_size, 1);
2612	if(res)
2613		BAD_ERROR("frag_deflator:: compressor_init failed\n");
2614
2615	while(1) {
2616		int c_byte, compressed_size;
2617		struct file_buffer *file_buffer = queue_get(to_frag);
2618		struct file_buffer *write_buffer =
2619			cache_get(writer_buffer, file_buffer->block +
2620			FRAG_INDEX, 1);
2621
2622		c_byte = mangle2(stream, write_buffer->data, file_buffer->data,
2623			file_buffer->size, block_size, noF, 1);
2624		compressed_size = SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte);
2625		write_buffer->size = compressed_size;
2626		pthread_mutex_lock(&fragment_mutex);
2627		if(fragments_locked == FALSE) {
2628			fragment_table[file_buffer->block].size = c_byte;
2629			fragment_table[file_buffer->block].start_block = bytes;
2630			write_buffer->block = bytes;
2631			bytes += compressed_size;
2632			fragments_outstanding --;
2633			queue_put(to_writer, write_buffer);
2634			pthread_mutex_unlock(&fragment_mutex);
2635			TRACE("Writing fragment %lld, uncompressed size %d, "
2636				"compressed size %d\n", file_buffer->block,
2637				file_buffer->size, compressed_size);
2638		} else {
2639				pthread_mutex_unlock(&fragment_mutex);
2640				add_pending_fragment(write_buffer, c_byte,
2641					file_buffer->block);
2642		}
2643		cache_block_put(file_buffer);
2644	}
2645}
2646
2647
2648#define HASH_ENTRIES		256
2649#define BLOCK_HASH(a)		(a % HASH_ENTRIES)
2650struct file_buffer		*block_hash[HASH_ENTRIES];
2651
2652void push_buffer(struct file_buffer *file_buffer)
2653{
2654	int hash = BLOCK_HASH(file_buffer->sequence);
2655
2656	file_buffer->next = block_hash[hash];
2657	block_hash[hash] = file_buffer;
2658}
2659
2660
2661struct file_buffer *get_file_buffer(struct queue *queue)
2662{
2663	static unsigned int sequence = 0;
2664	int hash = BLOCK_HASH(sequence);
2665	struct file_buffer *file_buffer = block_hash[hash], *prev = NULL;
2666
2667	for(;file_buffer; prev = file_buffer, file_buffer = file_buffer->next)
2668		if(file_buffer->sequence == sequence)
2669			break;
2670
2671	if(file_buffer) {
2672		if(prev)
2673			prev->next = file_buffer->next;
2674		else
2675			block_hash[hash] = file_buffer->next;
2676	} else {
2677		while(1) {
2678			file_buffer = queue_get(queue);
2679			if(file_buffer->sequence == sequence)
2680				break;
2681			push_buffer(file_buffer);
2682		}
2683	}
2684
2685	sequence ++;
2686
2687	return file_buffer;
2688}
2689
2690
2691void *progress_thrd(void *arg)
2692{
2693	struct timeval timeval;
2694	struct timespec timespec;
2695	struct itimerval itimerval;
2696	struct winsize winsize;
2697
2698	if(ioctl(1, TIOCGWINSZ, &winsize) == -1) {
2699		if(isatty(STDOUT_FILENO))
2700			printf("TIOCGWINSZ ioctl failed, defaulting to 80 "
2701				"columns\n");
2702		columns = 80;
2703	} else
2704		columns = winsize.ws_col;
2705	signal(SIGWINCH, sigwinch_handler);
2706	signal(SIGALRM, sigalrm_handler);
2707
2708	itimerval.it_value.tv_sec = 0;
2709	itimerval.it_value.tv_usec = 250000;
2710	itimerval.it_interval.tv_sec = 0;
2711	itimerval.it_interval.tv_usec = 250000;
2712	setitimer(ITIMER_REAL, &itimerval, NULL);
2713
2714	pthread_cond_init(&progress_wait, NULL);
2715
2716	pthread_mutex_lock(&progress_mutex);
2717
2718	while(1) {
2719		gettimeofday(&timeval, NULL);
2720		timespec.tv_sec = timeval.tv_sec;
2721		if(timeval.tv_usec + 250000 > 999999)
2722			timespec.tv_sec++;
2723		timespec.tv_nsec = ((timeval.tv_usec + 250000) % 1000000) *
2724			1000;
2725		pthread_cond_timedwait(&progress_wait, &progress_mutex,
2726			&timespec);
2727		if(progress_enabled && estimated_uncompressed)
2728			progress_bar(cur_uncompressed, estimated_uncompressed,
2729				columns);
2730	}
2731}
2732
2733
2734void enable_progress_bar()
2735{
2736	pthread_mutex_lock(&progress_mutex);
2737	progress_enabled = TRUE;
2738	pthread_mutex_unlock(&progress_mutex);
2739}
2740
2741
2742void disable_progress_bar()
2743{
2744	pthread_mutex_lock(&progress_mutex);
2745	progress_enabled = FALSE;
2746	pthread_mutex_unlock(&progress_mutex);
2747}
2748
2749
2750void progress_bar(long long current, long long max, int columns)
2751{
2752	char rotate_list[] = { '|', '/', '-', '\\' };
2753	int max_digits, used, hashes, spaces;
2754	static int tty = -1;
2755
2756	if(max == 0)
2757		return;
2758
2759	max_digits = floor(log10(max)) + 1;
2760	used = max_digits * 2 + 11;
2761	hashes = (current * (columns - used)) / max;
2762	spaces = columns - used - hashes;
2763
2764	if((current > max) || (columns - used < 0))
2765		return;
2766
2767	if(tty == -1)
2768		tty = isatty(STDOUT_FILENO);
2769	if(!tty) {
2770		static long long previous = -1;
2771
2772		/* Updating much more frequently than this results in huge
2773		 * log files. */
2774		if((current % 100) != 0 && current != max)
2775			return;
2776		/* Don't update just to rotate the spinner. */
2777		if(current == previous)
2778			return;
2779		previous = current;
2780	}
2781
2782	printf("\r[");
2783
2784	while (hashes --)
2785		putchar('=');
2786
2787	putchar(rotate_list[rotate]);
2788
2789	while(spaces --)
2790		putchar(' ');
2791
2792	printf("] %*lld/%*lld", max_digits, current, max_digits, max);
2793	printf(" %3lld%%", current * 100 / max);
2794	fflush(stdout);
2795}
2796
2797
2798void write_file_empty(squashfs_inode *inode, struct dir_ent *dir_ent,
2799	int *duplicate_file)
2800{
2801	file_count ++;
2802	*duplicate_file = FALSE;
2803	create_inode(inode, NULL, dir_ent, SQUASHFS_FILE_TYPE, 0, 0, 0,
2804		 NULL, &empty_fragment, NULL, 0);
2805}
2806
2807
2808void write_file_frag_dup(squashfs_inode *inode, struct dir_ent *dir_ent,
2809	int size, int *duplicate_file, struct file_buffer *file_buffer,
2810	unsigned short checksum)
2811{
2812	struct file_info *dupl_ptr;
2813	struct fragment *fragment;
2814	unsigned int *block_listp = NULL;
2815	long long start = 0;
2816
2817	dupl_ptr = duplicate(size, 0, &block_listp, &start, &fragment,
2818		file_buffer, 0, 0, checksum, TRUE);
2819
2820	if(dupl_ptr) {
2821		*duplicate_file = FALSE;
2822		fragment = get_and_fill_fragment(file_buffer);
2823		dupl_ptr->fragment = fragment;
2824	} else
2825		*duplicate_file = TRUE;
2826
2827	cache_block_put(file_buffer);
2828
2829	total_bytes += size;
2830	file_count ++;
2831
2832	inc_progress_bar();
2833
2834	create_inode(inode, NULL, dir_ent, SQUASHFS_FILE_TYPE, size, 0,
2835			0, NULL, fragment, NULL, 0);
2836}
2837
2838
2839void write_file_frag(squashfs_inode *inode, struct dir_ent *dir_ent, int size,
2840	struct file_buffer *file_buffer, int *duplicate_file)
2841{
2842	struct fragment *fragment;
2843	unsigned short checksum;
2844
2845	checksum = get_checksum_mem_buffer(file_buffer);
2846
2847	if(pre_duplicate_frag(size, checksum)) {
2848		write_file_frag_dup(inode, dir_ent, size, duplicate_file,
2849			file_buffer, checksum);
2850		return;
2851	}
2852
2853	fragment = get_and_fill_fragment(file_buffer);
2854
2855	cache_block_put(file_buffer);
2856
2857	if(duplicate_checking)
2858		add_non_dup(size, 0, NULL, 0, fragment, 0, checksum, TRUE);
2859
2860	total_bytes += size;
2861	file_count ++;
2862
2863	*duplicate_file = FALSE;
2864
2865	inc_progress_bar();
2866
2867	create_inode(inode, NULL, dir_ent, SQUASHFS_FILE_TYPE, size, 0,
2868			0, NULL, fragment, NULL, 0);
2869
2870	return;
2871}
2872
2873
2874int write_file_process(squashfs_inode *inode, struct dir_ent *dir_ent,
2875	struct file_buffer *read_buffer, int *duplicate_file)
2876{
2877	long long read_size, file_bytes, start;
2878	struct fragment *fragment;
2879	unsigned int *block_list = NULL;
2880	int block = 0, status;
2881	long long sparse = 0;
2882	struct file_buffer *fragment_buffer = NULL;
2883
2884	*duplicate_file = FALSE;
2885
2886	lock_fragments();
2887
2888	file_bytes = 0;
2889	start = bytes;
2890	while (1) {
2891		read_size = read_buffer->file_size;
2892		if(read_buffer->fragment && read_buffer->c_byte)
2893			fragment_buffer = read_buffer;
2894		else {
2895			block_list = realloc(block_list, (block + 1) *
2896				sizeof(unsigned int));
2897			if(block_list == NULL)
2898				BAD_ERROR("Out of memory allocating block_list"
2899					"\n");
2900			block_list[block ++] = read_buffer->c_byte;
2901			if(read_buffer->c_byte) {
2902				read_buffer->block = bytes;
2903				bytes += read_buffer->size;
2904				cache_rehash(read_buffer, read_buffer->block);
2905				file_bytes += read_buffer->size;
2906				queue_put(to_writer, read_buffer);
2907			} else {
2908				sparse += read_buffer->size;
2909				cache_block_put(read_buffer);
2910			}
2911		}
2912		inc_progress_bar();
2913
2914		if(read_size != -1)
2915			break;
2916
2917		read_buffer = get_file_buffer(from_deflate);
2918		if(read_buffer->error)
2919			goto read_err;
2920	}
2921
2922	unlock_fragments();
2923	fragment = get_and_fill_fragment(fragment_buffer);
2924	cache_block_put(fragment_buffer);
2925
2926	if(duplicate_checking)
2927		add_non_dup(read_size, file_bytes, block_list, start, fragment,
2928			0, 0, FALSE);
2929	file_count ++;
2930	total_bytes += read_size;
2931
2932	create_inode(inode, NULL, dir_ent, SQUASHFS_FILE_TYPE, read_size, start,
2933		 block, block_list, fragment, NULL, sparse);
2934
2935	if(duplicate_checking == FALSE)
2936		free(block_list);
2937
2938	return 0;
2939
2940read_err:
2941	cur_uncompressed -= block;
2942	status = read_buffer->error;
2943	bytes = start;
2944	if(!block_device) {
2945		int res;
2946
2947		queue_put(to_writer, NULL);
2948		if(queue_get(from_writer) != 0)
2949			EXIT_MKSQUASHFS();
2950		res = ftruncate(fd, bytes);
2951		if(res != 0)
2952			BAD_ERROR("Failed to truncate dest file because %s\n",
2953				strerror(errno));
2954	}
2955	unlock_fragments();
2956	free(block_list);
2957	cache_block_put(read_buffer);
2958	return status;
2959}
2960
2961
2962int write_file_blocks(squashfs_inode *inode, struct dir_ent *dir_ent,
2963	long long read_size, struct file_buffer *read_buffer,
2964	int *duplicate_file)
2965{
2966	long long file_bytes, start;
2967	struct fragment *fragment;
2968	unsigned int *block_list;
2969	int block, status;
2970	int blocks = (read_size + block_size - 1) >> block_log;
2971	long long sparse = 0;
2972	struct file_buffer *fragment_buffer = NULL;
2973
2974	*duplicate_file = FALSE;
2975
2976	block_list = malloc(blocks * sizeof(unsigned int));
2977	if(block_list == NULL)
2978		BAD_ERROR("Out of memory allocating block_list\n");
2979
2980	lock_fragments();
2981
2982	file_bytes = 0;
2983	start = bytes;
2984	for(block = 0; block < blocks;) {
2985		if(read_buffer->fragment && read_buffer->c_byte) {
2986			fragment_buffer = read_buffer;
2987			blocks = read_size >> block_log;
2988		} else {
2989			block_list[block] = read_buffer->c_byte;
2990			if(read_buffer->c_byte) {
2991				read_buffer->block = bytes;
2992				bytes += read_buffer->size;
2993				cache_rehash(read_buffer, read_buffer->block);
2994				file_bytes += read_buffer->size;
2995				queue_put(to_writer, read_buffer);
2996			} else {
2997				sparse += read_buffer->size;
2998				cache_block_put(read_buffer);
2999			}
3000		}
3001		inc_progress_bar();
3002
3003		if(++block < blocks) {
3004			read_buffer = get_file_buffer(from_deflate);
3005			if(read_buffer->error)
3006				goto read_err;
3007		}
3008	}
3009
3010	unlock_fragments();
3011	fragment = get_and_fill_fragment(fragment_buffer);
3012	cache_block_put(fragment_buffer);
3013
3014	if(duplicate_checking)
3015		add_non_dup(read_size, file_bytes, block_list, start, fragment,
3016			0, 0, FALSE);
3017	file_count ++;
3018	total_bytes += read_size;
3019
3020	/*
3021	 * sparse count is needed to ensure squashfs correctly reports a
3022 	 * a smaller block count on stat calls to sparse files.  This is
3023 	 * to ensure intelligent applications like cp correctly handle the
3024 	 * file as a sparse file.  If the file in the original filesystem isn't
3025 	 * stored as a sparse file then still store it sparsely in squashfs, but
3026 	 * report it as non-sparse on stat calls to preserve semantics
3027 	 */
3028	if(sparse && (dir_ent->inode->buf.st_blocks << 9) >= read_size)
3029		sparse = 0;
3030
3031	create_inode(inode, NULL, dir_ent, SQUASHFS_FILE_TYPE, read_size, start,
3032		 blocks, block_list, fragment, NULL, sparse);
3033
3034	if(duplicate_checking == FALSE)
3035		free(block_list);
3036
3037	return 0;
3038
3039read_err:
3040	cur_uncompressed -= block;
3041	status = read_buffer->error;
3042	bytes = start;
3043	if(!block_device) {
3044		int res;
3045
3046		queue_put(to_writer, NULL);
3047		if(queue_get(from_writer) != 0)
3048			EXIT_MKSQUASHFS();
3049		res = ftruncate(fd, bytes);
3050		if(res != 0)
3051			BAD_ERROR("Failed to truncate dest file because %s\n",
3052				strerror(errno));
3053	}
3054	unlock_fragments();
3055	free(block_list);
3056	cache_block_put(read_buffer);
3057	return status;
3058}
3059
3060
3061int write_file_blocks_dup(squashfs_inode *inode, struct dir_ent *dir_ent,
3062	long long read_size, struct file_buffer *read_buffer,
3063	int *duplicate_file)
3064{
3065	int block, thresh;
3066	long long file_bytes, dup_start, start;
3067	struct fragment *fragment;
3068	struct file_info *dupl_ptr;
3069	int blocks = (read_size + block_size - 1) >> block_log;
3070	unsigned int *block_list, *block_listp;
3071	struct file_buffer **buffer_list;
3072	int status, num_locked_fragments;
3073	long long sparse = 0;
3074	struct file_buffer *fragment_buffer = NULL;
3075
3076	block_list = malloc(blocks * sizeof(unsigned int));
3077	if(block_list == NULL)
3078		BAD_ERROR("Out of memory allocating block_list\n");
3079	block_listp = block_list;
3080
3081	buffer_list = malloc(blocks * sizeof(struct file_buffer *));
3082	if(buffer_list == NULL)
3083		BAD_ERROR("Out of memory allocating file block list\n");
3084
3085	num_locked_fragments = lock_fragments();
3086
3087	file_bytes = 0;
3088	start = dup_start = bytes;
3089	thresh = blocks > (writer_buffer_size - num_locked_fragments) ?
3090		blocks - (writer_buffer_size - num_locked_fragments): 0;
3091
3092	for(block = 0; block < blocks;) {
3093		if(read_buffer->fragment && read_buffer->c_byte) {
3094			fragment_buffer = read_buffer;
3095			blocks = read_size >> block_log;
3096		} else {
3097			block_list[block] = read_buffer->c_byte;
3098
3099			if(read_buffer->c_byte) {
3100				read_buffer->block = bytes;
3101				bytes += read_buffer->size;
3102				file_bytes += read_buffer->size;
3103				cache_rehash(read_buffer, read_buffer->block);
3104				if(block < thresh) {
3105					buffer_list[block] = NULL;
3106					queue_put(to_writer, read_buffer);
3107				} else
3108					buffer_list[block] = read_buffer;
3109			} else {
3110				buffer_list[block] = NULL;
3111				sparse += read_buffer->size;
3112				cache_block_put(read_buffer);
3113			}
3114		}
3115		inc_progress_bar();
3116
3117		if(++block < blocks) {
3118			read_buffer = get_file_buffer(from_deflate);
3119			if(read_buffer->error)
3120				goto read_err;
3121		}
3122	}
3123
3124	dupl_ptr = duplicate(read_size, file_bytes, &block_listp, &dup_start,
3125		&fragment, fragment_buffer, blocks, 0, 0, FALSE);
3126
3127	if(dupl_ptr) {
3128		*duplicate_file = FALSE;
3129		for(block = thresh; block < blocks; block ++)
3130			if(buffer_list[block])
3131				queue_put(to_writer, buffer_list[block]);
3132		fragment = get_and_fill_fragment(fragment_buffer);
3133		dupl_ptr->fragment = fragment;
3134	} else {
3135		*duplicate_file = TRUE;
3136		for(block = thresh; block < blocks; block ++)
3137			cache_block_put(buffer_list[block]);
3138		bytes = start;
3139		if(thresh && !block_device) {
3140			int res;
3141
3142			queue_put(to_writer, NULL);
3143			if(queue_get(from_writer) != 0)
3144				EXIT_MKSQUASHFS();
3145			res = ftruncate(fd, bytes);
3146			if(res != 0)
3147				BAD_ERROR("Failed to truncate dest file because"
3148					"  %s\n", strerror(errno));
3149		}
3150	}
3151
3152	unlock_fragments();
3153	cache_block_put(fragment_buffer);
3154	free(buffer_list);
3155	file_count ++;
3156	total_bytes += read_size;
3157
3158	/*
3159	 * sparse count is needed to ensure squashfs correctly reports a
3160 	 * a smaller block count on stat calls to sparse files.  This is
3161 	 * to ensure intelligent applications like cp correctly handle the
3162 	 * file as a sparse file.  If the file in the original filesystem isn't
3163 	 * stored as a sparse file then still store it sparsely in squashfs, but
3164 	 * report it as non-sparse on stat calls to preserve semantics
3165 	 */
3166	if(sparse && (dir_ent->inode->buf.st_blocks << 9) >= read_size)
3167		sparse = 0;
3168
3169	create_inode(inode, NULL, dir_ent, SQUASHFS_FILE_TYPE, read_size,
3170		dup_start, blocks, block_listp, fragment, NULL, sparse);
3171
3172	if(*duplicate_file == TRUE)
3173		free(block_list);
3174
3175	return 0;
3176
3177read_err:
3178	cur_uncompressed -= block;
3179	status = read_buffer->error;
3180	bytes = start;
3181	if(thresh && !block_device) {
3182		int res;
3183
3184		queue_put(to_writer, NULL);
3185		if(queue_get(from_writer) != 0)
3186			EXIT_MKSQUASHFS();
3187		res = ftruncate(fd, bytes);
3188		if(res != 0)
3189			BAD_ERROR("Failed to truncate dest file because %s\n",
3190				strerror(errno));
3191	}
3192	unlock_fragments();
3193	for(blocks = thresh; blocks < block; blocks ++)
3194		cache_block_put(buffer_list[blocks]);
3195	free(buffer_list);
3196	free(block_list);
3197	cache_block_put(read_buffer);
3198	return status;
3199}
3200
3201
3202void write_file(squashfs_inode *inode, struct dir_ent *dir_ent,
3203	int *duplicate_file)
3204{
3205	int status;
3206	struct file_buffer *read_buffer;
3207	long long read_size;
3208
3209again:
3210	read_buffer = get_file_buffer(from_deflate);
3211
3212	status = read_buffer->error;
3213	if(status) {
3214		cache_block_put(read_buffer);
3215		goto file_err;
3216	}
3217
3218	read_size = read_buffer->file_size;
3219
3220	if(read_size == -1)
3221		status = write_file_process(inode, dir_ent, read_buffer,
3222			duplicate_file);
3223	else if(read_size == 0) {
3224		write_file_empty(inode, dir_ent, duplicate_file);
3225		cache_block_put(read_buffer);
3226	} else if(read_buffer->fragment && read_buffer->c_byte)
3227		write_file_frag(inode, dir_ent, read_size, read_buffer,
3228			duplicate_file);
3229	else if(pre_duplicate(read_size))
3230		status = write_file_blocks_dup(inode, dir_ent, read_size,
3231			read_buffer, duplicate_file);
3232	else
3233		status = write_file_blocks(inode, dir_ent, read_size,
3234			read_buffer, duplicate_file);
3235
3236file_err:
3237	if(status == 2) {
3238		ERROR("File %s changed size while reading filesystem, "
3239			"attempting to re-read\n", dir_ent->pathname);
3240		goto again;
3241	} else if(status == 1) {
3242		ERROR("Failed to read file %s, creating empty file\n",
3243			dir_ent->pathname);
3244		write_file_empty(inode, dir_ent, duplicate_file);
3245	}
3246}
3247
3248
3249#define BUFF_SIZE 8192
3250char b_buffer[BUFF_SIZE];
3251char *name;
3252char *basename_r();
3253
3254char *getbase(char *pathname)
3255{
3256	char *result;
3257
3258	if(*pathname != '/') {
3259		result = getcwd(b_buffer, BUFF_SIZE);
3260		if(result == NULL)
3261			return NULL;
3262		strcat(strcat(b_buffer, "/"), pathname);
3263	} else
3264		strcpy(b_buffer, pathname);
3265	name = b_buffer;
3266	if(((result = basename_r()) == NULL) || (strcmp(result, "..") == 0))
3267		return NULL;
3268	else
3269		return result;
3270}
3271
3272
3273char *basename_r()
3274{
3275	char *s;
3276	char *p;
3277	int n = 1;
3278
3279	for(;;) {
3280		s = name;
3281		if(*name == '\0')
3282			return NULL;
3283		if(*name != '/') {
3284			while(*name != '\0' && *name != '/') name++;
3285			n = name - s;
3286		}
3287		while(*name == '/') name++;
3288		if(strncmp(s, ".", n) == 0)
3289			continue;
3290		if((*name == '\0') || (strncmp(s, "..", n) == 0) ||
3291				((p = basename_r()) == NULL)) {
3292			s[n] = '\0';
3293			return s;
3294		}
3295		if(strcmp(p, "..") == 0)
3296			continue;
3297		return p;
3298	}
3299}
3300
3301
3302struct inode_info *lookup_inode(struct stat *buf)
3303{
3304	int inode_hash = INODE_HASH(buf->st_dev, buf->st_ino);
3305	struct inode_info *inode = inode_info[inode_hash];
3306
3307	while(inode != NULL) {
3308		if(memcmp(buf, &inode->buf, sizeof(struct stat)) == 0) {
3309			inode->nlink ++;
3310			return inode;
3311		}
3312		inode = inode->next;
3313	}
3314
3315	inode = malloc(sizeof(struct inode_info));
3316	if(inode == NULL)
3317		BAD_ERROR("Out of memory in inode hash table entry allocation"
3318			"\n");
3319
3320	memcpy(&inode->buf, buf, sizeof(struct stat));
3321	inode->read = FALSE;
3322	inode->root_entry = FALSE;
3323	inode->pseudo_file = FALSE;
3324	inode->inode = SQUASHFS_INVALID_BLK;
3325	inode->nlink = 1;
3326
3327	if((buf->st_mode & S_IFMT) == S_IFREG)
3328		estimated_uncompressed += (buf->st_size + block_size - 1) >>
3329			block_log;
3330
3331	if((buf->st_mode & S_IFMT) == S_IFDIR)
3332		inode->inode_number = dir_inode_no ++;
3333	else
3334		inode->inode_number = inode_no ++;
3335
3336	inode->next = inode_info[inode_hash];
3337	inode_info[inode_hash] = inode;
3338
3339	return inode;
3340}
3341
3342
3343inline void add_dir_entry(char *name, char *pathname, struct dir_info *sub_dir,
3344	struct inode_info *inode_info, struct dir_info *dir)
3345{
3346	if((dir->count % DIR_ENTRIES) == 0) {
3347		dir->list = realloc(dir->list, (dir->count + DIR_ENTRIES) *
3348				sizeof(struct dir_ent *));
3349		if(dir->list == NULL)
3350			BAD_ERROR("Out of memory in add_dir_entry\n");
3351	}
3352
3353	dir->list[dir->count] = malloc(sizeof(struct dir_ent));
3354	if(dir->list[dir->count] == NULL)
3355		BAD_ERROR("Out of memory in linux_opendir\n");
3356
3357	if(sub_dir)
3358		sub_dir->dir_ent = dir->list[dir->count];
3359	dir->list[dir->count]->name = strdup(name);
3360	dir->list[dir->count]->pathname = pathname != NULL ? strdup(pathname) :
3361		NULL;
3362	dir->list[dir->count]->inode = inode_info;
3363	dir->list[dir->count]->dir = sub_dir;
3364	dir->list[dir->count++]->our_dir = dir;
3365	dir->byte_count += strlen(name) + sizeof(struct squashfs_dir_entry);
3366}
3367
3368
3369int compare_name(const void *ent1_ptr, const void *ent2_ptr)
3370{
3371	struct dir_ent *ent1 = *((struct dir_ent **) ent1_ptr);
3372	struct dir_ent *ent2 = *((struct dir_ent **) ent2_ptr);
3373
3374	return strcmp(ent1->name, ent2->name);
3375}
3376
3377
3378void sort_directory(struct dir_info *dir)
3379{
3380	qsort(dir->list, dir->count, sizeof(struct dir_ent *), compare_name);
3381
3382	if((dir->count < 257 && dir->byte_count < SQUASHFS_METADATA_SIZE))
3383		dir->dir_is_ldir = FALSE;
3384}
3385
3386
3387struct dir_info *scan1_opendir(char *pathname)
3388{
3389	struct dir_info *dir;
3390
3391	dir = malloc(sizeof(struct dir_info));
3392	if(dir == NULL)
3393		BAD_ERROR("Out of memory in scan1_opendir\n");
3394
3395	if(pathname[0] != '\0' && (dir->linuxdir = opendir(pathname)) == NULL) {
3396		free(dir);
3397		return NULL;
3398	}
3399	dir->pathname = strdup(pathname);
3400	dir->count = dir->directory_count = dir->current_count = dir->byte_count
3401		= 0;
3402	dir->dir_is_ldir = TRUE;
3403	dir->list = NULL;
3404
3405	return dir;
3406}
3407
3408
3409int scan1_encomp_readdir(char *pathname, char *dir_name, struct dir_info *dir)
3410{
3411	static int index = 0;
3412
3413	if(dir->count < old_root_entries) {
3414		int i;
3415
3416		for(i = 0; i < old_root_entries; i++) {
3417			if(old_root_entry[i].inode.type == SQUASHFS_DIR_TYPE)
3418				dir->directory_count ++;
3419			add_dir_entry(old_root_entry[i].name, "", NULL,
3420				&old_root_entry[i].inode, dir);
3421		}
3422	}
3423
3424	while(index < source) {
3425		char *basename = getbase(source_path[index]);
3426		int n, pass = 1;
3427
3428		if(basename == NULL) {
3429			ERROR("Bad source directory %s - skipping ...\n",
3430				source_path[index]);
3431			index ++;
3432			continue;
3433		}
3434		strcpy(dir_name, basename);
3435		for(;;) {
3436			for(n = 0; n < dir->count &&
3437				strcmp(dir->list[n]->name, dir_name) != 0; n++);
3438			if(n == dir->count)
3439				break;
3440			ERROR("Source directory entry %s already used! - trying"
3441				" ", dir_name);
3442			sprintf(dir_name, "%s_%d", basename, pass++);
3443			ERROR("%s\n", dir_name);
3444		}
3445		strcpy(pathname, source_path[index ++]);
3446		return 1;
3447	}
3448	return 0;
3449}
3450
3451
3452int scan1_single_readdir(char *pathname, char *dir_name, struct dir_info *dir)
3453{
3454	struct dirent *d_name;
3455	int i;
3456
3457	if(dir->count < old_root_entries) {
3458		for(i = 0; i < old_root_entries; i++) {
3459			if(old_root_entry[i].inode.type == SQUASHFS_DIR_TYPE)
3460				dir->directory_count ++;
3461			add_dir_entry(old_root_entry[i].name, "", NULL,
3462				&old_root_entry[i].inode, dir);
3463		}
3464	}
3465
3466	if((d_name = readdir(dir->linuxdir)) != NULL) {
3467		int pass = 1;
3468
3469		strcpy(dir_name, d_name->d_name);
3470		for(;;) {
3471			for(i = 0; i < dir->count &&
3472				strcmp(dir->list[i]->name, dir_name) != 0; i++);
3473			if(i == dir->count)
3474				break;
3475			ERROR("Source directory entry %s already used! - trying"
3476				" ", dir_name);
3477			sprintf(dir_name, "%s_%d", d_name->d_name, pass++);
3478			ERROR("%s\n", dir_name);
3479		}
3480		strcat(strcat(strcpy(pathname, dir->pathname), "/"),
3481			d_name->d_name);
3482		return 1;
3483	}
3484
3485	return 0;
3486}
3487
3488
3489int scan1_readdir(char *pathname, char *dir_name, struct dir_info *dir)
3490{
3491	struct dirent *d_name = readdir(dir->linuxdir);
3492
3493	if(d_name != NULL) {
3494		strcpy(dir_name, d_name->d_name);
3495		strcat(strcat(strcpy(pathname, dir->pathname), "/"),
3496			d_name->d_name);
3497		return 1;
3498	}
3499
3500	return 0;
3501}
3502
3503
3504struct dir_ent *scan2_readdir(struct dir_info *dir_info)
3505{
3506	int current_count;
3507
3508	while((current_count = dir_info->current_count++) < dir_info->count)
3509		if(dir_info->list[current_count]->inode->root_entry)
3510			continue;
3511		else
3512			return dir_info->list[current_count];
3513	return NULL;
3514}
3515
3516
3517struct dir_ent *scan2_lookup(struct dir_info *dir, char *name)
3518{
3519	int i;
3520
3521	for(i = 0; i < dir->count; i++)
3522		if(strcmp(dir->list[i]->name, name) == 0)
3523			return dir->list[i];
3524
3525	return NULL;
3526}
3527
3528
3529struct dir_ent *scan3_readdir(struct directory *dir, struct dir_info *dir_info)
3530{
3531	int current_count;
3532
3533	while((current_count = dir_info->current_count++) < dir_info->count)
3534		if(dir_info->list[current_count]->inode->root_entry)
3535			add_dir(dir_info->list[current_count]->inode->inode,
3536				dir_info->list[current_count]->inode->inode_number,
3537				dir_info->list[current_count]->name,
3538				dir_info->list[current_count]->inode->type, dir);
3539		else
3540			return dir_info->list[current_count];
3541	return NULL;
3542}
3543
3544
3545void scan1_freedir(struct dir_info *dir)
3546{
3547	if(dir->pathname[0] != '\0')
3548		closedir(dir->linuxdir);
3549	free(dir->pathname);
3550	dir->pathname = NULL;
3551}
3552
3553
3554void scan2_freedir(struct dir_info *dir)
3555{
3556	dir->current_count = 0;
3557	if(dir->pathname) {
3558		free(dir->pathname);
3559		dir->pathname = NULL;
3560	}
3561}
3562
3563
3564void scan3_freedir(struct directory *dir)
3565{
3566	if(dir->index)
3567		free(dir->index);
3568	free(dir->buff);
3569}
3570
3571
3572void dir_scan(squashfs_inode *inode, char *pathname,
3573	int (_readdir)(char *, char *, struct dir_info *))
3574{
3575	struct stat buf;
3576	struct dir_info *dir_info = dir_scan1(pathname, paths, _readdir);
3577	struct dir_ent *dir_ent;
3578
3579	if(dir_info == NULL)
3580		return;
3581
3582	dir_scan2(dir_info, pseudo);
3583
3584	dir_ent = malloc(sizeof(struct dir_ent));
3585	if(dir_ent == NULL)
3586		BAD_ERROR("Out of memory in dir_scan\n");
3587
3588	if(pathname[0] == '\0') {
3589		/*
3590 		 * dummy top level directory, if multiple sources specified on
3591		 * command line
3592		 */
3593		memset(&buf, 0, sizeof(buf));
3594		buf.st_mode = S_IRWXU | S_IRWXG | S_IRWXO | S_IFDIR;
3595		buf.st_uid = getuid();
3596		buf.st_gid = getgid();
3597		buf.st_mtime = time(NULL);
3598		buf.st_dev = 0;
3599		buf.st_ino = 0;
3600		dir_ent->inode = lookup_inode(&buf);
3601		dir_ent->inode->pseudo_file = PSEUDO_FILE_OTHER;
3602	} else {
3603		if(lstat(pathname, &buf) == -1) {
3604			ERROR("Cannot stat dir/file %s because %s, ignoring",
3605				pathname, strerror(errno));
3606			return;
3607		}
3608		dir_ent->inode = lookup_inode(&buf);
3609	}
3610
3611	if(root_inode_number) {
3612		dir_ent->inode->inode_number = root_inode_number;
3613		dir_inode_no --;
3614	}
3615	dir_ent->name = dir_ent->pathname = strdup(pathname);
3616	dir_ent->dir = dir_info;
3617	dir_ent->our_dir = NULL;
3618	dir_info->dir_ent = dir_ent;
3619
3620	if(sorted) {
3621		int res = generate_file_priorities(dir_info, 0,
3622			&dir_info->dir_ent->inode->buf);
3623
3624		if(res == FALSE)
3625			BAD_ERROR("generate_file_priorities failed\n");
3626	}
3627	queue_put(to_reader, dir_info);
3628	if(sorted)
3629		sort_files_and_write(dir_info);
3630	if(progress)
3631		enable_progress_bar();
3632	dir_scan3(inode, dir_info);
3633	dir_ent->inode->inode = *inode;
3634	dir_ent->inode->type = SQUASHFS_DIR_TYPE;
3635}
3636
3637
3638struct dir_info *dir_scan1(char *pathname, struct pathnames *paths,
3639	int (_readdir)(char *, char *, struct dir_info *))
3640{
3641	char filename[8192], dir_name[8192];
3642	struct dir_info *dir = scan1_opendir(pathname);
3643
3644	if(dir == NULL) {
3645		ERROR("Could not open %s, skipping...\n", pathname);
3646		goto error;
3647	}
3648
3649	while(_readdir(filename, dir_name, dir) != FALSE) {
3650		struct dir_info *sub_dir;
3651		struct stat buf;
3652		struct pathnames *new;
3653
3654		if(strcmp(dir_name, ".") == 0 || strcmp(dir_name, "..") == 0)
3655			continue;
3656
3657		if(lstat(filename, &buf) == -1) {
3658			ERROR("Cannot stat dir/file %s because %s, ignoring",
3659				filename, strerror(errno));
3660			continue;
3661		}
3662
3663		if((buf.st_mode & S_IFMT) != S_IFREG &&
3664			(buf.st_mode & S_IFMT) != S_IFDIR &&
3665			(buf.st_mode & S_IFMT) != S_IFLNK &&
3666			(buf.st_mode & S_IFMT) != S_IFCHR &&
3667			(buf.st_mode & S_IFMT) != S_IFBLK &&
3668			(buf.st_mode & S_IFMT) != S_IFIFO &&
3669			(buf.st_mode & S_IFMT) != S_IFSOCK) {
3670			ERROR("File %s has unrecognised filetype %d, ignoring"
3671				"\n", filename, buf.st_mode & S_IFMT);
3672			continue;
3673		}
3674
3675		if(old_exclude) {
3676			if(old_excluded(filename, &buf))
3677				continue;
3678		} else {
3679			if(excluded(paths, dir_name, &new))
3680				continue;
3681		}
3682
3683		if((buf.st_mode & S_IFMT) == S_IFDIR) {
3684			sub_dir = dir_scan1(filename, new, scan1_readdir);
3685			if(sub_dir == NULL)
3686				continue;
3687			dir->directory_count ++;
3688		} else
3689			sub_dir = NULL;
3690
3691		add_dir_entry(dir_name, filename, sub_dir, lookup_inode(&buf),
3692			dir);
3693	}
3694
3695	scan1_freedir(dir);
3696
3697error:
3698	return dir;
3699}
3700
3701
3702struct dir_info *dir_scan2(struct dir_info *dir, struct pseudo *pseudo)
3703{
3704	struct dir_info *sub_dir;
3705	struct dir_ent *dir_ent;
3706	struct pseudo_entry *pseudo_ent;
3707	struct stat buf;
3708	static int pseudo_ino = 1;
3709
3710	if(dir == NULL && (dir = scan1_opendir("")) == NULL)
3711		return NULL;
3712
3713	while((dir_ent = scan2_readdir(dir)) != NULL) {
3714		struct inode_info *inode_info = dir_ent->inode;
3715		struct stat *buf = &inode_info->buf;
3716		char *name = dir_ent->name;
3717
3718		if((buf->st_mode & S_IFMT) == S_IFDIR)
3719			dir_scan2(dir_ent->dir, pseudo_subdir(name, pseudo));
3720	}
3721
3722	while((pseudo_ent = pseudo_readdir(pseudo)) != NULL) {
3723		dir_ent = scan2_lookup(dir, pseudo_ent->name);
3724		if(pseudo_ent->dev->type == 'm') {
3725			struct stat *buf;
3726			if(dir_ent == NULL) {
3727				ERROR("Pseudo modify file \"%s\" does not exist "
3728					"in source filesystem.  Ignoring.\n",
3729					pseudo_ent->pathname);
3730				continue;
3731			}
3732			if(dir_ent->inode->root_entry) {
3733				ERROR("Pseudo modify file \"%s\" is a pre-existing"
3734					" file in the filesystem being appended"
3735					"  to.  It cannot be modified. "
3736					"Ignoring.\n", pseudo_ent->pathname);
3737				continue;
3738			}
3739			buf = &dir_ent->inode->buf;
3740			buf->st_mode = (buf->st_mode & S_IFMT) |
3741				pseudo_ent->dev->mode;
3742			buf->st_uid = pseudo_ent->dev->uid;
3743			buf->st_gid = pseudo_ent->dev->gid;
3744			continue;
3745		}
3746
3747		if(dir_ent) {
3748			if(dir_ent->inode->root_entry)
3749				ERROR("Pseudo file \"%s\" is a pre-existing"
3750					" file in the filesystem being appended"
3751					"  to.  Ignoring.\n",
3752					pseudo_ent->pathname);
3753			else
3754				ERROR("Pseudo file \"%s\" exists in source "
3755					"filesystem \"%s\".\nIgnoring, "
3756					"exclude it (-e/-ef) to override.\n",
3757					pseudo_ent->pathname,
3758					dir_ent->pathname);
3759			continue;
3760		}
3761
3762		if(pseudo_ent->dev->type == 'd') {
3763			sub_dir = dir_scan2(NULL, pseudo_ent->pseudo);
3764			if(sub_dir == NULL) {
3765				ERROR("Could not create pseudo directory \"%s\""
3766					", skipping...\n",
3767					pseudo_ent->pathname);
3768				continue;
3769			}
3770			dir->directory_count ++;
3771		} else
3772			sub_dir = NULL;
3773
3774		memset(&buf, 0, sizeof(buf));
3775		buf.st_mode = pseudo_ent->dev->mode;
3776		buf.st_uid = pseudo_ent->dev->uid;
3777		buf.st_gid = pseudo_ent->dev->gid;
3778		buf.st_rdev = makedev(pseudo_ent->dev->major,
3779			pseudo_ent->dev->minor);
3780		buf.st_mtime = time(NULL);
3781		buf.st_ino = pseudo_ino ++;
3782
3783		if(pseudo_ent->dev->type == 'f') {
3784#ifdef USE_TMP_FILE
3785			struct stat buf2;
3786			int res = stat(pseudo_ent->dev->filename, &buf2);
3787			struct inode_info *inode;
3788			if(res == -1) {
3789				ERROR("Stat on pseudo file \"%s\" failed, "
3790					"skipping...", pseudo_ent->pathname);
3791				continue;
3792			}
3793			buf.st_size = buf2.st_size;
3794			inode = lookup_inode(&buf);
3795			inode->pseudo_file = PSEUDO_FILE_OTHER;
3796			add_dir_entry(pseudo_ent->name,
3797				pseudo_ent->dev->filename, sub_dir, inode,
3798				dir);
3799#else
3800			struct inode_info *inode = lookup_inode(&buf);
3801			inode->pseudo_id = pseudo_ent->dev->pseudo_id;
3802			inode->pseudo_file = PSEUDO_FILE_PROCESS;
3803			add_dir_entry(pseudo_ent->name, pseudo_ent->pathname,
3804				sub_dir, inode, dir);
3805#endif
3806		} else {
3807			struct inode_info *inode = lookup_inode(&buf);
3808			inode->pseudo_file = PSEUDO_FILE_OTHER;
3809			add_dir_entry(pseudo_ent->name, pseudo_ent->pathname,
3810				sub_dir, inode, dir);
3811		}
3812	}
3813
3814	scan2_freedir(dir);
3815	sort_directory(dir);
3816
3817	return dir;
3818}
3819
3820
3821void dir_scan3(squashfs_inode *inode, struct dir_info *dir_info)
3822{
3823	int squashfs_type;
3824	int duplicate_file;
3825	char *pathname = dir_info->dir_ent->pathname;
3826	struct directory dir;
3827	struct dir_ent *dir_ent;
3828
3829	scan3_init_dir(&dir);
3830
3831	while((dir_ent = scan3_readdir(&dir, dir_info)) != NULL) {
3832		struct inode_info *inode_info = dir_ent->inode;
3833		struct stat *buf = &inode_info->buf;
3834		char *filename = dir_ent->pathname;
3835		char *dir_name = dir_ent->name;
3836		unsigned int inode_number = ((buf->st_mode & S_IFMT) == S_IFDIR)
3837			?  dir_ent->inode->inode_number :
3838			dir_ent->inode->inode_number + dir_inode_no;
3839
3840		if(dir_ent->inode->inode == SQUASHFS_INVALID_BLK) {
3841			switch(buf->st_mode & S_IFMT) {
3842				case S_IFREG:
3843					squashfs_type = SQUASHFS_FILE_TYPE;
3844					write_file(inode, dir_ent,
3845						&duplicate_file);
3846					INFO("file %s, uncompressed size %lld "
3847						"bytes %s\n", filename,
3848						(long long) buf->st_size,
3849						duplicate_file ?  "DUPLICATE" :
3850						 "");
3851					break;
3852
3853				case S_IFDIR:
3854					squashfs_type = SQUASHFS_DIR_TYPE;
3855					dir_scan3(inode, dir_ent->dir);
3856					break;
3857
3858				case S_IFLNK:
3859					squashfs_type = SQUASHFS_SYMLINK_TYPE;
3860					create_inode(inode, NULL, dir_ent,
3861						squashfs_type, 0, 0, 0, NULL,
3862						NULL, NULL, 0);
3863					INFO("symbolic link %s inode 0x%llx\n",
3864						dir_name, *inode);
3865					sym_count ++;
3866					break;
3867
3868				case S_IFCHR:
3869					squashfs_type = SQUASHFS_CHRDEV_TYPE;
3870					create_inode(inode, NULL, dir_ent,
3871						squashfs_type, 0, 0, 0, NULL,
3872						NULL, NULL, 0);
3873					INFO("character device %s inode 0x%llx"
3874						"\n", dir_name, *inode);
3875					dev_count ++;
3876					break;
3877
3878				case S_IFBLK:
3879					squashfs_type = SQUASHFS_BLKDEV_TYPE;
3880					create_inode(inode, NULL, dir_ent,
3881						squashfs_type, 0, 0, 0, NULL,
3882						NULL, NULL, 0);
3883					INFO("block device %s inode 0x%llx\n",
3884						dir_name, *inode);
3885					dev_count ++;
3886					break;
3887
3888				case S_IFIFO:
3889					squashfs_type = SQUASHFS_FIFO_TYPE;
3890					create_inode(inode, NULL, dir_ent,
3891						squashfs_type, 0, 0, 0, NULL,
3892						NULL, NULL, 0);
3893					INFO("fifo %s inode 0x%llx\n",dir_name,
3894						*inode);
3895					fifo_count ++;
3896					break;
3897
3898				case S_IFSOCK:
3899					squashfs_type = SQUASHFS_SOCKET_TYPE;
3900					create_inode(inode, NULL, dir_ent,
3901						squashfs_type, 0, 0, 0, NULL,
3902						NULL, NULL, 0);
3903					INFO("unix domain socket %s inode "
3904						"0x%llx\n", dir_name, *inode);
3905					sock_count ++;
3906					break;
3907
3908				default:
3909					BAD_ERROR("%s unrecognised file type, "
3910						"mode is %x\n", filename,
3911						buf->st_mode);
3912			}
3913			dir_ent->inode->inode = *inode;
3914			dir_ent->inode->type = squashfs_type;
3915		 } else {
3916			*inode = dir_ent->inode->inode;
3917			squashfs_type = dir_ent->inode->type;
3918			switch(squashfs_type) {
3919				case SQUASHFS_FILE_TYPE:
3920					if(!sorted)
3921						INFO("file %s, uncompressed "
3922							"size %lld bytes LINK"
3923							"\n", filename,
3924							(long long)
3925							buf->st_size);
3926					break;
3927				case SQUASHFS_SYMLINK_TYPE:
3928					INFO("symbolic link %s inode 0x%llx "
3929						"LINK\n", dir_name, *inode);
3930					break;
3931				case SQUASHFS_CHRDEV_TYPE:
3932					INFO("character device %s inode 0x%llx "
3933						"LINK\n", dir_name, *inode);
3934					break;
3935				case SQUASHFS_BLKDEV_TYPE:
3936					INFO("block device %s inode 0x%llx "
3937						"LINK\n", dir_name, *inode);
3938					break;
3939				case SQUASHFS_FIFO_TYPE:
3940					INFO("fifo %s inode 0x%llx LINK\n",
3941						dir_name, *inode);
3942					break;
3943				case SQUASHFS_SOCKET_TYPE:
3944					INFO("unix domain socket %s inode "
3945						"0x%llx LINK\n", dir_name,
3946						*inode);
3947					break;
3948			}
3949		}
3950
3951		add_dir(*inode, inode_number, dir_name, squashfs_type, &dir);
3952		update_progress_bar();
3953	}
3954
3955	write_dir(inode, dir_info, &dir);
3956	INFO("directory %s inode 0x%llx\n", pathname, *inode);
3957
3958	scan3_freedir(&dir);
3959}
3960
3961
3962unsigned int slog(unsigned int block)
3963{
3964	int i;
3965
3966	for(i = 12; i <= 20; i++)
3967		if(block == (1 << i))
3968			return i;
3969	return 0;
3970}
3971
3972
3973int old_excluded(char *filename, struct stat *buf)
3974{
3975	int i;
3976
3977	for(i = 0; i < exclude; i++)
3978		if((exclude_paths[i].st_dev == buf->st_dev) &&
3979				(exclude_paths[i].st_ino == buf->st_ino))
3980			return TRUE;
3981	return FALSE;
3982}
3983
3984
3985#define ADD_ENTRY(buf) \
3986	if(exclude % EXCLUDE_SIZE == 0) { \
3987		exclude_paths = realloc(exclude_paths, (exclude + EXCLUDE_SIZE) \
3988			* sizeof(struct exclude_info)); \
3989		if(exclude_paths == NULL) \
3990			BAD_ERROR("Out of memory in exclude dir/file table\n"); \
3991	} \
3992	exclude_paths[exclude].st_dev = buf.st_dev; \
3993	exclude_paths[exclude++].st_ino = buf.st_ino;
3994int old_add_exclude(char *path)
3995{
3996	int i;
3997	char filename[4096];
3998	struct stat buf;
3999
4000	if(path[0] == '/' || strncmp(path, "./", 2) == 0 ||
4001			strncmp(path, "../", 3) == 0) {
4002		if(lstat(path, &buf) == -1) {
4003			ERROR("Cannot stat exclude dir/file %s because %s, "
4004				"ignoring", path, strerror(errno));
4005			return TRUE;
4006		}
4007		ADD_ENTRY(buf);
4008		return TRUE;
4009	}
4010
4011	for(i = 0; i < source; i++) {
4012		strcat(strcat(strcpy(filename, source_path[i]), "/"), path);
4013		if(lstat(filename, &buf) == -1) {
4014			if(!(errno == ENOENT || errno == ENOTDIR))
4015				ERROR("Cannot stat exclude dir/file %s because "
4016					"%s, ignoring", filename,
4017					strerror(errno));
4018			continue;
4019		}
4020		ADD_ENTRY(buf);
4021	}
4022	return TRUE;
4023}
4024
4025
4026void add_old_root_entry(char *name, squashfs_inode inode, int inode_number,
4027	int type)
4028{
4029	old_root_entry = realloc(old_root_entry,
4030		sizeof(struct old_root_entry_info) * (old_root_entries + 1));
4031	if(old_root_entry == NULL)
4032		BAD_ERROR("Out of memory in old root directory entries "
4033			"reallocation\n");
4034
4035	old_root_entry[old_root_entries].name = strdup(name);
4036	old_root_entry[old_root_entries].inode.inode = inode;
4037	old_root_entry[old_root_entries].inode.inode_number = inode_number;
4038	old_root_entry[old_root_entries].inode.type = type;
4039	old_root_entry[old_root_entries++].inode.root_entry = TRUE;
4040}
4041
4042
4043void initialise_threads(int readb_mbytes, int writeb_mbytes,
4044	int fragmentb_mbytes)
4045{
4046	int i;
4047	sigset_t sigmask, old_mask;
4048	int reader_buffer_size = readb_mbytes << (20 - block_log);
4049	int fragment_buffer_size = fragmentb_mbytes << (20 - block_log);
4050
4051	/*
4052	 * writer_buffer_size is global because it is needed in
4053	 * write_file_blocks_dup()
4054	 */
4055	writer_buffer_size = writeb_mbytes << (20 - block_log);
4056
4057	sigemptyset(&sigmask);
4058	sigaddset(&sigmask, SIGINT);
4059	sigaddset(&sigmask, SIGQUIT);
4060	if(sigprocmask(SIG_BLOCK, &sigmask, &old_mask) == -1)
4061		BAD_ERROR("Failed to set signal mask in intialise_threads\n");
4062
4063	signal(SIGUSR1, sigusr1_handler);
4064
4065	if(processors == -1) {
4066#ifndef linux
4067		int mib[2];
4068		size_t len = sizeof(processors);
4069
4070		mib[0] = CTL_HW;
4071#ifdef HW_AVAILCPU
4072		mib[1] = HW_AVAILCPU;
4073#else
4074		mib[1] = HW_NCPU;
4075#endif
4076
4077		if(sysctl(mib, 2, &processors, &len, NULL, 0) == -1) {
4078			ERROR("Failed to get number of available processors.  "
4079				"Defaulting to 1\n");
4080			processors = 1;
4081		}
4082#else
4083		processors = sysconf(_SC_NPROCESSORS_ONLN);
4084#endif
4085	}
4086
4087	thread = malloc((2 + processors * 2) * sizeof(pthread_t));
4088	if(thread == NULL)
4089		BAD_ERROR("Out of memory allocating thread descriptors\n");
4090	deflator_thread = &thread[2];
4091	frag_deflator_thread = &deflator_thread[processors];
4092
4093	to_reader = queue_init(1);
4094	from_reader = queue_init(reader_buffer_size);
4095	to_writer = queue_init(writer_buffer_size);
4096	from_writer = queue_init(1);
4097	from_deflate = queue_init(reader_buffer_size);
4098	to_frag = queue_init(fragment_buffer_size);
4099	reader_buffer = cache_init(block_size, reader_buffer_size);
4100	writer_buffer = cache_init(block_size, writer_buffer_size);
4101	fragment_buffer = cache_init(block_size, fragment_buffer_size);
4102	pthread_create(&thread[0], NULL, reader, NULL);
4103	pthread_create(&thread[1], NULL, writer, NULL);
4104	pthread_create(&progress_thread, NULL, progress_thrd, NULL);
4105	pthread_mutex_init(&fragment_mutex, NULL);
4106	pthread_cond_init(&fragment_waiting, NULL);
4107
4108	for(i = 0; i < processors; i++) {
4109		if(pthread_create(&deflator_thread[i], NULL, deflator, NULL) !=
4110				 0)
4111			BAD_ERROR("Failed to create thread\n");
4112		if(pthread_create(&frag_deflator_thread[i], NULL, frag_deflator,
4113				NULL) != 0)
4114			BAD_ERROR("Failed to create thread\n");
4115	}
4116
4117	printf("Parallel mksquashfs: Using %d processor%s\n", processors,
4118			processors == 1 ? "" : "s");
4119
4120	if(sigprocmask(SIG_SETMASK, &old_mask, NULL) == -1)
4121		BAD_ERROR("Failed to set signal mask in intialise_threads\n");
4122}
4123
4124
4125long long write_inode_lookup_table()
4126{
4127	int i, inode_number, lookup_bytes = SQUASHFS_LOOKUP_BYTES(inode_count);
4128	void *it;
4129
4130	if(inode_count == sinode_count)
4131		goto skip_inode_hash_table;
4132
4133	it = realloc(inode_lookup_table, lookup_bytes);
4134	if(it == NULL)
4135		BAD_ERROR("Out of memory in write_inode_table\n");
4136	inode_lookup_table = it;
4137
4138	for(i = 0; i < INODE_HASH_SIZE; i ++) {
4139		struct inode_info *inode = inode_info[i];
4140
4141		for(inode = inode_info[i]; inode; inode = inode->next) {
4142
4143			inode_number = inode->type == SQUASHFS_DIR_TYPE ?
4144				inode->inode_number : inode->inode_number +
4145				dir_inode_no;
4146
4147			SQUASHFS_SWAP_LONG_LONGS(&inode->inode,
4148				&inode_lookup_table[inode_number - 1], 1);
4149
4150		}
4151	}
4152
4153skip_inode_hash_table:
4154	return generic_write_table(lookup_bytes, inode_lookup_table, 0, NULL,
4155		noI);
4156}
4157
4158
4159char *get_component(char *target, char *targname)
4160{
4161	while(*target == '/')
4162		target ++;
4163
4164	while(*target != '/' && *target!= '\0')
4165		*targname ++ = *target ++;
4166
4167	*targname = '\0';
4168
4169	return target;
4170}
4171
4172
4173void free_path(struct pathname *paths)
4174{
4175	int i;
4176
4177	for(i = 0; i < paths->names; i++) {
4178		if(paths->name[i].paths)
4179			free_path(paths->name[i].paths);
4180		free(paths->name[i].name);
4181		if(paths->name[i].preg) {
4182			regfree(paths->name[i].preg);
4183			free(paths->name[i].preg);
4184		}
4185	}
4186
4187	free(paths);
4188}
4189
4190
4191struct pathname *add_path(struct pathname *paths, char *target, char *alltarget)
4192{
4193	char targname[1024];
4194	int i, error;
4195
4196	target = get_component(target, targname);
4197
4198	if(paths == NULL) {
4199		paths = malloc(sizeof(struct pathname));
4200		if(paths == NULL)
4201			BAD_ERROR("failed to allocate paths\n");
4202
4203		paths->names = 0;
4204		paths->name = NULL;
4205	}
4206
4207	for(i = 0; i < paths->names; i++)
4208		if(strcmp(paths->name[i].name, targname) == 0)
4209			break;
4210
4211	if(i == paths->names) {
4212		/* allocate new name entry */
4213		paths->names ++;
4214		paths->name = realloc(paths->name, (i + 1) *
4215			sizeof(struct path_entry));
4216		if(paths->name == NULL)
4217			BAD_ERROR("Out of memory in add path\n");
4218		paths->name[i].name = strdup(targname);
4219		paths->name[i].paths = NULL;
4220		if(use_regex) {
4221			paths->name[i].preg = malloc(sizeof(regex_t));
4222			if(paths->name[i].preg == NULL)
4223				BAD_ERROR("Out of memory in add_path\n");
4224			error = regcomp(paths->name[i].preg, targname,
4225				REG_EXTENDED|REG_NOSUB);
4226			if(error) {
4227				char str[1024];
4228
4229				regerror(error, paths->name[i].preg, str, 1024);
4230				BAD_ERROR("invalid regex %s in export %s, "
4231					"because %s\n", targname, alltarget,
4232					str);
4233			}
4234		} else
4235			paths->name[i].preg = NULL;
4236
4237		if(target[0] == '\0')
4238			/* at leaf pathname component */
4239			paths->name[i].paths = NULL;
4240		else
4241			/* recurse adding child components */
4242			paths->name[i].paths = add_path(NULL, target,
4243				alltarget);
4244	} else {
4245		/* existing matching entry */
4246		if(paths->name[i].paths == NULL) {
4247			/* No sub-directory which means this is the leaf
4248			 * component of a pre-existing exclude which subsumes
4249			 * the exclude currently being added, in which case stop
4250			 * adding components */
4251		} else if(target[0] == '\0') {
4252			/* at leaf pathname component and child components exist
4253			 * from more specific excludes, delete as they're
4254			 * subsumed by this exclude */
4255			free_path(paths->name[i].paths);
4256			paths->name[i].paths = NULL;
4257		} else
4258			/* recurse adding child components */
4259			add_path(paths->name[i].paths, target, alltarget);
4260	}
4261
4262	return paths;
4263}
4264
4265
4266void add_exclude(char *target)
4267{
4268
4269	if(target[0] == '/' || strncmp(target, "./", 2) == 0 ||
4270			strncmp(target, "../", 3) == 0)
4271		BAD_ERROR("/, ./ and ../ prefixed excludes not supported with "
4272			"-wildcards or -regex options\n");
4273	else if(strncmp(target, "... ", 4) == 0)
4274		stickypath = add_path(stickypath, target + 4, target + 4);
4275	else
4276		path = add_path(path, target, target);
4277}
4278
4279
4280void display_path(int depth, struct pathname *paths)
4281{
4282	int i, n;
4283
4284	if(paths == NULL)
4285		return;
4286
4287	for(i = 0; i < paths->names; i++) {
4288		for(n = 0; n < depth; n++)
4289			printf("\t");
4290		printf("%d: %s\n", depth, paths->name[i].name);
4291		display_path(depth + 1, paths->name[i].paths);
4292	}
4293}
4294
4295
4296void display_path2(struct pathname *paths, char *string)
4297{
4298	int i;
4299	char path[1024];
4300
4301	if(paths == NULL) {
4302		printf("%s\n", string);
4303		return;
4304	}
4305
4306	for(i = 0; i < paths->names; i++) {
4307		strcat(strcat(strcpy(path, string), "/"), paths->name[i].name);
4308		display_path2(paths->name[i].paths, path);
4309	}
4310}
4311
4312
4313struct pathnames *init_subdir()
4314{
4315	struct pathnames *new = malloc(sizeof(struct pathnames));
4316	if(new == NULL)
4317		BAD_ERROR("Out of memory in init_subdir\n");
4318	new->count = 0;
4319	return new;
4320}
4321
4322
4323struct pathnames *add_subdir(struct pathnames *paths, struct pathname *path)
4324{
4325	if(paths->count % PATHS_ALLOC_SIZE == 0) {
4326		paths = realloc(paths, sizeof(struct pathnames *) +
4327			(paths->count + PATHS_ALLOC_SIZE) *
4328			sizeof(struct pathname *));
4329		if(paths == NULL)
4330			BAD_ERROR("Out of memory in add_subdir\n");
4331	}
4332
4333	paths->path[paths->count++] = path;
4334	return paths;
4335}
4336
4337
4338void free_subdir(struct pathnames *paths)
4339{
4340	free(paths);
4341}
4342
4343
4344int excluded(struct pathnames *paths, char *name, struct pathnames **new)
4345{
4346	int i, n, res;
4347
4348	if(paths == NULL) {
4349		*new = NULL;
4350		return FALSE;
4351	}
4352
4353
4354	*new = init_subdir();
4355	if(stickypath)
4356		*new = add_subdir(*new, stickypath);
4357
4358	for(n = 0; n < paths->count; n++) {
4359		struct pathname *path = paths->path[n];
4360
4361		for(i = 0; i < path->names; i++) {
4362			int match = use_regex ?
4363				regexec(path->name[i].preg, name, (size_t) 0,
4364					NULL, 0) == 0 :
4365				fnmatch(path->name[i].name, name,
4366					FNM_PATHNAME|FNM_PERIOD|FNM_EXTMATCH) ==
4367					 0;
4368
4369			if(match && path->name[i].paths == NULL) {
4370				/* match on a leaf component, any subdirectories
4371				 * in the filesystem should be excluded */
4372				res = TRUE;
4373				goto empty_set;
4374			}
4375
4376			if(match)
4377				/* match on a non-leaf component, add any
4378				 * subdirectories to the new set of
4379				 * subdirectories to scan for this name */
4380				*new = add_subdir(*new, path->name[i].paths);
4381		}
4382	}
4383
4384	if((*new)->count == 0) {
4385			/* no matching names found, return empty new search set
4386			 */
4387			res = FALSE;
4388			goto empty_set;
4389	}
4390
4391	/* one or more matches with sub-directories found (no leaf matches).
4392	 * Return new set */
4393	return FALSE;
4394
4395empty_set:
4396	free_subdir(*new);
4397	*new = NULL;
4398	return res;
4399}
4400
4401
4402#define RECOVER_ID "Squashfs recovery file v1.0\n"
4403#define RECOVER_ID_SIZE 28
4404
4405void write_recovery_data(struct squashfs_super_block *sBlk)
4406{
4407	int res, recoverfd, bytes = sBlk->bytes_used - sBlk->inode_table_start;
4408	pid_t pid = getpid();
4409	char *metadata;
4410	char header[] = RECOVER_ID;
4411
4412	if(recover == FALSE) {
4413		printf("No recovery data option specified.\n");
4414		printf("Skipping saving recovery file.\n\n");
4415		return;
4416	}
4417
4418	metadata = malloc(bytes);
4419	if(metadata == NULL)
4420		BAD_ERROR("Failed to alloc metadata buffer in "
4421			"write_recovery_data\n");
4422
4423	res = read_fs_bytes(fd, sBlk->inode_table_start, bytes, metadata);
4424	if(res == 0)
4425		EXIT_MKSQUASHFS();
4426
4427	sprintf(recovery_file, "squashfs_recovery_%s_%d",
4428		getbase(destination_file), pid);
4429	recoverfd = open(recovery_file, O_CREAT | O_TRUNC | O_RDWR, S_IRWXU);
4430	if(recoverfd == -1)
4431		BAD_ERROR("Failed to create recovery file, because %s.  "
4432			"Aborting\n", strerror(errno));
4433
4434	if(write_bytes(recoverfd, header, RECOVER_ID_SIZE) == -1)
4435		BAD_ERROR("Failed to write recovery file, because %s\n",
4436			strerror(errno));
4437
4438	if(write_bytes(recoverfd, sBlk, sizeof(struct squashfs_super_block)) == -1)
4439		BAD_ERROR("Failed to write recovery file, because %s\n",
4440			strerror(errno));
4441
4442	if(write_bytes(recoverfd, metadata, bytes) == -1)
4443		BAD_ERROR("Failed to write recovery file, because %s\n",
4444			strerror(errno));
4445
4446	close(recoverfd);
4447	free(metadata);
4448
4449	printf("Recovery file \"%s\" written\n", recovery_file);
4450	printf("If Mksquashfs aborts abnormally (i.e. power failure), run\n");
4451	printf("mksquashfs dummy %s -recover %s\n", destination_file,
4452		recovery_file);
4453	printf("to restore filesystem\n\n");
4454}
4455
4456
4457void read_recovery_data(char *recovery_file, char *destination_file)
4458{
4459	int fd, recoverfd, bytes;
4460	struct squashfs_super_block orig_sBlk, sBlk;
4461	char *metadata;
4462	int res;
4463	struct stat buf;
4464	char header[] = RECOVER_ID;
4465	char header2[RECOVER_ID_SIZE];
4466
4467	recoverfd = open(recovery_file, O_RDONLY);
4468	if(recoverfd == -1)
4469		BAD_ERROR("Failed to open recovery file because %s\n",
4470			strerror(errno));
4471
4472	if(stat(destination_file, &buf) == -1)
4473		BAD_ERROR("Failed to stat destination file, because %s\n",
4474			strerror(errno));
4475
4476	fd = open(destination_file, O_RDWR);
4477	if(fd == -1)
4478		BAD_ERROR("Failed to open destination file because %s\n",
4479			strerror(errno));
4480
4481	res = read_bytes(recoverfd, header2, RECOVER_ID_SIZE);
4482	if(res == -1)
4483		BAD_ERROR("Failed to read recovery file, because %s\n",
4484			strerror(errno));
4485	if(res < RECOVER_ID_SIZE)
4486		BAD_ERROR("Recovery file appears to be truncated\n");
4487	if(strncmp(header, header2, RECOVER_ID_SIZE) !=0 )
4488		BAD_ERROR("Not a recovery file\n");
4489
4490	res = read_bytes(recoverfd, &sBlk, sizeof(struct squashfs_super_block));
4491	if(res == -1)
4492		BAD_ERROR("Failed to read recovery file, because %s\n",
4493			strerror(errno));
4494	if(res < sizeof(struct squashfs_super_block))
4495		BAD_ERROR("Recovery file appears to be truncated\n");
4496
4497	res = read_fs_bytes(fd, 0, sizeof(struct squashfs_super_block), &orig_sBlk);
4498	if(res == 0)
4499		EXIT_MKSQUASHFS();
4500
4501	if(memcmp(((char *) &sBlk) + 4, ((char *) &orig_sBlk) + 4,
4502			sizeof(struct squashfs_super_block) - 4) != 0)
4503		BAD_ERROR("Recovery file and destination file do not seem to "
4504			"match\n");
4505
4506	bytes = sBlk.bytes_used - sBlk.inode_table_start;
4507
4508	metadata = malloc(bytes);
4509	if(metadata == NULL)
4510		BAD_ERROR("Failed to alloc metadata buffer in "
4511			"read_recovery_data\n");
4512
4513	res = read_bytes(recoverfd, metadata, bytes);
4514	if(res == -1)
4515		BAD_ERROR("Failed to read recovery file, because %s\n",
4516			strerror(errno));
4517	if(res < bytes)
4518		BAD_ERROR("Recovery file appears to be truncated\n");
4519
4520	write_destination(fd, 0, sizeof(struct squashfs_super_block), &sBlk);
4521
4522	write_destination(fd, sBlk.inode_table_start, bytes, metadata);
4523
4524	close(recoverfd);
4525	close(fd);
4526
4527	printf("Successfully wrote recovery file \"%s\".  Exiting\n",
4528		recovery_file);
4529
4530	exit(0);
4531}
4532
4533
4534#define VERSION() \
4535	printf("mksquashfs version 4.2 (2011/02/28)\n");\
4536	printf("copyright (C) 2011 Phillip Lougher "\
4537		"<phillip@lougher.demon.co.uk>\n\n"); \
4538	printf("This program is free software; you can redistribute it and/or"\
4539		"\n");\
4540	printf("modify it under the terms of the GNU General Public License"\
4541		"\n");\
4542	printf("as published by the Free Software Foundation; either version "\
4543		"2,\n");\
4544	printf("or (at your option) any later version.\n\n");\
4545	printf("This program is distributed in the hope that it will be "\
4546		"useful,\n");\
4547	printf("but WITHOUT ANY WARRANTY; without even the implied warranty "\
4548		"of\n");\
4549	printf("MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the"\
4550		"\n");\
4551	printf("GNU General Public License for more details.\n");
4552int main(int argc, char *argv[])
4553{
4554	struct stat buf, source_buf;
4555	int res, i;
4556	struct squashfs_super_block sBlk;
4557	char *b, *root_name = NULL;
4558	int nopad = FALSE, keep_as_directory = FALSE;
4559	squashfs_inode inode;
4560	int readb_mbytes = READER_BUFFER_DEFAULT,
4561		writeb_mbytes = WRITER_BUFFER_DEFAULT,
4562		fragmentb_mbytes = FRAGMENT_BUFFER_DEFAULT;
4563
4564	pthread_mutex_init(&progress_mutex, NULL);
4565	block_log = slog(block_size);
4566	if(argc > 1 && strcmp(argv[1], "-version") == 0) {
4567		VERSION();
4568		exit(0);
4569	}
4570        for(i = 1; i < argc && argv[i][0] != '-'; i++);
4571	if(i < 3)
4572		goto printOptions;
4573	source_path = argv + 1;
4574	source = i - 2;
4575	/*
4576	 * lookup default compressor.  Note the Makefile ensures the default
4577	 * compressor has been built, and so we don't need to to check
4578	 * for failure here
4579	 */
4580	comp = lookup_compressor(COMP_DEFAULT);
4581	for(; i < argc; i++) {
4582		if(strcmp(argv[i], "-comp") == 0) {
4583			if(compressor_opts_parsed) {
4584				ERROR("%s: -comp must appear before -X options"
4585					"\n", argv[0]);
4586				exit(1);
4587			}
4588			if(++i == argc) {
4589				ERROR("%s: -comp missing compression type\n",
4590					argv[0]);
4591				exit(1);
4592			}
4593			comp = lookup_compressor(argv[i]);
4594			if(!comp->supported) {
4595				ERROR("%s: Compressor \"%s\" is not supported!"
4596					"\n", argv[0], argv[i]);
4597				ERROR("%s: Compressors available:\n", argv[0]);
4598				display_compressors("", COMP_DEFAULT);
4599				exit(1);
4600			}
4601
4602		} else if(strncmp(argv[i], "-X", 2) == 0) {
4603			int args = compressor_options(comp, argv + i, argc - i);
4604			if(args < 0) {
4605				if(args == -1) {
4606					ERROR("%s: Unrecognised compressor"
4607						" option %s\n", argv[0],
4608						argv[i]);
4609					ERROR("%s: Did you forget to specify"
4610						" -comp, or specify it after"
4611						" the compressor specific"
4612						" option?\n", argv[0]);
4613					}
4614				exit(1);
4615			}
4616			i += args;
4617			compressor_opts_parsed = 1;
4618
4619		} else if(strcmp(argv[i], "-pf") == 0) {
4620			if(++i == argc) {
4621				ERROR("%s: -pf missing filename\n", argv[0]);
4622				exit(1);
4623			}
4624			if(read_pseudo_file(&pseudo, argv[i]) == FALSE)
4625				exit(1);
4626		} else if(strcmp(argv[i], "-p") == 0) {
4627			if(++i == argc) {
4628				ERROR("%s: -p missing pseudo file definition\n",
4629					argv[0]);
4630				exit(1);
4631			}
4632			if(read_pseudo_def(&pseudo, argv[i]) == FALSE)
4633				exit(1);
4634		} else if(strcmp(argv[i], "-recover") == 0) {
4635			if(++i == argc) {
4636				ERROR("%s: -recover missing recovery file\n",
4637					argv[0]);
4638				exit(1);
4639			}
4640			read_recovery_data(argv[i], argv[source + 1]);
4641		} else if(strcmp(argv[i], "-no-recovery") == 0)
4642			recover = FALSE;
4643		else if(strcmp(argv[i], "-wildcards") == 0) {
4644			old_exclude = FALSE;
4645			use_regex = FALSE;
4646		} else if(strcmp(argv[i], "-regex") == 0) {
4647			old_exclude = FALSE;
4648			use_regex = TRUE;
4649		} else if(strcmp(argv[i], "-no-sparse") == 0)
4650			sparse_files = FALSE;
4651		else if(strcmp(argv[i], "-no-progress") == 0)
4652			progress = FALSE;
4653		else if(strcmp(argv[i], "-no-exports") == 0)
4654			exportable = FALSE;
4655		else if(strcmp(argv[i], "-processors") == 0) {
4656			if((++i == argc) || (processors =
4657					strtol(argv[i], &b, 10), *b != '\0')) {
4658				ERROR("%s: -processors missing or invalid "
4659					"processor number\n", argv[0]);
4660				exit(1);
4661			}
4662			if(processors < 1) {
4663				ERROR("%s: -processors should be 1 or larger\n",
4664					argv[0]);
4665				exit(1);
4666			}
4667		} else if(strcmp(argv[i], "-read-queue") == 0) {
4668			if((++i == argc) || (readb_mbytes =
4669					strtol(argv[i], &b, 10), *b != '\0')) {
4670				ERROR("%s: -read-queue missing or invalid "
4671					"queue size\n", argv[0]);
4672				exit(1);
4673			}
4674			if(readb_mbytes < 1) {
4675				ERROR("%s: -read-queue should be 1 megabyte or "
4676					"larger\n", argv[0]);
4677				exit(1);
4678			}
4679		} else if(strcmp(argv[i], "-write-queue") == 0) {
4680			if((++i == argc) || (writeb_mbytes =
4681					strtol(argv[i], &b, 10), *b != '\0')) {
4682				ERROR("%s: -write-queue missing or invalid "
4683					"queue size\n", argv[0]);
4684				exit(1);
4685			}
4686			if(writeb_mbytes < 1) {
4687				ERROR("%s: -write-queue should be 1 megabyte "
4688					"or larger\n", argv[0]);
4689				exit(1);
4690			}
4691		} else if(strcmp(argv[i], "-fragment-queue") == 0) {
4692			if((++i == argc) ||
4693					(fragmentb_mbytes =
4694					strtol(argv[i], &b, 10), *b != '\0')) {
4695				ERROR("%s: -fragment-queue missing or invalid "
4696					"queue size\n", argv[0]);
4697				exit(1);
4698			}
4699			if(fragmentb_mbytes < 1) {
4700				ERROR("%s: -fragment-queue should be 1 "
4701					"megabyte or larger\n", argv[0]);
4702				exit(1);
4703			}
4704		} else if(strcmp(argv[i], "-b") == 0) {
4705			if(++i == argc) {
4706				ERROR("%s: -b missing block size\n", argv[0]);
4707				exit(1);
4708			}
4709			block_size = strtol(argv[i], &b, 10);
4710			if(*b == 'm' || *b == 'M')
4711				block_size *= 1048576;
4712			else if(*b == 'k' || *b == 'K')
4713				block_size *= 1024;
4714			else if(*b != '\0') {
4715				ERROR("%s: -b invalid block size\n", argv[0]);
4716				exit(1);
4717			}
4718			if((block_log = slog(block_size)) == 0) {
4719				ERROR("%s: -b block size not power of two or "
4720					"not between 4096 and 1Mbyte\n",
4721					argv[0]);
4722				exit(1);
4723			}
4724		} else if(strcmp(argv[i], "-ef") == 0) {
4725			if(++i == argc) {
4726				ERROR("%s: -ef missing filename\n", argv[0]);
4727				exit(1);
4728			}
4729		} else if(strcmp(argv[i], "-no-duplicates") == 0)
4730			duplicate_checking = FALSE;
4731
4732		else if(strcmp(argv[i], "-no-fragments") == 0)
4733			no_fragments = TRUE;
4734
4735		 else if(strcmp(argv[i], "-always-use-fragments") == 0)
4736			always_use_fragments = TRUE;
4737
4738		 else if(strcmp(argv[i], "-sort") == 0) {
4739			if(++i == argc) {
4740				ERROR("%s: -sort missing filename\n", argv[0]);
4741				exit(1);
4742			}
4743		} else if(strcmp(argv[i], "-all-root") == 0 ||
4744				strcmp(argv[i], "-root-owned") == 0)
4745			global_uid = global_gid = 0;
4746
4747		else if(strcmp(argv[i], "-force-uid") == 0) {
4748			if(++i == argc) {
4749				ERROR("%s: -force-uid missing uid or user\n",
4750					argv[0]);
4751				exit(1);
4752			}
4753			if((global_uid = strtoll(argv[i], &b, 10)), *b =='\0') {
4754				if(global_uid < 0 || global_uid >
4755						(((long long) 1 << 32) - 1)) {
4756					ERROR("%s: -force-uid uid out of range"
4757						"\n", argv[0]);
4758					exit(1);
4759				}
4760			} else {
4761				struct passwd *uid = getpwnam(argv[i]);
4762				if(uid)
4763					global_uid = uid->pw_uid;
4764				else {
4765					ERROR("%s: -force-uid invalid uid or "
4766						"unknown user\n", argv[0]);
4767					exit(1);
4768				}
4769			}
4770		} else if(strcmp(argv[i], "-force-gid") == 0) {
4771			if(++i == argc) {
4772				ERROR("%s: -force-gid missing gid or group\n",
4773					argv[0]);
4774				exit(1);
4775			}
4776			if((global_gid = strtoll(argv[i], &b, 10)), *b =='\0') {
4777				if(global_gid < 0 || global_gid >
4778						(((long long) 1 << 32) - 1)) {
4779					ERROR("%s: -force-gid gid out of range"
4780						"\n", argv[0]);
4781					exit(1);
4782				}
4783			} else {
4784				struct group *gid = getgrnam(argv[i]);
4785				if(gid)
4786					global_gid = gid->gr_gid;
4787				else {
4788					ERROR("%s: -force-gid invalid gid or "
4789						"unknown group\n", argv[0]);
4790					exit(1);
4791				}
4792			}
4793		} else if(strcmp(argv[i], "-noI") == 0 ||
4794				strcmp(argv[i], "-noInodeCompression") == 0)
4795			noI = TRUE;
4796
4797		else if(strcmp(argv[i], "-noD") == 0 ||
4798				strcmp(argv[i], "-noDataCompression") == 0)
4799			noD = TRUE;
4800
4801		else if(strcmp(argv[i], "-noF") == 0 ||
4802				strcmp(argv[i], "-noFragmentCompression") == 0)
4803			noF = TRUE;
4804
4805		else if(strcmp(argv[i], "-noX") == 0 ||
4806				strcmp(argv[i], "-noXattrCompression") == 0)
4807			noX = TRUE;
4808
4809		else if(strcmp(argv[i], "-no-xattrs") == 0)
4810			no_xattrs = TRUE;
4811
4812		else if(strcmp(argv[i], "-xattrs") == 0)
4813			no_xattrs = FALSE;
4814
4815		else if(strcmp(argv[i], "-nopad") == 0)
4816			nopad = TRUE;
4817
4818		else if(strcmp(argv[i], "-info") == 0) {
4819			silent = FALSE;
4820			progress = FALSE;
4821		}
4822
4823		else if(strcmp(argv[i], "-e") == 0)
4824			break;
4825
4826		else if(strcmp(argv[i], "-noappend") == 0)
4827			delete = TRUE;
4828
4829		else if(strcmp(argv[i], "-keep-as-directory") == 0)
4830			keep_as_directory = TRUE;
4831
4832		else if(strcmp(argv[i], "-root-becomes") == 0) {
4833			if(++i == argc) {
4834				ERROR("%s: -root-becomes: missing name\n",
4835					argv[0]);
4836				exit(1);
4837			}
4838			root_name = argv[i];
4839		} else if(strcmp(argv[i], "-version") == 0) {
4840			VERSION();
4841		} else {
4842			ERROR("%s: invalid option\n\n", argv[0]);
4843printOptions:
4844			ERROR("SYNTAX:%s source1 source2 ...  dest [options] "
4845				"[-e list of exclude\ndirs/files]\n", argv[0]);
4846			ERROR("\nFilesystem build options:\n");
4847			ERROR("-comp <comp>\t\tselect <comp> compression\n");
4848			ERROR("\t\t\tCompressors available:\n");
4849			display_compressors("\t\t\t", COMP_DEFAULT);
4850			ERROR("-b <block_size>\t\tset data block to "
4851				"<block_size>.  Default %d bytes\n",
4852				SQUASHFS_FILE_SIZE);
4853			ERROR("-no-exports\t\tdon't make the filesystem "
4854				"exportable via NFS\n");
4855			ERROR("-no-sparse\t\tdon't detect sparse files\n");
4856			ERROR("-no-xattrs\t\tdon't store extended attributes"
4857				NOXOPT_STR "\n");
4858			ERROR("-xattrs\t\t\tstore extended attributes" XOPT_STR
4859				"\n");
4860			ERROR("-noI\t\t\tdo not compress inode table\n");
4861			ERROR("-noD\t\t\tdo not compress data blocks\n");
4862			ERROR("-noF\t\t\tdo not compress fragment blocks\n");
4863			ERROR("-noX\t\t\tdo not compress extended "
4864				"attributes\n");
4865			ERROR("-no-fragments\t\tdo not use fragments\n");
4866			ERROR("-always-use-fragments\tuse fragment blocks for "
4867				"files larger than block size\n");
4868			ERROR("-no-duplicates\t\tdo not perform duplicate "
4869				"checking\n");
4870			ERROR("-all-root\t\tmake all files owned by root\n");
4871			ERROR("-force-uid uid\t\tset all file uids to uid\n");
4872			ERROR("-force-gid gid\t\tset all file gids to gid\n");
4873			ERROR("-nopad\t\t\tdo not pad filesystem to a multiple "
4874				"of 4K\n");
4875			ERROR("-keep-as-directory\tif one source directory is "
4876				"specified, create a root\n");
4877			ERROR("\t\t\tdirectory containing that directory, "
4878				"rather than the\n");
4879			ERROR("\t\t\tcontents of the directory\n");
4880			ERROR("\nFilesystem filter options:\n");
4881			ERROR("-p <pseudo-definition>\tAdd pseudo file "
4882				"definition\n");
4883			ERROR("-pf <pseudo-file>\tAdd list of pseudo file "
4884				"definitions\n");
4885			ERROR("-sort <sort_file>\tsort files according to "
4886				"priorities in <sort_file>.  One\n");
4887			ERROR("\t\t\tfile or dir with priority per line.  "
4888				"Priority -32768 to\n");
4889			ERROR("\t\t\t32767, default priority 0\n");
4890			ERROR("-ef <exclude_file>\tlist of exclude dirs/files."
4891				"  One per line\n");
4892			ERROR("-wildcards\t\tAllow extended shell wildcards "
4893				"(globbing) to be used in\n\t\t\texclude "
4894				"dirs/files\n");
4895			ERROR("-regex\t\t\tAllow POSIX regular expressions to "
4896				"be used in exclude\n\t\t\tdirs/files\n");
4897			ERROR("\nFilesystem append options:\n");
4898			ERROR("-noappend\t\tdo not append to existing "
4899				"filesystem\n");
4900			ERROR("-root-becomes <name>\twhen appending source "
4901				"files/directories, make the\n");
4902			ERROR("\t\t\toriginal root become a subdirectory in "
4903				"the new root\n");
4904			ERROR("\t\t\tcalled <name>, rather than adding the new "
4905				"source items\n");
4906			ERROR("\t\t\tto the original root\n");
4907			ERROR("\nMksquashfs runtime options:\n");
4908			ERROR("-version\t\tprint version, licence and "
4909				"copyright message\n");
4910			ERROR("-recover <name>\t\trecover filesystem data "
4911				"using recovery file <name>\n");
4912			ERROR("-no-recovery\t\tdon't generate a recovery "
4913				"file\n");
4914			ERROR("-info\t\t\tprint files written to filesystem\n");
4915			ERROR("-no-progress\t\tdon't display the progress "
4916				"bar\n");
4917			ERROR("-processors <number>\tUse <number> processors."
4918				"  By default will use number of\n");
4919			ERROR("\t\t\tprocessors available\n");
4920			ERROR("-read-queue <size>\tSet input queue to <size> "
4921				"Mbytes.  Default %d Mbytes\n",
4922				READER_BUFFER_DEFAULT);
4923			ERROR("-write-queue <size>\tSet output queue to <size> "
4924				"Mbytes.  Default %d Mbytes\n",
4925				WRITER_BUFFER_DEFAULT);
4926			ERROR("-fragment-queue <size>\tSet fragment queue to "
4927				"<size> Mbytes.  Default %d Mbytes\n",
4928				FRAGMENT_BUFFER_DEFAULT);
4929			ERROR("\nMiscellaneous options:\n");
4930			ERROR("-root-owned\t\talternative name for -all-root"
4931				"\n");
4932			ERROR("-noInodeCompression\talternative name for -noI"
4933				"\n");
4934			ERROR("-noDataCompression\talternative name for -noD"
4935				"\n");
4936			ERROR("-noFragmentCompression\talternative name for "
4937				"-noF\n");
4938			ERROR("-noXattrCompression\talternative name for "
4939				"-noX\n");
4940			ERROR("\nCompressors available and compressor specific "
4941				"options:\n");
4942			display_compressor_usage(COMP_DEFAULT);
4943			exit(1);
4944		}
4945	}
4946
4947	/*
4948	 * Some compressors may need the options to be checked for validity
4949	 * once all the options have been processed
4950	 */
4951	res = compressor_options_post(comp, block_size);
4952	if(res)
4953		EXIT_MKSQUASHFS();
4954
4955	for(i = 0; i < source; i++)
4956		if(lstat(source_path[i], &source_buf) == -1) {
4957			fprintf(stderr, "Cannot stat source directory \"%s\" "
4958				"because %s\n", source_path[i],
4959				strerror(errno));
4960			EXIT_MKSQUASHFS();
4961		}
4962
4963	destination_file = argv[source + 1];
4964	if(stat(argv[source + 1], &buf) == -1) {
4965		if(errno == ENOENT) { /* Does not exist */
4966			fd = open(argv[source + 1], O_CREAT | O_TRUNC | O_RDWR,
4967				S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
4968			if(fd == -1) {
4969				perror("Could not create destination file");
4970				exit(1);
4971			}
4972			delete = TRUE;
4973		} else {
4974			perror("Could not stat destination file");
4975			exit(1);
4976		}
4977
4978	} else {
4979		if(S_ISBLK(buf.st_mode)) {
4980			if((fd = open(argv[source + 1], O_RDWR)) == -1) {
4981				perror("Could not open block device as "
4982					"destination");
4983				exit(1);
4984			}
4985			block_device = 1;
4986
4987		} else if(S_ISREG(buf.st_mode))	 {
4988			fd = open(argv[source + 1], (delete ? O_TRUNC : 0) |
4989				O_RDWR);
4990			if(fd == -1) {
4991				perror("Could not open regular file for "
4992					"writing as destination");
4993				exit(1);
4994			}
4995		}
4996		else {
4997			ERROR("Destination not block device or regular file\n");
4998			exit(1);
4999		}
5000
5001	}
5002
5003	signal(SIGTERM, sighandler2);
5004	signal(SIGINT, sighandler2);
5005
5006	/*
5007	 * process the exclude files - must be done afer destination file has
5008	 * been possibly created
5009	 */
5010	for(i = source + 2; i < argc; i++)
5011		if(strcmp(argv[i], "-ef") == 0) {
5012			FILE *fd;
5013			char filename[16385];
5014			if((fd = fopen(argv[++i], "r")) == NULL) {
5015				perror("Could not open exclude file...");
5016				EXIT_MKSQUASHFS();
5017			}
5018			while(fscanf(fd, "%16384[^\n]\n", filename) != EOF)
5019					if(old_exclude)
5020						old_add_exclude(filename);
5021					else
5022						add_exclude(filename);
5023			fclose(fd);
5024		} else if(strcmp(argv[i], "-e") == 0)
5025			break;
5026		else if(strcmp(argv[i], "-root-becomes") == 0 ||
5027				strcmp(argv[i], "-sort") == 0 ||
5028				strcmp(argv[i], "-pf") == 0 ||
5029				strcmp(argv[i], "-comp") == 0)
5030			i++;
5031
5032	if(i != argc) {
5033		if(++i == argc) {
5034			ERROR("%s: -e missing arguments\n", argv[0]);
5035			EXIT_MKSQUASHFS();
5036		}
5037		while(i < argc)
5038			if(old_exclude)
5039				old_add_exclude(argv[i++]);
5040			else
5041				add_exclude(argv[i++]);
5042	}
5043
5044	/* process the sort files - must be done afer the exclude files  */
5045	for(i = source + 2; i < argc; i++)
5046		if(strcmp(argv[i], "-sort") == 0) {
5047			int res = read_sort_file(argv[++i], source,
5048								source_path);
5049			if(res == FALSE)
5050				BAD_ERROR("Failed to read sort file\n");
5051			sorted ++;
5052		} else if(strcmp(argv[i], "-e") == 0)
5053			break;
5054		else if(strcmp(argv[i], "-root-becomes") == 0 ||
5055				strcmp(argv[i], "-ef") == 0 ||
5056				strcmp(argv[i], "-pf") == 0 ||
5057				strcmp(argv[i], "-comp") == 0)
5058			i++;
5059
5060#ifdef SQUASHFS_TRACE
5061	progress = FALSE;
5062#endif
5063
5064	if(!delete) {
5065	        comp = read_super(fd, &sBlk, argv[source + 1]);
5066	        if(comp == NULL) {
5067			ERROR("Failed to read existing filesystem - will not "
5068				"overwrite - ABORTING!\n");
5069			ERROR("To force Mksquashfs to write to this block "
5070				"device or file use -noappend\n");
5071			EXIT_MKSQUASHFS();
5072		}
5073
5074		block_log = slog(block_size = sBlk.block_size);
5075		noI = SQUASHFS_UNCOMPRESSED_INODES(sBlk.flags);
5076		noD = SQUASHFS_UNCOMPRESSED_DATA(sBlk.flags);
5077		noF = SQUASHFS_UNCOMPRESSED_FRAGMENTS(sBlk.flags);
5078		noX = SQUASHFS_UNCOMPRESSED_XATTRS(sBlk.flags);
5079		no_fragments = SQUASHFS_NO_FRAGMENTS(sBlk.flags);
5080		always_use_fragments = SQUASHFS_ALWAYS_FRAGMENTS(sBlk.flags);
5081		duplicate_checking = SQUASHFS_DUPLICATES(sBlk.flags);
5082		exportable = SQUASHFS_EXPORTABLE(sBlk.flags);
5083		no_xattrs = SQUASHFS_NO_XATTRS(sBlk.flags);
5084		comp_opts = SQUASHFS_COMP_OPTS(sBlk.flags);
5085	}
5086
5087	initialise_threads(readb_mbytes, writeb_mbytes, fragmentb_mbytes);
5088
5089	res = compressor_init(comp, &stream, SQUASHFS_METADATA_SIZE, 0);
5090	if(res)
5091		BAD_ERROR("compressor_init failed\n");
5092
5093	if(delete) {
5094		int size;
5095		void *comp_data = compressor_dump_options(comp, block_size,
5096			&size);
5097
5098		printf("Creating %d.%d filesystem on %s, block size %d.\n",
5099			SQUASHFS_MAJOR, SQUASHFS_MINOR, argv[source + 1], block_size);
5100
5101		/*
5102		 * store any compressor specific options after the superblock,
5103		 * and set the COMP_OPT flag to show that the filesystem has
5104		 * compressor specfic options
5105		 */
5106		if(comp_data) {
5107			unsigned short c_byte = size | SQUASHFS_COMPRESSED_BIT;
5108
5109			SQUASHFS_INSWAP_SHORTS(&c_byte, 1);
5110			write_destination(fd, sizeof(struct squashfs_super_block),
5111				sizeof(c_byte), &c_byte);
5112			write_destination(fd, sizeof(struct squashfs_super_block) +
5113				sizeof(c_byte), size, comp_data);
5114			bytes = sizeof(struct squashfs_super_block) + sizeof(c_byte)
5115				+ size;
5116			comp_opts = TRUE;
5117		} else
5118			bytes = sizeof(struct squashfs_super_block);
5119	} else {
5120		unsigned int last_directory_block, inode_dir_offset,
5121			inode_dir_file_size, root_inode_size,
5122			inode_dir_start_block, uncompressed_data,
5123			compressed_data, inode_dir_inode_number,
5124			inode_dir_parent_inode;
5125		unsigned int root_inode_start =
5126			SQUASHFS_INODE_BLK(sBlk.root_inode),
5127			root_inode_offset =
5128			SQUASHFS_INODE_OFFSET(sBlk.root_inode);
5129
5130		if((bytes = read_filesystem(root_name, fd, &sBlk, &inode_table,
5131				&data_cache, &directory_table,
5132				&directory_data_cache, &last_directory_block,
5133				&inode_dir_offset, &inode_dir_file_size,
5134				&root_inode_size, &inode_dir_start_block,
5135				&file_count, &sym_count, &dev_count, &dir_count,
5136				&fifo_count, &sock_count, &total_bytes,
5137				&total_inode_bytes, &total_directory_bytes,
5138				&inode_dir_inode_number,
5139				&inode_dir_parent_inode, add_old_root_entry,
5140				&fragment_table, &inode_lookup_table)) == 0) {
5141			ERROR("Failed to read existing filesystem - will not "
5142				"overwrite - ABORTING!\n");
5143			ERROR("To force Mksquashfs to write to this block "
5144				"device or file use -noappend\n");
5145			EXIT_MKSQUASHFS();
5146		}
5147		if((fragments = sBlk.fragments)) {
5148			fragment_table = realloc((char *) fragment_table,
5149				((fragments + FRAG_SIZE - 1) & ~(FRAG_SIZE - 1))
5150				 * sizeof(struct squashfs_fragment_entry));
5151			if(fragment_table == NULL)
5152				BAD_ERROR("Out of memory in save filesystem state\n");
5153		}
5154
5155		printf("Appending to existing %d.%d filesystem on %s, block "
5156			"size %d\n", SQUASHFS_MAJOR, SQUASHFS_MINOR, argv[source + 1],
5157			block_size);
5158		printf("All -b, -noI, -noD, -noF, -noX, no-duplicates, no-fragments, "
5159			"-always-use-fragments,\n-exportable and -comp options "
5160			"ignored\n");
5161		printf("\nIf appending is not wanted, please re-run with "
5162			"-noappend specified!\n\n");
5163
5164		compressed_data = (inode_dir_offset + inode_dir_file_size) &
5165			~(SQUASHFS_METADATA_SIZE - 1);
5166		uncompressed_data = (inode_dir_offset + inode_dir_file_size) &
5167			(SQUASHFS_METADATA_SIZE - 1);
5168
5169		/* save original filesystem state for restoring ... */
5170		sfragments = fragments;
5171		sbytes = bytes;
5172		sinode_count = sBlk.inodes;
5173		scache_bytes = root_inode_offset + root_inode_size;
5174		sdirectory_cache_bytes = uncompressed_data;
5175		sdata_cache = malloc(scache_bytes);
5176		if(sdata_cache == NULL)
5177			BAD_ERROR("Out of memory in save filesystem state\n");
5178		sdirectory_data_cache = malloc(sdirectory_cache_bytes);
5179		if(sdirectory_data_cache == NULL)
5180			BAD_ERROR("Out of memory in save filesystem state\n");
5181		memcpy(sdata_cache, data_cache, scache_bytes);
5182		memcpy(sdirectory_data_cache, directory_data_cache +
5183			compressed_data, sdirectory_cache_bytes);
5184		sinode_bytes = root_inode_start;
5185		stotal_bytes = total_bytes;
5186		stotal_inode_bytes = total_inode_bytes;
5187		stotal_directory_bytes = total_directory_bytes +
5188			compressed_data;
5189		sfile_count = file_count;
5190		ssym_count = sym_count;
5191		sdev_count = dev_count;
5192		sdir_count = dir_count + 1;
5193		sfifo_count = fifo_count;
5194		ssock_count = sock_count;
5195		sdup_files = dup_files;
5196		sid_count = id_count;
5197		write_recovery_data(&sBlk);
5198		if(save_xattrs() == FALSE)
5199			BAD_ERROR("Failed to save xattrs from existing "
5200				"filesystem\n");
5201		restore = TRUE;
5202		if(setjmp(env))
5203			goto restore_filesystem;
5204		signal(SIGTERM, sighandler);
5205		signal(SIGINT, sighandler);
5206		write_destination(fd, SQUASHFS_START, 4, "\0\0\0\0");
5207
5208		/*
5209		 * set the filesystem state up to be able to append to the
5210		 * original filesystem.  The filesystem state differs depending
5211		 * on whether we're appending to the original root directory, or
5212		 * if the original root directory becomes a sub-directory
5213		 * (root-becomes specified on command line, here root_name !=
5214		 * NULL)
5215		 */
5216		inode_bytes = inode_size = root_inode_start;
5217		directory_size = last_directory_block;
5218		cache_size = root_inode_offset + root_inode_size;
5219		directory_cache_size = inode_dir_offset + inode_dir_file_size;
5220		if(root_name) {
5221			sdirectory_bytes = last_directory_block;
5222			sdirectory_compressed_bytes = 0;
5223			root_inode_number = inode_dir_parent_inode;
5224			dir_inode_no = sBlk.inodes + 2;
5225			directory_bytes = last_directory_block;
5226			directory_cache_bytes = uncompressed_data;
5227			memmove(directory_data_cache, directory_data_cache +
5228				compressed_data, uncompressed_data);
5229			cache_bytes = root_inode_offset + root_inode_size;
5230			add_old_root_entry(root_name, sBlk.root_inode,
5231				inode_dir_inode_number, SQUASHFS_DIR_TYPE);
5232			total_directory_bytes += compressed_data;
5233			dir_count ++;
5234		} else {
5235			sdirectory_compressed_bytes = last_directory_block -
5236				inode_dir_start_block;
5237			sdirectory_compressed =
5238				malloc(sdirectory_compressed_bytes);
5239			if(sdirectory_compressed == NULL)
5240				BAD_ERROR("Out of memory in save filesystem "
5241					"state\n");
5242			memcpy(sdirectory_compressed, directory_table +
5243				inode_dir_start_block,
5244				sdirectory_compressed_bytes);
5245			sdirectory_bytes = inode_dir_start_block;
5246			root_inode_number = inode_dir_inode_number;
5247			dir_inode_no = sBlk.inodes + 1;
5248			directory_bytes = inode_dir_start_block;
5249			directory_cache_bytes = inode_dir_offset;
5250			cache_bytes = root_inode_offset;
5251		}
5252
5253		inode_count = file_count + dir_count + sym_count + dev_count +
5254			fifo_count + sock_count;
5255
5256		/*
5257		 * The default use freelist before growing cache policy behaves
5258		 * poorly with appending - with many deplicates the caches
5259		 * do not grow due to the fact that large queues of outstanding
5260		 * fragments/writer blocks do not occur, leading to small caches
5261		 * and un-uncessary performance loss to frequent cache
5262		 * replacement in the small caches.  Therefore with appending
5263		 * change the policy to grow the caches before reusing blocks
5264		 * from the freelist
5265		 */
5266		first_freelist = FALSE;
5267	}
5268
5269	if(path || stickypath) {
5270		paths = init_subdir();
5271		if(path)
5272			paths = add_subdir(paths, path);
5273		if(stickypath)
5274			paths = add_subdir(paths, stickypath);
5275	}
5276
5277	if(delete && !keep_as_directory && source == 1 &&
5278			S_ISDIR(source_buf.st_mode))
5279		dir_scan(&inode, source_path[0], scan1_readdir);
5280	else if(!keep_as_directory && source == 1 &&
5281			S_ISDIR(source_buf.st_mode))
5282		dir_scan(&inode, source_path[0], scan1_single_readdir);
5283	else
5284		dir_scan(&inode, "", scan1_encomp_readdir);
5285	sBlk.root_inode = inode;
5286	sBlk.inodes = inode_count;
5287	sBlk.s_magic = SQUASHFS_MAGIC;
5288	sBlk.s_major = SQUASHFS_MAJOR;
5289	sBlk.s_minor = SQUASHFS_MINOR;
5290	sBlk.block_size = block_size;
5291	sBlk.block_log = block_log;
5292	sBlk.flags = SQUASHFS_MKFLAGS(noI, noD, noF, noX, no_fragments,
5293		always_use_fragments, duplicate_checking, exportable,
5294		no_xattrs, comp_opts);
5295	sBlk.mkfs_time = time(NULL);
5296
5297restore_filesystem:
5298	if(progress && estimated_uncompressed) {
5299		disable_progress_bar();
5300		progress_bar(cur_uncompressed, estimated_uncompressed, columns);
5301	}
5302
5303	write_fragment();
5304	sBlk.fragments = fragments;
5305	if(!restoring) {
5306		unlock_fragments();
5307		pthread_mutex_lock(&fragment_mutex);
5308		while(fragments_outstanding) {
5309			pthread_mutex_unlock(&fragment_mutex);
5310			sched_yield();
5311			pthread_mutex_lock(&fragment_mutex);
5312		}
5313		queue_put(to_writer, NULL);
5314		if(queue_get(from_writer) != 0)
5315			EXIT_MKSQUASHFS();
5316	}
5317
5318	sBlk.no_ids = id_count;
5319	sBlk.inode_table_start = write_inodes();
5320	sBlk.directory_table_start = write_directories();
5321	sBlk.fragment_table_start = write_fragment_table();
5322	sBlk.lookup_table_start = exportable ? write_inode_lookup_table() :
5323		SQUASHFS_INVALID_BLK;
5324	sBlk.id_table_start = write_id_table();
5325	sBlk.xattr_id_table_start = write_xattrs();
5326
5327	TRACE("sBlk->inode_table_start 0x%llx\n", sBlk.inode_table_start);
5328	TRACE("sBlk->directory_table_start 0x%llx\n",
5329		sBlk.directory_table_start);
5330	TRACE("sBlk->fragment_table_start 0x%llx\n", sBlk.fragment_table_start);
5331	if(exportable)
5332		TRACE("sBlk->lookup_table_start 0x%llx\n",
5333			sBlk.lookup_table_start);
5334
5335	sBlk.bytes_used = bytes;
5336
5337	sBlk.compression = comp->id;
5338
5339	SQUASHFS_INSWAP_SUPER_BLOCK(&sBlk);
5340	write_destination(fd, SQUASHFS_START, sizeof(sBlk), &sBlk);
5341
5342	if(!nopad && (i = bytes & (4096 - 1))) {
5343		char temp[4096] = {0};
5344		write_destination(fd, bytes, 4096 - i, temp);
5345	}
5346
5347	close(fd);
5348
5349	delete_pseudo_files();
5350
5351	if(recovery_file[0] != '\0')
5352		unlink(recovery_file);
5353
5354	total_bytes += total_inode_bytes + total_directory_bytes +
5355		sizeof(struct squashfs_super_block) + total_xattr_bytes;
5356
5357	printf("\n%sSquashfs %d.%d filesystem, %s compressed, data block size"
5358		" %d\n", exportable ? "Exportable " : "", SQUASHFS_MAJOR,
5359		SQUASHFS_MINOR, comp->name, block_size);
5360	printf("\t%s data, %s metadata, %s fragments, %s xattrs\n",
5361		noD ? "uncompressed" : "compressed", noI ?  "uncompressed" :
5362		"compressed", no_fragments ? "no" : noF ? "uncompressed" :
5363		"compressed", no_xattrs ? "no" : noX ? "uncompressed" :
5364		"compressed");
5365	printf("\tduplicates are %sremoved\n", duplicate_checking ? "" :
5366		"not ");
5367	printf("Filesystem size %.2f Kbytes (%.2f Mbytes)\n", bytes / 1024.0,
5368		bytes / (1024.0 * 1024.0));
5369	printf("\t%.2f%% of uncompressed filesystem size (%.2f Kbytes)\n",
5370		((float) bytes / total_bytes) * 100.0, total_bytes / 1024.0);
5371	printf("Inode table size %d bytes (%.2f Kbytes)\n",
5372		inode_bytes, inode_bytes / 1024.0);
5373	printf("\t%.2f%% of uncompressed inode table size (%d bytes)\n",
5374		((float) inode_bytes / total_inode_bytes) * 100.0,
5375		total_inode_bytes);
5376	printf("Directory table size %d bytes (%.2f Kbytes)\n",
5377		directory_bytes, directory_bytes / 1024.0);
5378	printf("\t%.2f%% of uncompressed directory table size (%d bytes)\n",
5379		((float) directory_bytes / total_directory_bytes) * 100.0,
5380		total_directory_bytes);
5381	if(total_xattr_bytes) {
5382		printf("Xattr table size %d bytes (%.2f Kbytes)\n",
5383			xattr_bytes, xattr_bytes / 1024.0);
5384		printf("\t%.2f%% of uncompressed xattr table size (%d bytes)\n",
5385			((float) xattr_bytes / total_xattr_bytes) * 100.0,
5386			total_xattr_bytes);
5387	}
5388	if(duplicate_checking)
5389		printf("Number of duplicate files found %d\n", file_count -
5390			dup_files);
5391	else
5392		printf("No duplicate files removed\n");
5393	printf("Number of inodes %d\n", inode_count);
5394	printf("Number of files %d\n", file_count);
5395	if(!no_fragments)
5396		printf("Number of fragments %d\n", fragments);
5397	printf("Number of symbolic links  %d\n", sym_count);
5398	printf("Number of device nodes %d\n", dev_count);
5399	printf("Number of fifo nodes %d\n", fifo_count);
5400	printf("Number of socket nodes %d\n", sock_count);
5401	printf("Number of directories %d\n", dir_count);
5402	printf("Number of ids (unique uids + gids) %d\n", id_count);
5403	printf("Number of uids %d\n", uid_count);
5404
5405	for(i = 0; i < id_count; i++) {
5406		if(id_table[i]->flags & ISA_UID) {
5407			struct passwd *user = getpwuid(id_table[i]->id);
5408			printf("\t%s (%d)\n", user == NULL ? "unknown" :
5409				user->pw_name, id_table[i]->id);
5410		}
5411	}
5412
5413	printf("Number of gids %d\n", guid_count);
5414
5415	for(i = 0; i < id_count; i++) {
5416		if(id_table[i]->flags & ISA_GID) {
5417			struct group *group = getgrgid(id_table[i]->id);
5418			printf("\t%s (%d)\n", group == NULL ? "unknown" :
5419				group->gr_name, id_table[i]->id);
5420		}
5421	}
5422
5423	return 0;
5424}
5425