1 /*
2  * Unsquash a squashfs filesystem. This is a highly compressed read-only
3  * filesystem.
4 *
5 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
6 * 2012, 2013, 2014
7 * Phillip Lougher <phillip@squashfs.org.uk>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2,
12 * or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 *
23 * unsquashfs.c
24 */
25
26 #include "unsquashfs.h"
27 #include "squashfs_swap.h"
28 #include "squashfs_compat.h"
29 #include "compressor.h"
30 #include "xattr.h"
31 #include "unsquashfs_info.h"
32 #include "stdarg.h"
33
34 #ifndef linux
35 #include <sys/sysctl.h>
36 #else
37 #include <sys/sysinfo.h>
38 #endif
39
40 #include <sys/types.h>
41 #include <sys/time.h>
42 #include <sys/resource.h>
43 #include <limits.h>
44 #include <ctype.h>
45
46 struct cache *fragment_cache, *data_cache;
47 struct queue *to_reader, *to_inflate, *to_writer, *from_writer;
48 pthread_t *thread, *inflator_thread;
49 pthread_mutex_t fragment_mutex;
50
51 /* user options that control parallelisation */
52 int processors = -1;
53
54 struct super_block sBlk;
55 squashfs_operations s_ops;
56 struct compressor *comp;
57
58 int bytes = 0, swap, file_count = 0, dir_count = 0, sym_count = 0,
59 dev_count = 0, fifo_count = 0;
60 char *inode_table = NULL, *directory_table = NULL;
61 struct hash_table_entry *inode_table_hash[65536], *directory_table_hash[65536];
62 int fd;
63 unsigned int *uid_table, *guid_table;
64 unsigned int cached_frag = SQUASHFS_INVALID_FRAG;
65 char *fragment_data;
66 char *file_data;
67 char *data;
68 unsigned int block_size;
69 unsigned int block_log;
70 int lsonly = FALSE, info = FALSE, force = FALSE, short_ls = TRUE;
71 int use_regex = FALSE;
72 char **created_inode;
73 int root_process;
74 int columns;
75 int rotate = 0;
76 pthread_mutex_t screen_mutex;
77 int progress = TRUE, progress_enabled = FALSE;
78 unsigned int total_blocks = 0, total_files = 0, total_inodes = 0;
79 unsigned int cur_blocks = 0;
80 int inode_number = 1;
81 int no_xattrs = XATTR_DEF;
82 int user_xattrs = FALSE;
83
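/*
 * mapping of squashfs inode types (1 to 14) to stat(2) mode bits.
 * Entries 8 to 14 are the extended ("L") variants of entries 1 to 7.
 */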
84 int lookup_type[] = {
85 0,
86 S_IFDIR,
87 S_IFREG,
88 S_IFLNK,
89 S_IFBLK,
90 S_IFCHR,
91 S_IFIFO,
92 S_IFSOCK,
93 S_IFDIR,
94 S_IFREG,
95 S_IFLNK,
96 S_IFBLK,
97 S_IFCHR,
98 S_IFIFO,
99 S_IFSOCK
100 };
101
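/*
 * table driving modestr() below: for each entry whose (mode & mask)
 * equals value, the character in the mode field is placed at the given
 * position in the ls(1) style permission string.
 */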
102 struct test table[] = {
103 { S_IFMT, S_IFSOCK, 0, 's' },
104 { S_IFMT, S_IFLNK, 0, 'l' },
105 { S_IFMT, S_IFBLK, 0, 'b' },
106 { S_IFMT, S_IFDIR, 0, 'd' },
107 { S_IFMT, S_IFCHR, 0, 'c' },
108 { S_IFMT, S_IFIFO, 0, 'p' },
109 { S_IRUSR, S_IRUSR, 1, 'r' },
110 { S_IWUSR, S_IWUSR, 2, 'w' },
111 { S_IRGRP, S_IRGRP, 4, 'r' },
112 { S_IWGRP, S_IWGRP, 5, 'w' },
113 { S_IROTH, S_IROTH, 7, 'r' },
114 { S_IWOTH, S_IWOTH, 8, 'w' },
115 { S_IXUSR | S_ISUID, S_IXUSR | S_ISUID, 3, 's' },
116 { S_IXUSR | S_ISUID, S_ISUID, 3, 'S' },
117 { S_IXUSR | S_ISUID, S_IXUSR, 3, 'x' },
118 { S_IXGRP | S_ISGID, S_IXGRP | S_ISGID, 6, 's' },
119 { S_IXGRP | S_ISGID, S_ISGID, 6, 'S' },
120 { S_IXGRP | S_ISGID, S_IXGRP, 6, 'x' },
121 { S_IXOTH | S_ISVTX, S_IXOTH | S_ISVTX, 9, 't' },
122 { S_IXOTH | S_ISVTX, S_ISVTX, 9, 'T' },
123 { S_IXOTH | S_ISVTX, S_IXOTH, 9, 'x' },
124 { 0, 0, 0, 0}
125 };
126
127 void progress_bar(long long current, long long max, int columns);
128
129 #define MAX_LINE 16384
130
131 void prep_exit()
132 {
133 }
134
135
136 void sigwinch_handler()
137 {
138 struct winsize winsize;
139
140 if(ioctl(1, TIOCGWINSZ, &winsize) == -1) {
141 if(isatty(STDOUT_FILENO))
142 ERROR("TIOCGWINSZ ioctl failed, defaulting to 80 "
143 "columns\n");
144 columns = 80;
145 } else
146 columns = winsize.ws_col;
147 }
148
149
150 void sigalrm_handler()
151 {
152 rotate = (rotate + 1) % 4;
153 }
154
155
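/*
 * overflow checks used to sanity check sizes before they are added,
 * shifted or multiplied. Each returns non-zero if the operation would
 * overflow an int.
 */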
156 int add_overflow(int a, int b)
157 {
158 return (INT_MAX - a) < b;
159 }
160
161
162 int shift_overflow(int a, int shift)
163 {
164 return (INT_MAX >> shift) < a;
165 }
166
167
168 int multiply_overflow(int a, int multiplier)
169 {
170 return (INT_MAX / multiplier) < a;
171 }
172
173
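/*
 * allocate and initialise a fixed size queue, used to pass work between
 * the main, reader, inflator and writer threads. One slot is left unused
 * so that a full queue can be distinguished from an empty one.
 */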
174 struct queue *queue_init(int size)
175 {
176 struct queue *queue = malloc(sizeof(struct queue));
177
178 if(queue == NULL)
179 EXIT_UNSQUASH("Out of memory in queue_init\n");
180
181 if(add_overflow(size, 1) ||
182 multiply_overflow(size + 1, sizeof(void *)))
183 EXIT_UNSQUASH("Size too large in queue_init\n");
184
185 queue->data = malloc(sizeof(void *) * (size + 1));
186 if(queue->data == NULL)
187 EXIT_UNSQUASH("Out of memory in queue_init\n");
188
189 queue->size = size + 1;
190 queue->readp = queue->writep = 0;
191 pthread_mutex_init(&queue->mutex, NULL);
192 pthread_cond_init(&queue->empty, NULL);
193 pthread_cond_init(&queue->full, NULL);
194
195 return queue;
196 }
197
198
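/* add data to the queue, blocking while the queue is full */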
199 void queue_put(struct queue *queue, void *data)
200 {
201 int nextp;
202
203 pthread_mutex_lock(&queue->mutex);
204
205 while((nextp = (queue->writep + 1) % queue->size) == queue->readp)
206 pthread_cond_wait(&queue->full, &queue->mutex);
207
208 queue->data[queue->writep] = data;
209 queue->writep = nextp;
210 pthread_cond_signal(&queue->empty);
211 pthread_mutex_unlock(&queue->mutex);
212 }
213
214
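/* remove and return the oldest entry, blocking while the queue is empty */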
215 void *queue_get(struct queue *queue)
216 {
217 void *data;
218 pthread_mutex_lock(&queue->mutex);
219
220 while(queue->readp == queue->writep)
221 pthread_cond_wait(&queue->empty, &queue->mutex);
222
223 data = queue->data[queue->readp];
224 queue->readp = (queue->readp + 1) % queue->size;
225 pthread_cond_signal(&queue->full);
226 pthread_mutex_unlock(&queue->mutex);
227
228 return data;
229 }
230
231
232 void dump_queue(struct queue *queue)
233 {
234 pthread_mutex_lock(&queue->mutex);
235
236 printf("Max size %d, size %d%s\n", queue->size - 1,
237 queue->readp <= queue->writep ? queue->writep - queue->readp :
238 queue->size - queue->readp + queue->writep,
239 queue->readp == queue->writep ? " (EMPTY)" :
240 ((queue->writep + 1) % queue->size) == queue->readp ?
241 " (FULL)" : "");
242
243 pthread_mutex_unlock(&queue->mutex);
244 }
245
246
247 /* Called with the cache mutex held */
248 void insert_hash_table(struct cache *cache, struct cache_entry *entry)
249 {
250 int hash = CALCULATE_HASH(entry->block);
251
252 entry->hash_next = cache->hash_table[hash];
253 cache->hash_table[hash] = entry;
254 entry->hash_prev = NULL;
255 if(entry->hash_next)
256 entry->hash_next->hash_prev = entry;
257 }
258
259
260 /* Called with the cache mutex held */
261 void remove_hash_table(struct cache *cache, struct cache_entry *entry)
262 {
263 if(entry->hash_prev)
264 entry->hash_prev->hash_next = entry->hash_next;
265 else
266 cache->hash_table[CALCULATE_HASH(entry->block)] =
267 entry->hash_next;
268 if(entry->hash_next)
269 entry->hash_next->hash_prev = entry->hash_prev;
270
271 entry->hash_prev = entry->hash_next = NULL;
272 }
273
274
275 /* Called with the cache mutex held */
276 void insert_free_list(struct cache *cache, struct cache_entry *entry)
277 {
278 if(cache->free_list) {
279 entry->free_next = cache->free_list;
280 entry->free_prev = cache->free_list->free_prev;
281 cache->free_list->free_prev->free_next = entry;
282 cache->free_list->free_prev = entry;
283 } else {
284 cache->free_list = entry;
285 entry->free_prev = entry->free_next = entry;
286 }
287 }
288
289
290 /* Called with the cache mutex held */
291 void remove_free_list(struct cache *cache, struct cache_entry *entry)
292 {
293 if(entry->free_prev == NULL || entry->free_next == NULL)
294 /* not in free list */
295 return;
296 else if(entry->free_prev == entry && entry->free_next == entry) {
297 /* only this entry in the free list */
298 cache->free_list = NULL;
299 } else {
300 /* more than one entry in the free list */
301 entry->free_next->free_prev = entry->free_prev;
302 entry->free_prev->free_next = entry->free_next;
303 if(cache->free_list == entry)
304 cache->free_list = entry->free_next;
305 }
306
307 entry->free_prev = entry->free_next = NULL;
308 }
309
310
311 struct cache *cache_init(int buffer_size, int max_buffers)
312 {
313 struct cache *cache = malloc(sizeof(struct cache));
314
315 if(cache == NULL)
316 EXIT_UNSQUASH("Out of memory in cache_init\n");
317
318 cache->max_buffers = max_buffers;
319 cache->buffer_size = buffer_size;
320 cache->count = 0;
321 cache->used = 0;
322 cache->free_list = NULL;
323 memset(cache->hash_table, 0, sizeof(struct cache_entry *) * 65536);
324 cache->wait_free = FALSE;
325 cache->wait_pending = FALSE;
326 pthread_mutex_init(&cache->mutex, NULL);
327 pthread_cond_init(&cache->wait_for_free, NULL);
328 pthread_cond_init(&cache->wait_for_pending, NULL);
329
330 return cache;
331 }
332
333
334 struct cache_entry *cache_get(struct cache *cache, long long block, int size)
335 {
336 /*
337 * Get a block out of the cache. If the block isn't in the cache
338 	 * it is added and queued to the reader() and inflator() threads for
339 	 * reading off disk and decompression. The cache grows until max_buffers
340 	 * is reached; once this occurs, existing discarded blocks on the free
341 	 * list are reused.
342 */
343 int hash = CALCULATE_HASH(block);
344 struct cache_entry *entry;
345
346 pthread_mutex_lock(&cache->mutex);
347
348 for(entry = cache->hash_table[hash]; entry; entry = entry->hash_next)
349 if(entry->block == block)
350 break;
351
352 if(entry) {
353 /*
354 * found the block in the cache. If the block is currently unused
355 * remove it from the free list and increment cache used count.
356 */
357 if(entry->used == 0) {
358 cache->used ++;
359 remove_free_list(cache, entry);
360 }
361 entry->used ++;
362 pthread_mutex_unlock(&cache->mutex);
363 } else {
364 /*
365 * not in the cache
366 *
367 * first try to allocate new block
368 */
369 if(cache->count < cache->max_buffers) {
370 entry = malloc(sizeof(struct cache_entry));
371 if(entry == NULL)
372 EXIT_UNSQUASH("Out of memory in cache_get\n");
373 entry->data = malloc(cache->buffer_size);
374 if(entry->data == NULL)
375 EXIT_UNSQUASH("Out of memory in cache_get\n");
376 entry->cache = cache;
377 entry->free_prev = entry->free_next = NULL;
378 cache->count ++;
379 } else {
380 /*
381 * try to get from free list
382 */
383 while(cache->free_list == NULL) {
384 cache->wait_free = TRUE;
385 pthread_cond_wait(&cache->wait_for_free,
386 &cache->mutex);
387 }
388 entry = cache->free_list;
389 remove_free_list(cache, entry);
390 remove_hash_table(cache, entry);
391 }
392
393 /*
394 * Initialise block and insert into the hash table.
395 * Increment used which tracks how many buffers in the
396 * cache are actively in use (the other blocks, count - used,
397 * are in the cache and available for lookup, but can also be
398 * re-used).
399 */
400 entry->block = block;
401 entry->size = size;
402 entry->used = 1;
403 entry->error = FALSE;
404 entry->pending = TRUE;
405 insert_hash_table(cache, entry);
406 cache->used ++;
407
408 /*
409 * queue to read thread to read and ultimately (via the
410 * decompress threads) decompress the buffer
411 */
412 pthread_mutex_unlock(&cache->mutex);
413 queue_put(to_reader, entry);
414 }
415
416 return entry;
417 }
418
419
420 void cache_block_ready(struct cache_entry *entry, int error)
421 {
422 /*
423 * mark cache entry as being complete, reading and (if necessary)
424 * decompression has taken place, and the buffer is valid for use.
425 * If an error occurs reading or decompressing, the buffer also
426 * becomes ready but with an error...
427 */
428 pthread_mutex_lock(&entry->cache->mutex);
429 entry->pending = FALSE;
430 entry->error = error;
431
432 /*
433 * if the wait_pending flag is set, one or more threads may be waiting
434 * on this buffer
435 */
436 if(entry->cache->wait_pending) {
437 entry->cache->wait_pending = FALSE;
438 pthread_cond_broadcast(&entry->cache->wait_for_pending);
439 }
440
441 pthread_mutex_unlock(&entry->cache->mutex);
442 }
443
444
445 void cache_block_wait(struct cache_entry *entry)
446 {
447 /*
448 * wait for this cache entry to become ready, when reading and (if
449 * necessary) decompression has taken place
450 */
451 pthread_mutex_lock(&entry->cache->mutex);
452
453 while(entry->pending) {
454 entry->cache->wait_pending = TRUE;
455 pthread_cond_wait(&entry->cache->wait_for_pending,
456 &entry->cache->mutex);
457 }
458
459 pthread_mutex_unlock(&entry->cache->mutex);
460 }
461
462
463 void cache_block_put(struct cache_entry *entry)
464 {
465 /*
466 	 * finished with this cache entry; once the usage count reaches zero it
467 	 * can be reused and is put onto the free list. As it remains
468 	 * accessible via the hash table it can be found and given a new lease
469 	 * of life before it is reused.
470 */
471 pthread_mutex_lock(&entry->cache->mutex);
472
473 entry->used --;
474 if(entry->used == 0) {
475 insert_free_list(entry->cache, entry);
476 entry->cache->used --;
477
478 /*
479 * if the wait_free flag is set, one or more threads may be
480 * waiting on this buffer
481 */
482 if(entry->cache->wait_free) {
483 entry->cache->wait_free = FALSE;
484 pthread_cond_broadcast(&entry->cache->wait_for_free);
485 }
486 }
487
488 pthread_mutex_unlock(&entry->cache->mutex);
489 }
490
491
492 void dump_cache(struct cache *cache)
493 {
494 pthread_mutex_lock(&cache->mutex);
495
496 printf("Max buffers %d, Current size %d, Used %d, %s\n",
497 cache->max_buffers, cache->count, cache->used,
498 cache->free_list ? "Free buffers" : "No free buffers");
499
500 pthread_mutex_unlock(&cache->mutex);
501 }
502
503
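/*
 * fill str with an ls(1) style permission string (e.g. "drwxr-xr-x")
 * for the given mode. str must be at least 11 bytes
 */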
504 char *modestr(char *str, int mode)
505 {
506 int i;
507
508 strcpy(str, "----------");
509
510 for(i = 0; table[i].mask != 0; i++) {
511 if((mode & table[i].mask) == table[i].value)
512 str[table[i].position] = table[i].mode;
513 }
514
515 return str;
516 }
517
518
519 #define TOTALCHARS 25
520 int print_filename(char *pathname, struct inode *inode)
521 {
522 char str[11], dummy[12], dummy2[12]; /* overflow safe */
523 char *userstr, *groupstr;
524 int padchars;
525 struct passwd *user;
526 struct group *group;
527 struct tm *t;
528
529 if(short_ls) {
530 printf("%s\n", pathname);
531 return 1;
532 }
533
534 user = getpwuid(inode->uid);
535 if(user == NULL) {
536 int res = snprintf(dummy, 12, "%d", inode->uid);
537 if(res < 0)
538 EXIT_UNSQUASH("snprintf failed in print_filename()\n");
539 else if(res >= 12)
540 /* unsigned int shouldn't ever need more than 11 bytes
541 * (including terminating '\0') to print in base 10 */
542 userstr = "*";
543 else
544 userstr = dummy;
545 } else
546 userstr = user->pw_name;
547
548 group = getgrgid(inode->gid);
549 if(group == NULL) {
550 int res = snprintf(dummy2, 12, "%d", inode->gid);
551 if(res < 0)
552 EXIT_UNSQUASH("snprintf failed in print_filename()\n");
553 else if(res >= 12)
554 /* unsigned int shouldn't ever need more than 11 bytes
555 * (including terminating '\0') to print in base 10 */
556 groupstr = "*";
557 else
558 groupstr = dummy2;
559 } else
560 groupstr = group->gr_name;
561
562 printf("%s %s/%s ", modestr(str, inode->mode), userstr, groupstr);
563
564 switch(inode->mode & S_IFMT) {
565 case S_IFREG:
566 case S_IFDIR:
567 case S_IFSOCK:
568 case S_IFIFO:
569 case S_IFLNK:
570 padchars = TOTALCHARS - strlen(userstr) -
571 strlen(groupstr);
572
573 printf("%*lld ", padchars > 0 ? padchars : 0,
574 inode->data);
575 break;
576 case S_IFCHR:
577 case S_IFBLK:
578 padchars = TOTALCHARS - strlen(userstr) -
579 strlen(groupstr) - 7;
580
581 printf("%*s%3d,%3d ", padchars > 0 ? padchars : 0, " ",
582 (int) inode->data >> 8, (int) inode->data &
583 0xff);
584 break;
585 }
586
587 t = localtime(&inode->time);
588
589 printf("%d-%02d-%02d %02d:%02d %s", t->tm_year + 1900, t->tm_mon + 1,
590 t->tm_mday, t->tm_hour, t->tm_min, pathname);
591 if((inode->mode & S_IFMT) == S_IFLNK)
592 printf(" -> %s", inode->symlink);
593 printf("\n");
594
595 return 1;
596 }
597
598
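/*
 * The inode table and directory table hash tables map the on-disk start
 * of each metadata block to its byte offset within the in-memory copy of
 * the table. add_entry() records a mapping and lookup_entry() returns
 * the stored offset, or -1 if the block is unknown.
 */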
599 void add_entry(struct hash_table_entry *hash_table[], long long start,
600 int bytes)
601 {
602 int hash = CALCULATE_HASH(start);
603 struct hash_table_entry *hash_table_entry;
604
605 hash_table_entry = malloc(sizeof(struct hash_table_entry));
606 if(hash_table_entry == NULL)
607 EXIT_UNSQUASH("Out of memory in add_entry\n");
608
609 hash_table_entry->start = start;
610 hash_table_entry->bytes = bytes;
611 hash_table_entry->next = hash_table[hash];
612 hash_table[hash] = hash_table_entry;
613 }
614
615
616 int lookup_entry(struct hash_table_entry *hash_table[], long long start)
617 {
618 int hash = CALCULATE_HASH(start);
619 struct hash_table_entry *hash_table_entry;
620
621 for(hash_table_entry = hash_table[hash]; hash_table_entry;
622 hash_table_entry = hash_table_entry->next)
623
624 if(hash_table_entry->start == start)
625 return hash_table_entry->bytes;
626
627 return -1;
628 }
629
630
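/*
 * read bytes from the filesystem at offset byte into buff, handling
 * short reads and EINTR. Returns TRUE on success, FALSE on EOF or error.
 */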
631 int read_fs_bytes(int fd, long long byte, int bytes, void *buff)
632 {
633 off_t off = byte;
634 int res, count;
635
636 TRACE("read_bytes: reading from position 0x%llx, bytes %d\n", byte,
637 bytes);
638
639 if(lseek(fd, off, SEEK_SET) == -1) {
640 ERROR("Lseek failed because %s\n", strerror(errno));
641 return FALSE;
642 }
643
644 for(count = 0; count < bytes; count += res) {
645 res = read(fd, buff + count, bytes - count);
646 if(res < 1) {
647 if(res == 0) {
648 ERROR("Read on filesystem failed because "
649 "EOF\n");
650 return FALSE;
651 } else if(errno != EINTR) {
652 ERROR("Read on filesystem failed because %s\n",
653 strerror(errno));
654 return FALSE;
655 } else
656 res = 0;
657 }
658 }
659
660 return TRUE;
661 }
662
663
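/*
 * read a metadata block. The two byte header at start encodes the
 * on-disk length and whether the block is compressed; the block is
 * decompressed into block if necessary. Returns the number of bytes
 * placed in block and, if next is non-NULL, the start of the following
 * block. Returns 0 on failure.
 */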
664 int read_block(int fd, long long start, long long *next, int expected,
665 void *block)
666 {
667 unsigned short c_byte;
668 int offset = 2, res, compressed;
669 int outlen = expected ? expected : SQUASHFS_METADATA_SIZE;
670
671 if(swap) {
672 if(read_fs_bytes(fd, start, 2, &c_byte) == FALSE)
673 goto failed;
674 c_byte = (c_byte >> 8) | ((c_byte & 0xff) << 8);
675 } else
676 if(read_fs_bytes(fd, start, 2, &c_byte) == FALSE)
677 goto failed;
678
679 TRACE("read_block: block @0x%llx, %d %s bytes\n", start,
680 SQUASHFS_COMPRESSED_SIZE(c_byte), SQUASHFS_COMPRESSED(c_byte) ?
681 "compressed" : "uncompressed");
682
683 if(SQUASHFS_CHECK_DATA(sBlk.s.flags))
684 offset = 3;
685
686 compressed = SQUASHFS_COMPRESSED(c_byte);
687 c_byte = SQUASHFS_COMPRESSED_SIZE(c_byte);
688
689 /*
690 * The block size should not be larger than
691 * the uncompressed size (or max uncompressed size if
692 * expected is 0)
693 */
694 if(c_byte > outlen)
695 return 0;
696
697 if(compressed) {
698 char buffer[c_byte];
699 int error;
700
701 res = read_fs_bytes(fd, start + offset, c_byte, buffer);
702 if(res == FALSE)
703 goto failed;
704
705 res = compressor_uncompress(comp, block, buffer, c_byte,
706 outlen, &error);
707
708 if(res == -1) {
709 ERROR("%s uncompress failed with error code %d\n",
710 comp->name, error);
711 goto failed;
712 }
713 } else {
714 res = read_fs_bytes(fd, start + offset, c_byte, block);
715 if(res == FALSE)
716 goto failed;
717 res = c_byte;
718 }
719
720 if(next)
721 *next = start + offset + c_byte;
722
723 /*
724 * if expected, then check the (uncompressed) return data
725 * is of the expected size
726 */
727 if(expected && expected != res)
728 return 0;
729 else
730 return res;
731
732 failed:
733 ERROR("read_block: failed to read block @0x%llx\n", start);
734 return FALSE;
735 }
736
737
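/*
 * read a data block at start, whose compressed flag and on-disk length
 * are encoded in size, decompressing it into block if necessary.
 * Returns the number of bytes placed in block, or FALSE on failure.
 */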
738 int read_data_block(long long start, unsigned int size, char *block)
739 {
740 int error, res;
741 int c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
742
743 TRACE("read_data_block: block @0x%llx, %d %s bytes\n", start,
744 c_byte, SQUASHFS_COMPRESSED_BLOCK(size) ? "compressed" :
745 "uncompressed");
746
747 if(SQUASHFS_COMPRESSED_BLOCK(size)) {
748 if(read_fs_bytes(fd, start, c_byte, data) == FALSE)
749 goto failed;
750
751 res = compressor_uncompress(comp, block, data, c_byte,
752 block_size, &error);
753
754 if(res == -1) {
755 ERROR("%s uncompress failed with error code %d\n",
756 comp->name, error);
757 goto failed;
758 }
759
760 return res;
761 } else {
762 if(read_fs_bytes(fd, start, c_byte, block) == FALSE)
763 goto failed;
764
765 return c_byte;
766 }
767
768 failed:
769 ERROR("read_data_block: failed to read block @0x%llx, size %d\n", start,
770 c_byte);
771 return FALSE;
772 }
773
774
775 int read_inode_table(long long start, long long end)
776 {
777 int size = 0, bytes = 0, res;
778
779 TRACE("read_inode_table: start %lld, end %lld\n", start, end);
780
781 while(start < end) {
782 if(size - bytes < SQUASHFS_METADATA_SIZE) {
783 inode_table = realloc(inode_table, size +=
784 SQUASHFS_METADATA_SIZE);
785 if(inode_table == NULL) {
786 ERROR("Out of memory in read_inode_table\n");
787 goto failed;
788 }
789 }
790
791 add_entry(inode_table_hash, start, bytes);
792
793 res = read_block(fd, start, &start, 0, inode_table + bytes);
794 if(res == 0) {
795 ERROR("read_inode_table: failed to read block\n");
796 goto failed;
797 }
798 bytes += res;
799
800 /*
801 * If this is not the last metadata block in the inode table
802 * then it should be SQUASHFS_METADATA_SIZE in size.
803 * Note, we can't use expected in read_block() above for this
804 * because we don't know if this is the last block until
805 * after reading.
806 */
807 if(start != end && res != SQUASHFS_METADATA_SIZE) {
808 ERROR("read_inode_table: metadata block should be %d "
809 "bytes in length, it is %d bytes\n",
810 SQUASHFS_METADATA_SIZE, res);
811
812 goto failed;
813 }
814 }
815
816 return TRUE;
817
818 failed:
819 free(inode_table);
820 return FALSE;
821 }
822
823
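/*
 * restore the mtime, ownership (when running as root), mode and xattrs
 * of an extracted file or directory
 */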
824 int set_attributes(char *pathname, int mode, uid_t uid, gid_t guid, time_t time,
825 unsigned int xattr, unsigned int set_mode)
826 {
827 struct utimbuf times = { time, time };
828
829 if(utime(pathname, &times) == -1) {
830 ERROR("set_attributes: failed to set time on %s, because %s\n",
831 pathname, strerror(errno));
832 return FALSE;
833 }
834
835 if(root_process) {
836 if(chown(pathname, uid, guid) == -1) {
837 ERROR("set_attributes: failed to change uid and gids "
838 "on %s, because %s\n", pathname,
839 strerror(errno));
840 return FALSE;
841 }
842 } else
843 mode &= ~07000;
844
845 if((set_mode || (mode & 07000)) && chmod(pathname, (mode_t) mode) == -1) {
846 ERROR("set_attributes: failed to change mode %s, because %s\n",
847 pathname, strerror(errno));
848 return FALSE;
849 }
850
851 write_xattr(pathname, xattr);
852
853 return TRUE;
854 }
855
856
857 int write_bytes(int fd, char *buff, int bytes)
858 {
859 int res, count;
860
861 for(count = 0; count < bytes; count += res) {
862 res = write(fd, buff + count, bytes - count);
863 if(res == -1) {
864 if(errno != EINTR) {
865 ERROR("Write on output file failed because "
866 "%s\n", strerror(errno));
867 return -1;
868 }
869 res = 0;
870 }
871 }
872
873 return 0;
874 }
875
876
877 int lseek_broken = FALSE;
878 char *zero_data = NULL;
879
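/*
 * write a block to the output file, preceded by a hole of hole bytes.
 * For sparse files the hole is created by seeking over it; if sparse
 * output is disabled, or lseek() cannot seek beyond the end of the file,
 * the hole is filled with explicit blocks of zeros instead.
 */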
880 int write_block(int file_fd, char *buffer, int size, long long hole, int sparse)
881 {
882 off_t off = hole;
883
884 if(hole) {
885 if(sparse && lseek_broken == FALSE) {
886 int error = lseek(file_fd, off, SEEK_CUR);
887 if(error == -1)
888 /* failed to seek beyond end of file */
889 lseek_broken = TRUE;
890 }
891
892 if((sparse == FALSE || lseek_broken) && zero_data == NULL) {
893 if((zero_data = malloc(block_size)) == NULL)
894 EXIT_UNSQUASH("write_block: failed to alloc "
895 "zero data block\n");
896 memset(zero_data, 0, block_size);
897 }
898
899 if(sparse == FALSE || lseek_broken) {
900 int blocks = (hole + block_size -1) / block_size;
901 int avail_bytes, i;
902 for(i = 0; i < blocks; i++, hole -= avail_bytes) {
903 avail_bytes = hole > block_size ? block_size :
904 hole;
905 if(write_bytes(file_fd, zero_data, avail_bytes)
906 == -1)
907 goto failure;
908 }
909 }
910 }
911
912 if(write_bytes(file_fd, buffer, size) == -1)
913 goto failure;
914
915 return TRUE;
916
917 failure:
918 return FALSE;
919 }
920
921
922 pthread_mutex_t open_mutex = PTHREAD_MUTEX_INITIALIZER;
923 pthread_cond_t open_empty = PTHREAD_COND_INITIALIZER;
924 int open_unlimited, open_count;
925 #define OPEN_FILE_MARGIN 10
926
927
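/*
 * open_wait() and close_wake() limit the number of files held open
 * simultaneously to the count passed to open_init() (-1 means
 * unlimited), blocking open_wait() until a descriptor is released.
 */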
928 void open_init(int count)
929 {
930 open_count = count;
931 open_unlimited = count == -1;
932 }
933
934
935 int open_wait(char *pathname, int flags, mode_t mode)
936 {
937 if (!open_unlimited) {
938 pthread_mutex_lock(&open_mutex);
939 while (open_count == 0)
940 pthread_cond_wait(&open_empty, &open_mutex);
941 open_count --;
942 pthread_mutex_unlock(&open_mutex);
943 }
944
945 return open(pathname, flags, mode);
946 }
947
948
949 void close_wake(int fd)
950 {
951 close(fd);
952
953 if (!open_unlimited) {
954 pthread_mutex_lock(&open_mutex);
955 open_count ++;
956 pthread_cond_signal(&open_empty);
957 pthread_mutex_unlock(&open_mutex);
958 }
959 }
960
961
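/*
 * build a squashfs_file structure describing the regular file being
 * extracted and queue it to the writer thread
 */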
962 void queue_file(char *pathname, int file_fd, struct inode *inode)
963 {
964 struct squashfs_file *file = malloc(sizeof(struct squashfs_file));
965 if(file == NULL)
966 EXIT_UNSQUASH("queue_file: unable to malloc file\n");
967
968 file->fd = file_fd;
969 file->file_size = inode->data;
970 file->mode = inode->mode;
971 file->gid = inode->gid;
972 file->uid = inode->uid;
973 file->time = inode->time;
974 file->pathname = strdup(pathname);
975 file->blocks = inode->blocks + (inode->frag_bytes > 0);
976 file->sparse = inode->sparse;
977 file->xattr = inode->xattr;
978 queue_put(to_writer, file);
979 }
980
981
982 void queue_dir(char *pathname, struct dir *dir)
983 {
984 struct squashfs_file *file = malloc(sizeof(struct squashfs_file));
985 if(file == NULL)
986 EXIT_UNSQUASH("queue_dir: unable to malloc file\n");
987
988 file->fd = -1;
989 file->mode = dir->mode;
990 file->gid = dir->guid;
991 file->uid = dir->uid;
992 file->time = dir->mtime;
993 file->pathname = strdup(pathname);
994 file->xattr = dir->xattr;
995 queue_put(to_writer, file);
996 }
997
998
999 int write_file(struct inode *inode, char *pathname)
1000 {
1001 int file_fd, i;
1002 unsigned int *block_list;
1003 int file_end = inode->data / block_size;
1004 long long start = inode->start;
1005
1006 TRACE("write_file: regular file, blocks %d\n", inode->blocks);
1007
1008 file_fd = open_wait(pathname, O_CREAT | O_WRONLY |
1009 (force ? O_TRUNC : 0), (mode_t) inode->mode & 0777);
1010 if(file_fd == -1) {
1011 ERROR("write_file: failed to create file %s, because %s\n",
1012 pathname, strerror(errno));
1013 return FALSE;
1014 }
1015
1016 block_list = malloc(inode->blocks * sizeof(unsigned int));
1017 if(block_list == NULL)
1018 EXIT_UNSQUASH("write_file: unable to malloc block list\n");
1019
1020 s_ops.read_block_list(block_list, inode->block_ptr, inode->blocks);
1021
1022 /*
1023 	 * a squashfs_file structure describing the file is queued to the writer
1024 	 * thread. If the file has one or more blocks or a fragment, they are
1025 	 * queued separately (as references to blocks in the cache).
1026 */
1027 queue_file(pathname, file_fd, inode);
1028
1029 for(i = 0; i < inode->blocks; i++) {
1030 int c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(block_list[i]);
1031 struct file_entry *block = malloc(sizeof(struct file_entry));
1032
1033 if(block == NULL)
1034 EXIT_UNSQUASH("write_file: unable to malloc file\n");
1035 block->offset = 0;
1036 block->size = i == file_end ? inode->data & (block_size - 1) :
1037 block_size;
1038 if(block_list[i] == 0) /* sparse block */
1039 block->buffer = NULL;
1040 else {
1041 block->buffer = cache_get(data_cache, start,
1042 block_list[i]);
1043 start += c_byte;
1044 }
1045 queue_put(to_writer, block);
1046 }
1047
1048 if(inode->frag_bytes) {
1049 int size;
1050 long long start;
1051 struct file_entry *block = malloc(sizeof(struct file_entry));
1052
1053 if(block == NULL)
1054 EXIT_UNSQUASH("write_file: unable to malloc file\n");
1055 s_ops.read_fragment(inode->fragment, &start, &size);
1056 block->buffer = cache_get(fragment_cache, start, size);
1057 block->offset = inode->offset;
1058 block->size = inode->frag_bytes;
1059 queue_put(to_writer, block);
1060 }
1061
1062 free(block_list);
1063 return TRUE;
1064 }
1065
1066
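/*
 * create the extracted object (regular file, symlink, device or fifo)
 * for inode i at pathname. Hard links are detected via the
 * created_inode[] array, which records the first pathname created for
 * each inode number.
 */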
1067 int create_inode(char *pathname, struct inode *i)
1068 {
1069 TRACE("create_inode: pathname %s\n", pathname);
1070
1071 if(created_inode[i->inode_number - 1]) {
1072 TRACE("create_inode: hard link\n");
1073 if(force)
1074 unlink(pathname);
1075
1076 if(link(created_inode[i->inode_number - 1], pathname) == -1) {
1077 ERROR("create_inode: failed to create hardlink, "
1078 "because %s\n", strerror(errno));
1079 return FALSE;
1080 }
1081
1082 return TRUE;
1083 }
1084
1085 switch(i->type) {
1086 case SQUASHFS_FILE_TYPE:
1087 case SQUASHFS_LREG_TYPE:
1088 TRACE("create_inode: regular file, file_size %lld, "
1089 "blocks %d\n", i->data, i->blocks);
1090
1091 if(write_file(i, pathname))
1092 file_count ++;
1093 break;
1094 case SQUASHFS_SYMLINK_TYPE:
1095 case SQUASHFS_LSYMLINK_TYPE:
1096 TRACE("create_inode: symlink, symlink_size %lld\n",
1097 i->data);
1098
1099 if(force)
1100 unlink(pathname);
1101
1102 if(symlink(i->symlink, pathname) == -1) {
1103 ERROR("create_inode: failed to create symlink "
1104 "%s, because %s\n", pathname,
1105 strerror(errno));
1106 break;
1107 }
1108
1109 write_xattr(pathname, i->xattr);
1110
1111 if(root_process) {
1112 if(lchown(pathname, i->uid, i->gid) == -1)
1113 ERROR("create_inode: failed to change "
1114 "uid and gids on %s, because "
1115 "%s\n", pathname,
1116 strerror(errno));
1117 }
1118
1119 sym_count ++;
1120 break;
1121 case SQUASHFS_BLKDEV_TYPE:
1122 case SQUASHFS_CHRDEV_TYPE:
1123 case SQUASHFS_LBLKDEV_TYPE:
1124 case SQUASHFS_LCHRDEV_TYPE: {
1125 int chrdev = i->type == SQUASHFS_CHRDEV_TYPE;
1126 TRACE("create_inode: dev, rdev 0x%llx\n", i->data);
1127
1128 if(root_process) {
1129 if(force)
1130 unlink(pathname);
1131
1132 if(mknod(pathname, chrdev ? S_IFCHR : S_IFBLK,
1133 makedev((i->data >> 8) & 0xff,
1134 i->data & 0xff)) == -1) {
1135 ERROR("create_inode: failed to create "
1136 "%s device %s, because %s\n",
1137 chrdev ? "character" : "block",
1138 pathname, strerror(errno));
1139 break;
1140 }
1141 set_attributes(pathname, i->mode, i->uid,
1142 i->gid, i->time, i->xattr, TRUE);
1143 dev_count ++;
1144 } else
1145 ERROR("create_inode: could not create %s "
1146 "device %s, because you're not "
1147 "superuser!\n", chrdev ? "character" :
1148 "block", pathname);
1149 break;
1150 }
1151 case SQUASHFS_FIFO_TYPE:
1152 case SQUASHFS_LFIFO_TYPE:
1153 TRACE("create_inode: fifo\n");
1154
1155 if(force)
1156 unlink(pathname);
1157
1158 if(mknod(pathname, S_IFIFO, 0) == -1) {
1159 ERROR("create_inode: failed to create fifo %s, "
1160 "because %s\n", pathname,
1161 strerror(errno));
1162 break;
1163 }
1164 set_attributes(pathname, i->mode, i->uid, i->gid,
1165 i->time, i->xattr, TRUE);
1166 fifo_count ++;
1167 break;
1168 case SQUASHFS_SOCKET_TYPE:
1169 case SQUASHFS_LSOCKET_TYPE:
1170 TRACE("create_inode: socket\n");
1171 ERROR("create_inode: socket %s ignored\n", pathname);
1172 break;
1173 default:
1174 ERROR("Unknown inode type %d in create_inode_table!\n",
1175 i->type);
1176 return FALSE;
1177 }
1178
1179 created_inode[i->inode_number - 1] = strdup(pathname);
1180
1181 return TRUE;
1182 }
1183
1184
1185 int read_directory_table(long long start, long long end)
1186 {
1187 int bytes = 0, size = 0, res;
1188
1189 TRACE("read_directory_table: start %lld, end %lld\n", start, end);
1190
1191 while(start < end) {
1192 if(size - bytes < SQUASHFS_METADATA_SIZE) {
1193 directory_table = realloc(directory_table, size +=
1194 SQUASHFS_METADATA_SIZE);
1195 if(directory_table == NULL) {
1196 ERROR("Out of memory in "
1197 "read_directory_table\n");
1198 goto failed;
1199 }
1200 }
1201
1202 add_entry(directory_table_hash, start, bytes);
1203
1204 res = read_block(fd, start, &start, 0, directory_table + bytes);
1205 if(res == 0) {
1206 ERROR("read_directory_table: failed to read block\n");
1207 goto failed;
1208 }
1209
1210 bytes += res;
1211
1212 /*
1213 * If this is not the last metadata block in the directory table
1214 * then it should be SQUASHFS_METADATA_SIZE in size.
1215 * Note, we can't use expected in read_block() above for this
1216 * because we don't know if this is the last block until
1217 * after reading.
1218 */
1219 if(start != end && res != SQUASHFS_METADATA_SIZE) {
1220 ERROR("read_directory_table: metadata block "
1221 "should be %d bytes in length, it is %d "
1222 "bytes\n", SQUASHFS_METADATA_SIZE, res);
1223 goto failed;
1224 }
1225 }
1226
1227 return TRUE;
1228
1229 failed:
1230 free(directory_table);
1231 return FALSE;
1232 }
1233
1234
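/*
 * return the next directory entry in name, start_block, offset and type,
 * or FALSE when the end of the directory has been reached
 */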
1235 int squashfs_readdir(struct dir *dir, char **name, unsigned int *start_block,
1236 unsigned int *offset, unsigned int *type)
1237 {
1238 if(dir->cur_entry == dir->dir_count)
1239 return FALSE;
1240
1241 *name = dir->dirs[dir->cur_entry].name;
1242 *start_block = dir->dirs[dir->cur_entry].start_block;
1243 *offset = dir->dirs[dir->cur_entry].offset;
1244 *type = dir->dirs[dir->cur_entry].type;
1245 dir->cur_entry ++;
1246
1247 return TRUE;
1248 }
1249
1250
1251 void squashfs_closedir(struct dir *dir)
1252 {
1253 free(dir->dirs);
1254 free(dir);
1255 }
1256
1257
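/*
 * return the first pathname component of target in *targname (newly
 * allocated), and a pointer to the remainder of the string
 */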
1258 char *get_component(char *target, char **targname)
1259 {
1260 char *start;
1261
1262 while(*target == '/')
1263 target ++;
1264
1265 start = target;
1266 while(*target != '/' && *target != '\0')
1267 target ++;
1268
1269 *targname = strndup(start, target - start);
1270
1271 while(*target == '/')
1272 target ++;
1273
1274 return target;
1275 }
1276
1277
1278 void free_path(struct pathname *paths)
1279 {
1280 int i;
1281
1282 for(i = 0; i < paths->names; i++) {
1283 if(paths->name[i].paths)
1284 free_path(paths->name[i].paths);
1285 free(paths->name[i].name);
1286 if(paths->name[i].preg) {
1287 regfree(paths->name[i].preg);
1288 free(paths->name[i].preg);
1289 }
1290 }
1291
1292 free(paths);
1293 }
1294
1295
1296 struct pathname *add_path(struct pathname *paths, char *target, char *alltarget)
1297 {
1298 char *targname;
1299 int i, error;
1300
1301 TRACE("add_path: adding \"%s\" extract file\n", target);
1302
1303 target = get_component(target, &targname);
1304
1305 if(paths == NULL) {
1306 paths = malloc(sizeof(struct pathname));
1307 if(paths == NULL)
1308 EXIT_UNSQUASH("failed to allocate paths\n");
1309
1310 paths->names = 0;
1311 paths->name = NULL;
1312 }
1313
1314 for(i = 0; i < paths->names; i++)
1315 if(strcmp(paths->name[i].name, targname) == 0)
1316 break;
1317
1318 if(i == paths->names) {
1319 /*
1320 * allocate new name entry
1321 */
1322 paths->names ++;
1323 paths->name = realloc(paths->name, (i + 1) *
1324 sizeof(struct path_entry));
1325 if(paths->name == NULL)
1326 EXIT_UNSQUASH("Out of memory in add_path\n");
1327 paths->name[i].name = targname;
1328 paths->name[i].paths = NULL;
1329 if(use_regex) {
1330 paths->name[i].preg = malloc(sizeof(regex_t));
1331 if(paths->name[i].preg == NULL)
1332 EXIT_UNSQUASH("Out of memory in add_path\n");
1333 error = regcomp(paths->name[i].preg, targname,
1334 REG_EXTENDED|REG_NOSUB);
1335 if(error) {
1336 char str[1024]; /* overflow safe */
1337
1338 regerror(error, paths->name[i].preg, str, 1024);
1339 EXIT_UNSQUASH("invalid regex %s in export %s, "
1340 "because %s\n", targname, alltarget,
1341 str);
1342 }
1343 } else
1344 paths->name[i].preg = NULL;
1345
1346 if(target[0] == '\0')
1347 /*
1348 * at leaf pathname component
1349 */
1350 paths->name[i].paths = NULL;
1351 else
1352 /*
1353 * recurse adding child components
1354 */
1355 paths->name[i].paths = add_path(NULL, target, alltarget);
1356 } else {
1357 /*
1358 * existing matching entry
1359 */
1360 free(targname);
1361
1362 if(paths->name[i].paths == NULL) {
1363 /*
1364 			 * No sub-directory exists, which means this is the leaf
1365 			 * component of a pre-existing extract that subsumes the
1366 			 * extract currently being added, in which case stop
1367 			 * adding components
1368 */
1369 } else if(target[0] == '\0') {
1370 /*
1371 * at leaf pathname component and child components exist
1372 * from more specific extracts, delete as they're
1373 * subsumed by this extract
1374 */
1375 free_path(paths->name[i].paths);
1376 paths->name[i].paths = NULL;
1377 } else
1378 /*
1379 * recurse adding child components
1380 */
1381 add_path(paths->name[i].paths, target, alltarget);
1382 }
1383
1384 return paths;
1385 }
1386
1387
1388 struct pathnames *init_subdir()
1389 {
1390 struct pathnames *new = malloc(sizeof(struct pathnames));
1391 if(new == NULL)
1392 EXIT_UNSQUASH("Out of memory in init_subdir\n");
1393 new->count = 0;
1394 return new;
1395 }
1396
1397
1398 struct pathnames *add_subdir(struct pathnames *paths, struct pathname *path)
1399 {
1400 if(paths->count % PATHS_ALLOC_SIZE == 0) {
1401 paths = realloc(paths, sizeof(struct pathnames *) +
1402 (paths->count + PATHS_ALLOC_SIZE) *
1403 sizeof(struct pathname *));
1404 if(paths == NULL)
1405 EXIT_UNSQUASH("Out of memory in add_subdir\n");
1406 }
1407
1408 paths->path[paths->count++] = path;
1409 return paths;
1410 }
1411
1412
1413 void free_subdir(struct pathnames *paths)
1414 {
1415 free(paths);
1416 }
1417
1418
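/*
 * determine whether name matches the set of extract paths. If it does,
 * return TRUE and set *new to the subset of paths to use when scanning
 * the corresponding sub-directory (NULL meaning everything below it
 * matches); otherwise return FALSE.
 */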
1419 int matches(struct pathnames *paths, char *name, struct pathnames **new)
1420 {
1421 int i, n;
1422
1423 if(paths == NULL) {
1424 *new = NULL;
1425 return TRUE;
1426 }
1427
1428 *new = init_subdir();
1429
1430 for(n = 0; n < paths->count; n++) {
1431 struct pathname *path = paths->path[n];
1432 for(i = 0; i < path->names; i++) {
1433 int match = use_regex ?
1434 regexec(path->name[i].preg, name, (size_t) 0,
1435 NULL, 0) == 0 : fnmatch(path->name[i].name,
1436 name, FNM_PATHNAME|FNM_PERIOD|FNM_EXTMATCH) ==
1437 0;
1438 if(match && path->name[i].paths == NULL)
1439 /*
1440 * match on a leaf component, any subdirectories
1441 * will implicitly match, therefore return an
1442 * empty new search set
1443 */
1444 goto empty_set;
1445
1446 if(match)
1447 /*
1448 * match on a non-leaf component, add any
1449 * subdirectories to the new set of
1450 * subdirectories to scan for this name
1451 */
1452 *new = add_subdir(*new, path->name[i].paths);
1453 }
1454 }
1455
1456 if((*new)->count == 0) {
1457 /*
1458 * no matching names found, delete empty search set, and return
1459 * FALSE
1460 */
1461 free_subdir(*new);
1462 *new = NULL;
1463 return FALSE;
1464 }
1465
1466 /*
1467 * one or more matches with sub-directories found (no leaf matches),
1468 * return new search set and return TRUE
1469 */
1470 return TRUE;
1471
1472 empty_set:
1473 /*
1474 * found matching leaf exclude, return empty search set and return TRUE
1475 */
1476 free_subdir(*new);
1477 *new = NULL;
1478 return TRUE;
1479 }
1480
1481
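/*
 * walk the directory tree without extracting anything, computing the
 * total number of inodes, files and blocks that will be processed so
 * that extraction progress can be reported
 */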
1482 void pre_scan(char *parent_name, unsigned int start_block, unsigned int offset,
1483 struct pathnames *paths)
1484 {
1485 unsigned int type;
1486 char *name;
1487 struct pathnames *new;
1488 struct inode *i;
1489 struct dir *dir = s_ops.squashfs_opendir(start_block, offset, &i);
1490
1491 if(dir == NULL)
1492 return;
1493
1494 while(squashfs_readdir(dir, &name, &start_block, &offset, &type)) {
1495 struct inode *i;
1496 char *pathname;
1497 int res;
1498
1499 TRACE("pre_scan: name %s, start_block %d, offset %d, type %d\n",
1500 name, start_block, offset, type);
1501
1502 if(!matches(paths, name, &new))
1503 continue;
1504
1505 res = asprintf(&pathname, "%s/%s", parent_name, name);
1506 if(res == -1)
1507 EXIT_UNSQUASH("asprintf failed in dir_scan\n");
1508
1509 if(type == SQUASHFS_DIR_TYPE)
1510 pre_scan(parent_name, start_block, offset, new);
1511 else if(new == NULL) {
1512 if(type == SQUASHFS_FILE_TYPE ||
1513 type == SQUASHFS_LREG_TYPE) {
1514 i = s_ops.read_inode(start_block, offset);
1515 if(created_inode[i->inode_number - 1] == NULL) {
1516 created_inode[i->inode_number - 1] =
1517 (char *) i;
1518 total_blocks += (i->data +
1519 (block_size - 1)) >> block_log;
1520 }
1521 total_files ++;
1522 }
1523 total_inodes ++;
1524 }
1525
1526 free_subdir(new);
1527 free(pathname);
1528 }
1529
1530 squashfs_closedir(dir);
1531 }
1532
1533
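/*
 * recursively scan the directory at start_block/offset, creating it on
 * disk (unless only listing), and listing and/or extracting each entry
 * that matches the extract paths
 */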
1534 void dir_scan(char *parent_name, unsigned int start_block, unsigned int offset,
1535 struct pathnames *paths)
1536 {
1537 unsigned int type;
1538 char *name;
1539 struct pathnames *new;
1540 struct inode *i;
1541 struct dir *dir = s_ops.squashfs_opendir(start_block, offset, &i);
1542
1543 if(dir == NULL) {
1544 ERROR("dir_scan: failed to read directory %s, skipping\n",
1545 parent_name);
1546 return;
1547 }
1548
1549 if(lsonly || info)
1550 print_filename(parent_name, i);
1551
1552 if(!lsonly) {
1553 /*
1554 * Make directory with default User rwx permissions rather than
1555 * the permissions from the filesystem, as these may not have
1556 * write/execute permission. These are fixed up later in
1557 * set_attributes().
1558 */
1559 int res = mkdir(parent_name, S_IRUSR|S_IWUSR|S_IXUSR);
1560 if(res == -1) {
1561 /*
1562 * Skip directory if mkdir fails, unless we're
1563 			 * forcing and the error is EEXIST
1564 */
1565 if(!force || errno != EEXIST) {
1566 ERROR("dir_scan: failed to make directory %s, "
1567 "because %s\n", parent_name,
1568 strerror(errno));
1569 squashfs_closedir(dir);
1570 return;
1571 }
1572
1573 /*
1574 * Try to change permissions of existing directory so
1575 * that we can write to it
1576 */
1577 res = chmod(parent_name, S_IRUSR|S_IWUSR|S_IXUSR);
1578 if (res == -1)
1579 ERROR("dir_scan: failed to change permissions "
1580 "for directory %s, because %s\n",
1581 parent_name, strerror(errno));
1582 }
1583 }
1584
1585 while(squashfs_readdir(dir, &name, &start_block, &offset, &type)) {
1586 char *pathname;
1587 int res;
1588
1589 TRACE("dir_scan: name %s, start_block %d, offset %d, type %d\n",
1590 name, start_block, offset, type);
1591
1592
1593 if(!matches(paths, name, &new))
1594 continue;
1595
1596 res = asprintf(&pathname, "%s/%s", parent_name, name);
1597 if(res == -1)
1598 EXIT_UNSQUASH("asprintf failed in dir_scan\n");
1599
1600 if(type == SQUASHFS_DIR_TYPE) {
1601 dir_scan(pathname, start_block, offset, new);
1602 free(pathname);
1603 } else if(new == NULL) {
1604 update_info(pathname);
1605
1606 i = s_ops.read_inode(start_block, offset);
1607
1608 if(lsonly || info)
1609 print_filename(pathname, i);
1610
1611 if(!lsonly)
1612 create_inode(pathname, i);
1613
1614 if(i->type == SQUASHFS_SYMLINK_TYPE ||
1615 i->type == SQUASHFS_LSYMLINK_TYPE)
1616 free(i->symlink);
1617 } else
1618 free(pathname);
1619
1620 free_subdir(new);
1621 }
1622
1623 if(!lsonly)
1624 queue_dir(parent_name, dir);
1625
1626 squashfs_closedir(dir);
1627 dir_count ++;
1628 }
1629
1630
1631 void squashfs_stat(char *source)
1632 {
1633 time_t mkfs_time = (time_t) sBlk.s.mkfs_time;
1634 char *mkfs_str = ctime(&mkfs_time);
1635
1636 #if __BYTE_ORDER == __BIG_ENDIAN
1637 printf("Found a valid %sSQUASHFS %d:%d superblock on %s.\n",
1638 sBlk.s.s_major == 4 ? "" : swap ? "little endian " :
1639 "big endian ", sBlk.s.s_major, sBlk.s.s_minor, source);
1640 #else
1641 printf("Found a valid %sSQUASHFS %d:%d superblock on %s.\n",
1642 sBlk.s.s_major == 4 ? "" : swap ? "big endian " :
1643 "little endian ", sBlk.s.s_major, sBlk.s.s_minor, source);
1644 #endif
1645
1646 printf("Creation or last append time %s", mkfs_str ? mkfs_str :
1647 "failed to get time\n");
1648 printf("Filesystem size %.2f Kbytes (%.2f Mbytes)\n",
1649 sBlk.s.bytes_used / 1024.0, sBlk.s.bytes_used /
1650 (1024.0 * 1024.0));
1651
1652 if(sBlk.s.s_major == 4) {
1653 printf("Compression %s\n", comp->name);
1654
1655 if(SQUASHFS_COMP_OPTS(sBlk.s.flags)) {
1656 char buffer[SQUASHFS_METADATA_SIZE] __attribute__ ((aligned));
1657 int bytes;
1658
1659 bytes = read_block(fd, sizeof(sBlk.s), NULL, 0, buffer);
1660 if(bytes == 0) {
1661 ERROR("Failed to read compressor options\n");
1662 return;
1663 }
1664
1665 compressor_display_options(comp, buffer, bytes);
1666 }
1667 }
1668
1669 printf("Block size %d\n", sBlk.s.block_size);
1670 printf("Filesystem is %sexportable via NFS\n",
1671 SQUASHFS_EXPORTABLE(sBlk.s.flags) ? "" : "not ");
1672 printf("Inodes are %scompressed\n",
1673 SQUASHFS_UNCOMPRESSED_INODES(sBlk.s.flags) ? "un" : "");
1674 printf("Data is %scompressed\n",
1675 SQUASHFS_UNCOMPRESSED_DATA(sBlk.s.flags) ? "un" : "");
1676
1677 if(sBlk.s.s_major > 1) {
1678 if(SQUASHFS_NO_FRAGMENTS(sBlk.s.flags))
1679 printf("Fragments are not stored\n");
1680 else {
1681 printf("Fragments are %scompressed\n",
1682 SQUASHFS_UNCOMPRESSED_FRAGMENTS(sBlk.s.flags) ?
1683 "un" : "");
1684 printf("Always-use-fragments option is %sspecified\n",
1685 SQUASHFS_ALWAYS_FRAGMENTS(sBlk.s.flags) ? "" :
1686 "not ");
1687 }
1688 }
1689
1690 if(sBlk.s.s_major == 4) {
1691 if(SQUASHFS_NO_XATTRS(sBlk.s.flags))
1692 printf("Xattrs are not stored\n");
1693 else
1694 printf("Xattrs are %scompressed\n",
1695 SQUASHFS_UNCOMPRESSED_XATTRS(sBlk.s.flags) ?
1696 "un" : "");
1697 }
1698
1699 if(sBlk.s.s_major < 4)
1700 printf("Check data is %spresent in the filesystem\n",
1701 SQUASHFS_CHECK_DATA(sBlk.s.flags) ? "" :
1702 "not ");
1703
1704 if(sBlk.s.s_major > 1)
1705 printf("Duplicates are %sremoved\n",
1706 SQUASHFS_DUPLICATES(sBlk.s.flags) ? "" : "not ");
1707 else
1708 printf("Duplicates are removed\n");
1709
1710 if(sBlk.s.s_major > 1)
1711 printf("Number of fragments %d\n", sBlk.s.fragments);
1712
1713 printf("Number of inodes %d\n", sBlk.s.inodes);
1714
1715 if(sBlk.s.s_major == 4)
1716 printf("Number of ids %d\n", sBlk.s.no_ids);
1717 else {
1718 printf("Number of uids %d\n", sBlk.no_uids);
1719 printf("Number of gids %d\n", sBlk.no_guids);
1720 }
1721
1722 TRACE("sBlk.s.inode_table_start 0x%llx\n", sBlk.s.inode_table_start);
1723 TRACE("sBlk.s.directory_table_start 0x%llx\n",
1724 sBlk.s.directory_table_start);
1725
1726 if(sBlk.s.s_major > 1)
1727 TRACE("sBlk.s.fragment_table_start 0x%llx\n\n",
1728 sBlk.s.fragment_table_start);
1729
1730 if(sBlk.s.s_major > 2)
1731 TRACE("sBlk.s.lookup_table_start 0x%llx\n\n",
1732 sBlk.s.lookup_table_start);
1733
1734 if(sBlk.s.s_major == 4) {
1735 TRACE("sBlk.s.id_table_start 0x%llx\n", sBlk.s.id_table_start);
1736 TRACE("sBlk.s.xattr_id_table_start 0x%llx\n",
1737 sBlk.s.xattr_id_table_start);
1738 } else {
1739 TRACE("sBlk.uid_start 0x%llx\n", sBlk.uid_start);
1740 TRACE("sBlk.guid_start 0x%llx\n", sBlk.guid_start);
1741 }
1742 }
1743
1744
1745 int check_compression(struct compressor *comp)
1746 {
1747 int res, bytes = 0;
1748 char buffer[SQUASHFS_METADATA_SIZE] __attribute__ ((aligned));
1749
1750 if(!comp->supported) {
1751 ERROR("Filesystem uses %s compression, this is "
1752 "unsupported by this version\n", comp->name);
1753 ERROR("Decompressors available:\n");
1754 display_compressors("", "");
1755 return 0;
1756 }
1757
1758 /*
1759 * Read compression options from disk if present, and pass to
1760 * the compressor to ensure we know how to decompress a filesystem
1761 * compressed with these compression options.
1762 *
1763 	 * Note, even if there are no compression options we still call the
1764 * compressor because some compression options may be mandatory
1765 * for some compressors.
1766 */
1767 if(SQUASHFS_COMP_OPTS(sBlk.s.flags)) {
1768 bytes = read_block(fd, sizeof(sBlk.s), NULL, 0, buffer);
1769 if(bytes == 0) {
1770 ERROR("Failed to read compressor options\n");
1771 return 0;
1772 }
1773 }
1774
1775 res = compressor_check_options(comp, sBlk.s.block_size, buffer, bytes);
1776
1777 return res != -1;
1778 }
1779
1780
1781 int read_super(char *source)
1782 {
1783 squashfs_super_block_3 sBlk_3;
1784 struct squashfs_super_block sBlk_4;
1785
1786 /*
1787 * Try to read a Squashfs 4 superblock
1788 */
1789 read_fs_bytes(fd, SQUASHFS_START, sizeof(struct squashfs_super_block),
1790 &sBlk_4);
1791 swap = sBlk_4.s_magic != SQUASHFS_MAGIC;
1792 SQUASHFS_INSWAP_SUPER_BLOCK(&sBlk_4);
1793
1794 if(sBlk_4.s_magic == SQUASHFS_MAGIC && sBlk_4.s_major == 4 &&
1795 sBlk_4.s_minor == 0) {
1796 s_ops.squashfs_opendir = squashfs_opendir_4;
1797 s_ops.read_fragment = read_fragment_4;
1798 s_ops.read_fragment_table = read_fragment_table_4;
1799 s_ops.read_block_list = read_block_list_2;
1800 s_ops.read_inode = read_inode_4;
1801 s_ops.read_uids_guids = read_uids_guids_4;
1802 memcpy(&sBlk, &sBlk_4, sizeof(sBlk_4));
1803
1804 /*
1805 * Check the compression type
1806 */
1807 comp = lookup_compressor_id(sBlk.s.compression);
1808 return TRUE;
1809 }
1810
1811 /*
1812 * Not a Squashfs 4 superblock, try to read a squashfs 3 superblock
1813 * (compatible with 1 and 2 filesystems)
1814 */
1815 read_fs_bytes(fd, SQUASHFS_START, sizeof(squashfs_super_block_3),
1816 &sBlk_3);
1817
1818 /*
1819 * Check it is a SQUASHFS superblock
1820 */
1821 swap = 0;
1822 if(sBlk_3.s_magic != SQUASHFS_MAGIC) {
1823 if(sBlk_3.s_magic == SQUASHFS_MAGIC_SWAP) {
1824 squashfs_super_block_3 sblk;
1825 ERROR("Reading a different endian SQUASHFS filesystem "
1826 "on %s\n", source);
1827 SQUASHFS_SWAP_SUPER_BLOCK_3(&sblk, &sBlk_3);
1828 memcpy(&sBlk_3, &sblk, sizeof(squashfs_super_block_3));
1829 swap = 1;
1830 } else {
1831 ERROR("Can't find a SQUASHFS superblock on %s\n",
1832 source);
1833 goto failed_mount;
1834 }
1835 }
1836
1837 sBlk.s.s_magic = sBlk_3.s_magic;
1838 sBlk.s.inodes = sBlk_3.inodes;
1839 sBlk.s.mkfs_time = sBlk_3.mkfs_time;
1840 sBlk.s.block_size = sBlk_3.block_size;
1841 sBlk.s.fragments = sBlk_3.fragments;
1842 sBlk.s.block_log = sBlk_3.block_log;
1843 sBlk.s.flags = sBlk_3.flags;
1844 sBlk.s.s_major = sBlk_3.s_major;
1845 sBlk.s.s_minor = sBlk_3.s_minor;
1846 sBlk.s.root_inode = sBlk_3.root_inode;
1847 sBlk.s.bytes_used = sBlk_3.bytes_used;
1848 sBlk.s.inode_table_start = sBlk_3.inode_table_start;
1849 sBlk.s.directory_table_start = sBlk_3.directory_table_start;
1850 sBlk.s.fragment_table_start = sBlk_3.fragment_table_start;
1851 sBlk.s.lookup_table_start = sBlk_3.lookup_table_start;
1852 sBlk.no_uids = sBlk_3.no_uids;
1853 sBlk.no_guids = sBlk_3.no_guids;
1854 sBlk.uid_start = sBlk_3.uid_start;
1855 sBlk.guid_start = sBlk_3.guid_start;
1856 sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK;
1857
1858 /* Check the MAJOR & MINOR versions */
1859 if(sBlk.s.s_major == 1 || sBlk.s.s_major == 2) {
1860 sBlk.s.bytes_used = sBlk_3.bytes_used_2;
1861 sBlk.uid_start = sBlk_3.uid_start_2;
1862 sBlk.guid_start = sBlk_3.guid_start_2;
1863 sBlk.s.inode_table_start = sBlk_3.inode_table_start_2;
1864 sBlk.s.directory_table_start = sBlk_3.directory_table_start_2;
1865
1866 if(sBlk.s.s_major == 1) {
1867 sBlk.s.block_size = sBlk_3.block_size_1;
1868 sBlk.s.fragment_table_start = sBlk.uid_start;
1869 s_ops.squashfs_opendir = squashfs_opendir_1;
1870 s_ops.read_fragment_table = read_fragment_table_1;
1871 s_ops.read_block_list = read_block_list_1;
1872 s_ops.read_inode = read_inode_1;
1873 s_ops.read_uids_guids = read_uids_guids_1;
1874 } else {
1875 sBlk.s.fragment_table_start =
1876 sBlk_3.fragment_table_start_2;
1877 s_ops.squashfs_opendir = squashfs_opendir_1;
1878 s_ops.read_fragment = read_fragment_2;
1879 s_ops.read_fragment_table = read_fragment_table_2;
1880 s_ops.read_block_list = read_block_list_2;
1881 s_ops.read_inode = read_inode_2;
1882 s_ops.read_uids_guids = read_uids_guids_1;
1883 }
1884 } else if(sBlk.s.s_major == 3) {
1885 s_ops.squashfs_opendir = squashfs_opendir_3;
1886 s_ops.read_fragment = read_fragment_3;
1887 s_ops.read_fragment_table = read_fragment_table_3;
1888 s_ops.read_block_list = read_block_list_2;
1889 s_ops.read_inode = read_inode_3;
1890 s_ops.read_uids_guids = read_uids_guids_1;
1891 } else {
1892 ERROR("Filesystem on %s is (%d:%d), ", source, sBlk.s.s_major,
1893 sBlk.s.s_minor);
1894 ERROR("which is a later filesystem version than I support!\n");
1895 goto failed_mount;
1896 }
1897
1898 /*
1899 * 1.x, 2.x and 3.x filesystems use gzip compression.
1900 */
1901 comp = lookup_compressor("gzip");
1902 return TRUE;
1903
1904 failed_mount:
1905 return FALSE;
1906 }
1907
1908
1909 struct pathname *process_extract_files(struct pathname *path, char *filename)
1910 {
1911 FILE *fd;
1912 char buffer[MAX_LINE + 1]; /* overflow safe */
1913 char *name;
1914
1915 fd = fopen(filename, "r");
1916 if(fd == NULL)
1917 EXIT_UNSQUASH("Failed to open extract file \"%s\" because %s\n",
1918 filename, strerror(errno));
1919
1920 while(fgets(name = buffer, MAX_LINE + 1, fd) != NULL) {
1921 int len = strlen(name);
1922
1923 if(len == MAX_LINE && name[len - 1] != '\n')
1924 /* line too large */
1925 EXIT_UNSQUASH("Line too long when reading "
1926 "extract file \"%s\", larger than %d "
1927 "bytes\n", filename, MAX_LINE);
1928
1929 /*
1930 * Remove '\n' terminator if it exists (the last line
1931 * in the file may not be '\n' terminated)
1932 */
1933 if(len && name[len - 1] == '\n')
1934 name[len - 1] = '\0';
1935
1936 /* Skip any leading whitespace */
1937 while(isspace(*name))
1938 name ++;
1939
1940 /* if comment line, skip */
1941 if(*name == '#')
1942 continue;
1943
1944 /* check for initial backslash, to accommodate
1945 * filenames with leading space or leading # character
1946 */
1947 if(*name == '\\')
1948 name ++;
1949
1950 /* if line is now empty after skipping characters, skip it */
1951 if(*name == '\0')
1952 continue;
1953
1954 path = add_path(path, name, name);
1955 }
1956
1957 if(ferror(fd))
1958 EXIT_UNSQUASH("Reading extract file \"%s\" failed because %s\n",
1959 filename, strerror(errno));
1960
1961 fclose(fd);
1962 return path;
1963 }
1964
1965
1966 /*
1967 * reader thread. This thread processes read requests queued by the
1968 * cache_get() routine.
1969 */
1970 void *reader(void *arg)
1971 {
1972 while(1) {
1973 struct cache_entry *entry = queue_get(to_reader);
1974 int res = read_fs_bytes(fd, entry->block,
1975 SQUASHFS_COMPRESSED_SIZE_BLOCK(entry->size),
1976 entry->data);
1977
1978 if(res && SQUASHFS_COMPRESSED_BLOCK(entry->size))
1979 /*
1980 * queue successfully read block to the inflate
1981 * thread(s) for further processing
1982 */
1983 queue_put(to_inflate, entry);
1984 else
1985 /*
1986 * block has either been successfully read and is
1987 * uncompressed, or an error has occurred, clear pending
1988 * flag, set error appropriately, and wake up any
1989 * threads waiting on this buffer
1990 */
1991 cache_block_ready(entry, !res);
1992 }
1993 }
1994
1995
1996 /*
1997 * writer thread. This processes file write requests queued by the
1998 * write_file() routine.
1999 */
2000 void *writer(void *arg)
2001 {
2002 int i;
2003
2004 while(1) {
2005 struct squashfs_file *file = queue_get(to_writer);
2006 int file_fd;
2007 long long hole = 0;
2008 int failed = FALSE;
2009 int error;
2010
2011 if(file == NULL) {
2012 queue_put(from_writer, NULL);
2013 continue;
2014 } else if(file->fd == -1) {
2015 /* write attributes for directory file->pathname */
2016 set_attributes(file->pathname, file->mode, file->uid,
2017 file->gid, file->time, file->xattr, TRUE);
2018 free(file->pathname);
2019 free(file);
2020 continue;
2021 }
2022
2023 TRACE("writer: regular file, blocks %d\n", file->blocks);
2024
2025 file_fd = file->fd;
2026
2027 for(i = 0; i < file->blocks; i++, cur_blocks ++) {
2028 struct file_entry *block = queue_get(to_writer);
2029
2030 if(block->buffer == 0) { /* sparse file */
2031 hole += block->size;
2032 free(block);
2033 continue;
2034 }
2035
2036 cache_block_wait(block->buffer);
2037
2038 if(block->buffer->error)
2039 failed = TRUE;
2040
2041 if(failed)
2042 continue;
2043
2044 error = write_block(file_fd, block->buffer->data +
2045 block->offset, block->size, hole, file->sparse);
2046
2047 if(error == FALSE) {
2048 ERROR("writer: failed to write data block %d\n",
2049 i);
2050 failed = TRUE;
2051 }
2052
2053 hole = 0;
2054 cache_block_put(block->buffer);
2055 free(block);
2056 }
2057
2058 if(hole && failed == FALSE) {
2059 /*
2060 * corner case for hole extending to end of file
2061 */
2062 if(file->sparse == FALSE ||
2063 lseek(file_fd, hole, SEEK_CUR) == -1) {
2064 /*
2065 * for files which we don't want to write
2066 * sparsely, or for broken lseeks which cannot
2067 * seek beyond end of file, write_block will do
2068 * the right thing
2069 */
2070 hole --;
2071 if(write_block(file_fd, "\0", 1, hole,
2072 file->sparse) == FALSE) {
2073 ERROR("writer: failed to write sparse "
2074 "data block\n");
2075 failed = TRUE;
2076 }
2077 } else if(ftruncate(file_fd, file->file_size) == -1) {
2078 ERROR("writer: failed to write sparse data "
2079 "block\n");
2080 failed = TRUE;
2081 }
2082 }
2083
2084 close_wake(file_fd);
2085 if(failed == FALSE)
2086 set_attributes(file->pathname, file->mode, file->uid,
2087 file->gid, file->time, file->xattr, force);
2088 else {
2089 ERROR("Failed to write %s, skipping\n", file->pathname);
2090 unlink(file->pathname);
2091 }
2092 free(file->pathname);
2093 free(file);
2094
2095 }
2096 }
2097
2098
2099 /*
2100 * decompress thread. This decompresses buffers queued by the read thread
2101 */
2102 void *inflator(void *arg)
2103 {
2104 char tmp[block_size];
2105
2106 while(1) {
2107 struct cache_entry *entry = queue_get(to_inflate);
2108 int error, res;
2109
2110 res = compressor_uncompress(comp, tmp, entry->data,
2111 SQUASHFS_COMPRESSED_SIZE_BLOCK(entry->size), block_size,
2112 &error);
2113
2114 if(res == -1)
2115 ERROR("%s uncompress failed with error code %d\n",
2116 comp->name, error);
2117 else
2118 memcpy(entry->data, tmp, res);
2119
2120 /*
2121 * block has been either successfully decompressed, or an error
2122 * occurred, clear pending flag, set error appropriately and
2123 * wake up any threads waiting on this block
2124 */
2125 cache_block_ready(entry, res == -1);
2126 }
2127 }
2128
2129
2130 void *progress_thread(void *arg)
2131 {
2132 struct timespec requested_time, remaining;
2133 struct itimerval itimerval;
2134 struct winsize winsize;
2135
2136 if(ioctl(1, TIOCGWINSZ, &winsize) == -1) {
2137 if(isatty(STDOUT_FILENO))
2138 ERROR("TIOCGWINSZ ioctl failed, defaulting to 80 "
2139 "columns\n");
2140 columns = 80;
2141 } else
2142 columns = winsize.ws_col;
2143 signal(SIGWINCH, sigwinch_handler);
2144 signal(SIGALRM, sigalrm_handler);
2145
2146 itimerval.it_value.tv_sec = 0;
2147 itimerval.it_value.tv_usec = 250000;
2148 itimerval.it_interval.tv_sec = 0;
2149 itimerval.it_interval.tv_usec = 250000;
2150 setitimer(ITIMER_REAL, &itimerval, NULL);
2151
2152 requested_time.tv_sec = 0;
2153 requested_time.tv_nsec = 250000000;
2154
2155 while(1) {
2156 int res = nanosleep(&requested_time, &remaining);
2157
2158 if(res == -1 && errno != EINTR)
2159 EXIT_UNSQUASH("nanosleep failed in progress thread\n");
2160
2161 if(progress_enabled) {
2162 pthread_mutex_lock(&screen_mutex);
2163 progress_bar(sym_count + dev_count +
2164 fifo_count + cur_blocks, total_inodes -
2165 total_files + total_blocks, columns);
2166 pthread_mutex_unlock(&screen_mutex);
2167 }
2168 }
2169 }
2170
2171
2172 void initialise_threads(int fragment_buffer_size, int data_buffer_size)
2173 {
2174 struct rlimit rlim;
2175 int i, max_files, res;
2176 sigset_t sigmask, old_mask;
2177
2178 /* block SIGQUIT and SIGHUP, these are handled by the info thread */
2179 sigemptyset(&sigmask);
2180 sigaddset(&sigmask, SIGQUIT);
2181 sigaddset(&sigmask, SIGHUP);
2182 sigaddset(&sigmask, SIGALRM);
2183 if(pthread_sigmask(SIG_BLOCK, &sigmask, NULL) == -1)
2184 EXIT_UNSQUASH("Failed to set signal mask in initialise_threads"
2185 "\n");
2186
2187 /*
2188 * temporarily block these signals so the created sub-threads will
2189 * ignore them, ensuring the main thread handles them
2190 */
2191 sigemptyset(&sigmask);
2192 sigaddset(&sigmask, SIGINT);
2193 sigaddset(&sigmask, SIGTERM);
2194 if(pthread_sigmask(SIG_BLOCK, &sigmask, &old_mask) == -1)
2195 EXIT_UNSQUASH("Failed to set signal mask in initialise_threads"
2196 "\n");
2197
2198 if(processors == -1) {
2199 #ifndef linux
2200 int mib[2];
2201 size_t len = sizeof(processors);
2202
2203 mib[0] = CTL_HW;
2204 #ifdef HW_AVAILCPU
2205 mib[1] = HW_AVAILCPU;
2206 #else
2207 mib[1] = HW_NCPU;
2208 #endif
2209
2210 if(sysctl(mib, 2, &processors, &len, NULL, 0) == -1) {
2211 ERROR("Failed to get number of available processors. "
2212 "Defaulting to 1\n");
2213 processors = 1;
2214 }
2215 #else
2216 processors = sysconf(_SC_NPROCESSORS_ONLN);
2217 #endif
2218 }
2219
2220 if(add_overflow(processors, 3) ||
2221 multiply_overflow(processors + 3, sizeof(pthread_t)))
2222 EXIT_UNSQUASH("Processors too large\n");
2223
2224 thread = malloc((3 + processors) * sizeof(pthread_t));
2225 if(thread == NULL)
2226 EXIT_UNSQUASH("Out of memory allocating thread descriptors\n");
2227 inflator_thread = &thread[3];
2228
2229 /*
2230 * dimensioning the to_reader and to_inflate queues. The size of
2231 * these queues is directly related to the amount of block
2232 * read-ahead possible. To_reader queues block read requests to
2233 * the reader thread and to_inflate queues block decompression
2234 * requests to the inflate thread(s) (once the block has been read by
2235 * the reader thread). The amount of read-ahead is determined by
2236 * the combined size of the data_block and fragment caches which
2237 * determine the total number of blocks which can be "in flight"
2238 * at any one time (either being read or being decompressed)
2239 *
2240 * The maximum file open limit, however, affects the read-ahead
2241 * possible, in that for normal sizes of the fragment and data block
2242 * caches, where the incoming files have few data blocks or one fragment
2243 * only, the file open limit is likely to be reached before the
2244 * caches are full. This means the worst case sizing of the combined
2245 * sizes of the caches is unlikely to ever be necessary. However, it is
2246 * obvious that read-ahead up to the data block cache size is always possible
2247 * irrespective of the file open limit, because a single file could
2248 * contain that number of blocks.
2249 *
2250 * Choosing the size as "file open limit + data block cache size" seems
2251 * to be a reasonable estimate. We can reasonably assume the maximum
2252 * likely read-ahead possible is data block cache size + one fragment
2253 * per open file.
2254 *
2255 * dimensioning the to_writer queue. The size of this queue is
2256 * directly related to the amount of block read-ahead possible.
2257 * However, unlike the to_reader and to_inflate queues, this is
2258 * complicated by the fact the to_writer queue not only contains
2259 * entries for fragments and data_blocks but it also contains
2260 * file entries, one per open file in the read-ahead.
2261 *
2262 * Choosing the size as "2 * (file open limit) +
2263 * data block cache size" seems to be a reasonable estimate.
2264 * We can reasonably assume the maximum likely read-ahead possible
2265 * is data block cache size + one fragment per open file, and then
2266 * we will have a file_entry for each open file.
2267 */
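	/*
	 * Worked example with illustrative figures (assuming 256 Mbyte
	 * fragment and data queues, the usual defaults, and a 128 Kbyte
	 * block size): each cache then holds 2048 block-sized buffers.
	 * With a soft open file limit of 1024 (less OPEN_FILE_MARGIN),
	 * to_reader and to_inflate are sized at roughly 1024 + 2048
	 * entries, and to_writer at roughly 2 * 1024 + 2048 entries.
	 */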
2268 res = getrlimit(RLIMIT_NOFILE, &rlim);
2269 if (res == -1) {
2270 ERROR("failed to get open file limit! Defaulting to 1\n");
2271 rlim.rlim_cur = 1;
2272 }
2273
2274 if (rlim.rlim_cur != RLIM_INFINITY) {
2275 /*
2276 * leave OPEN_FILE_MARGIN free (rlim_cur includes fds used by
2277 * stdin, stdout, stderr and the filesystem fd)
2278 */
2279 if (rlim.rlim_cur <= OPEN_FILE_MARGIN)
2280 /* no margin, use minimum possible */
2281 max_files = 1;
2282 else
2283 max_files = rlim.rlim_cur - OPEN_FILE_MARGIN;
2284 } else
2285 max_files = -1;
2286
2287 /* set amount of available files for use by open_wait and close_wake */
2288 open_init(max_files);
2289
2290 /*
2291 * allocate to_reader, to_inflate and to_writer queues. Set based on
2292 * open file limit and cache size, unless open file limit is unlimited,
2293 * in which case set purely based on cache limits
2294 *
2295 * In doing so, check that the user supplied values do not overflow
2296 * a signed int
2297 */
2298 if (max_files != -1) {
2299 if(add_overflow(data_buffer_size, max_files) ||
2300 add_overflow(data_buffer_size, max_files * 2))
2301 EXIT_UNSQUASH("Data queue size is too large\n");
2302
2303 to_reader = queue_init(max_files + data_buffer_size);
2304 to_inflate = queue_init(max_files + data_buffer_size);
2305 to_writer = queue_init(max_files * 2 + data_buffer_size);
2306 } else {
2307 int all_buffers_size;
2308
2309 if(add_overflow(fragment_buffer_size, data_buffer_size))
2310 EXIT_UNSQUASH("Data and fragment queues combined are"
2311 " too large\n");
2312
2313 all_buffers_size = fragment_buffer_size + data_buffer_size;
2314
2315 if(add_overflow(all_buffers_size, all_buffers_size))
2316 EXIT_UNSQUASH("Data and fragment queues combined are"
2317 " too large\n");
2318
2319 to_reader = queue_init(all_buffers_size);
2320 to_inflate = queue_init(all_buffers_size);
2321 to_writer = queue_init(all_buffers_size * 2);
2322 }
2323
2324 from_writer = queue_init(1);
2325
2326 fragment_cache = cache_init(block_size, fragment_buffer_size);
2327 data_cache = cache_init(block_size, data_buffer_size);
2328 pthread_create(&thread[0], NULL, reader, NULL);
2329 pthread_create(&thread[1], NULL, writer, NULL);
2330 pthread_create(&thread[2], NULL, progress_thread, NULL);
2331 init_info();
2332 pthread_mutex_init(&fragment_mutex, NULL);
2333
2334 for(i = 0; i < processors; i++) {
2335 if(pthread_create(&inflator_thread[i], NULL, inflator, NULL) !=
2336 0)
2337 EXIT_UNSQUASH("Failed to create thread\n");
2338 }
2339
2340 printf("Parallel unsquashfs: Using %d processor%s\n", processors,
2341 processors == 1 ? "" : "s");
2342
2343 if(pthread_sigmask(SIG_SETMASK, &old_mask, NULL) == -1)
2344 EXIT_UNSQUASH("Failed to set signal mask in initialise_threads"
2345 "\n");
2346 }
2347
2348
2349 void enable_progress_bar()
2350 {
2351 pthread_mutex_lock(&screen_mutex);
2352 progress_enabled = progress;
2353 pthread_mutex_unlock(&screen_mutex);
2354 }
2355
2356
2357 void disable_progress_bar()
2358 {
2359 pthread_mutex_lock(&screen_mutex);
2360 if(progress_enabled) {
2361 progress_bar(sym_count + dev_count + fifo_count + cur_blocks,
2362 total_inodes - total_files + total_blocks, columns);
2363 printf("\n");
2364 }
2365 progress_enabled = FALSE;
2366 pthread_mutex_unlock(&screen_mutex);
2367 }
2368
2369
2370 void progressbar_error(char *fmt, ...)
2371 {
2372 va_list ap;
2373
2374 pthread_mutex_lock(&screen_mutex);
2375
2376 if(progress_enabled)
2377 fprintf(stderr, "\n");
2378
2379 va_start(ap, fmt);
2380 vfprintf(stderr, fmt, ap);
2381 va_end(ap);
2382
2383 pthread_mutex_unlock(&screen_mutex);
2384 }
2385
2386
2387 void progressbar_info(char *fmt, ...)
2388 {
2389 va_list ap;
2390
2391 pthread_mutex_lock(&screen_mutex);
2392
2393 if(progress_enabled)
2394 printf("\n");
2395
2396 va_start(ap, fmt);
2397 vprintf(fmt, ap);
2398 va_end(ap);
2399
2400 pthread_mutex_unlock(&screen_mutex);
2401 }
2402
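/*
 * Rough layout of the progress bar drawn below, using hypothetical numbers:
 * with max = 2048 blocks, max_digits is 4 and "used" is 4 * 2 + 11 = 19
 * columns reserved for the brackets, counters and percentage, leaving
 * roughly 61 columns for the '=' bar, spinner and padding on an 80-column
 * terminal.
 */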
2403 void progress_bar(long long current, long long max, int columns)
2404 {
2405 char rotate_list[] = { '|', '/', '-', '\\' };
2406 int max_digits, used, hashes, spaces;
2407 static int tty = -1;
2408
2409 if(max == 0)
2410 return;
2411
2412 max_digits = floor(log10(max)) + 1;
2413 used = max_digits * 2 + 11;
2414 hashes = (current * (columns - used)) / max;
2415 spaces = columns - used - hashes;
2416
2417 if((current > max) || (columns - used < 0))
2418 return;
2419
2420 if(tty == -1)
2421 tty = isatty(STDOUT_FILENO);
2422 if(!tty) {
2423 static long long previous = -1;
2424
2425 /*
2426 * Updating much more frequently than this results in huge
2427 * log files.
2428 */
2429 if((current % 100) != 0 && current != max)
2430 return;
2431 /* Don't update just to rotate the spinner. */
2432 if(current == previous)
2433 return;
2434 previous = current;
2435 }
2436
2437 printf("\r[");
2438
2439 while (hashes --)
2440 putchar('=');
2441
2442 putchar(rotate_list[rotate]);
2443
2444 while(spaces --)
2445 putchar(' ');
2446
2447 printf("] %*lld/%*lld", max_digits, current, max_digits, max);
2448 printf(" %3lld%%", current * 100 / max);
2449 fflush(stdout);
2450 }
2451
2452
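/*
 * Illustrative behaviour of parse_number() below (hypothetical inputs):
 *
 *	"16"          returns 1, *res = 16
 *	"-4"          returns 0 (negative values rejected)
 *	"16k"         returns 0 (trailing junk after the number)
 *	"9999999999"  returns 0 (does not fit in a signed int)
 */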
2453 int parse_number(char *arg, int *res)
2454 {
2455 char *b;
2456 long number = strtol(arg, &b, 10);
2457
2458 /* check for trailing junk after number */
2459 if(*b != '\0')
2460 return 0;
2461
2462 /*
2463 * check for strtol underflow or overflow in conversion.
2464 * Note: strtol can validly return LONG_MIN and LONG_MAX
2465 * if the user entered these values, but, additional code
2466 * to distinguish this scenario is unnecessary, because for
2467 * our purposes LONG_MIN and LONG_MAX are too large anyway
2468 */
2469 if(number == LONG_MIN || number == LONG_MAX)
2470 return 0;
2471
2472 /* reject negative numbers as invalid */
2473 if(number < 0)
2474 return 0;
2475
2476 /* check if long result will overflow signed int */
2477 if(number > INT_MAX)
2478 return 0;
2479
2480 *res = number;
2481 return 1;
2482 }
2483
2484
2485 #define VERSION() \
2486 printf("unsquashfs version 4.3 (2014/05/12)\n");\
2487 printf("copyright (C) 2014 Phillip Lougher "\
2488 "<phillip@squashfs.org.uk>\n\n");\
2489 printf("This program is free software; you can redistribute it and/or"\
2490 "\n");\
2491 printf("modify it under the terms of the GNU General Public License"\
2492 "\n");\
2493 printf("as published by the Free Software Foundation; either version "\
2494 "2,\n");\
2495 printf("or (at your option) any later version.\n\n");\
2496 printf("This program is distributed in the hope that it will be "\
2497 "useful,\n");\
2498 printf("but WITHOUT ANY WARRANTY; without even the implied warranty of"\
2499 "\n");\
2500 printf("MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the"\
2501 "\n");\
2502 printf("GNU General Public License for more details.\n");
2503 int main(int argc, char *argv[])
2504 {
2505 char *dest = "squashfs-root";
2506 int i, stat_sys = FALSE, version = FALSE;
2507 int n;
2508 struct pathnames *paths = NULL;
2509 struct pathname *path = NULL;
2510 long long directory_table_end;
2511 int fragment_buffer_size = FRAGMENT_BUFFER_DEFAULT;
2512 int data_buffer_size = DATA_BUFFER_DEFAULT;
2513
2514 pthread_mutex_init(&screen_mutex, NULL);
2515 root_process = geteuid() == 0;
2516 if(root_process)
2517 umask(0);
2518
2519 for(i = 1; i < argc; i++) {
2520 if(*argv[i] != '-')
2521 break;
2522 if(strcmp(argv[i], "-version") == 0 ||
2523 strcmp(argv[i], "-v") == 0) {
2524 VERSION();
2525 version = TRUE;
2526 } else if(strcmp(argv[i], "-info") == 0 ||
2527 strcmp(argv[i], "-i") == 0)
2528 info = TRUE;
2529 else if(strcmp(argv[i], "-ls") == 0 ||
2530 strcmp(argv[i], "-l") == 0)
2531 lsonly = TRUE;
2532 else if(strcmp(argv[i], "-no-progress") == 0 ||
2533 strcmp(argv[i], "-n") == 0)
2534 progress = FALSE;
2535 else if(strcmp(argv[i], "-no-xattrs") == 0 ||
2536 strcmp(argv[i], "-no") == 0)
2537 no_xattrs = TRUE;
2538 else if(strcmp(argv[i], "-xattrs") == 0 ||
2539 strcmp(argv[i], "-x") == 0)
2540 no_xattrs = FALSE;
2541 else if(strcmp(argv[i], "-user-xattrs") == 0 ||
2542 strcmp(argv[i], "-u") == 0) {
2543 user_xattrs = TRUE;
2544 no_xattrs = FALSE;
2545 } else if(strcmp(argv[i], "-dest") == 0 ||
2546 strcmp(argv[i], "-d") == 0) {
2547 if(++i == argc) {
2548 fprintf(stderr, "%s: -dest missing filename\n",
2549 argv[0]);
2550 exit(1);
2551 }
2552 dest = argv[i];
2553 } else if(strcmp(argv[i], "-processors") == 0 ||
2554 strcmp(argv[i], "-p") == 0) {
2555 if((++i == argc) ||
2556 !parse_number(argv[i],
2557 &processors)) {
2558 ERROR("%s: -processors missing or invalid "
2559 "processor number\n", argv[0]);
2560 exit(1);
2561 }
2562 if(processors < 1) {
2563 ERROR("%s: -processors should be 1 or larger\n",
2564 argv[0]);
2565 exit(1);
2566 }
2567 } else if(strcmp(argv[i], "-data-queue") == 0 ||
2568 strcmp(argv[i], "-da") == 0) {
2569 if((++i == argc) ||
2570 !parse_number(argv[i],
2571 &data_buffer_size)) {
2572 ERROR("%s: -data-queue missing or invalid "
2573 "queue size\n", argv[0]);
2574 exit(1);
2575 }
2576 if(data_buffer_size < 1) {
2577 ERROR("%s: -data-queue should be 1 Mbyte or "
2578 "larger\n", argv[0]);
2579 exit(1);
2580 }
2581 } else if(strcmp(argv[i], "-frag-queue") == 0 ||
2582 strcmp(argv[i], "-fr") == 0) {
2583 if((++i == argc) ||
2584 !parse_number(argv[i],
2585 &fragment_buffer_size)) {
2586 ERROR("%s: -frag-queue missing or invalid "
2587 "queue size\n", argv[0]);
2588 exit(1);
2589 }
2590 if(fragment_buffer_size < 1) {
2591 ERROR("%s: -frag-queue should be 1 Mbyte or "
2592 "larger\n", argv[0]);
2593 exit(1);
2594 }
2595 } else if(strcmp(argv[i], "-force") == 0 ||
2596 strcmp(argv[i], "-f") == 0)
2597 force = TRUE;
2598 else if(strcmp(argv[i], "-stat") == 0 ||
2599 strcmp(argv[i], "-s") == 0)
2600 stat_sys = TRUE;
2601 else if(strcmp(argv[i], "-lls") == 0 ||
2602 strcmp(argv[i], "-ll") == 0) {
2603 lsonly = TRUE;
2604 short_ls = FALSE;
2605 } else if(strcmp(argv[i], "-linfo") == 0 ||
2606 strcmp(argv[i], "-li") == 0) {
2607 info = TRUE;
2608 short_ls = FALSE;
2609 } else if(strcmp(argv[i], "-ef") == 0 ||
2610 strcmp(argv[i], "-e") == 0) {
2611 if(++i == argc) {
2612 fprintf(stderr, "%s: -ef missing filename\n",
2613 argv[0]);
2614 exit(1);
2615 }
2616 path = process_extract_files(path, argv[i]);
2617 } else if(strcmp(argv[i], "-regex") == 0 ||
2618 strcmp(argv[i], "-r") == 0)
2619 use_regex = TRUE;
2620 else
2621 goto options;
2622 }
2623
2624 if(lsonly || info)
2625 progress = FALSE;
2626
2627 #ifdef SQUASHFS_TRACE
2628 /*
2629 * Disable progress bar if full debug tracing is enabled.
2630 * The progress bar in this case just gets in the way of the
2631 * debug trace output
2632 */
2633 progress = FALSE;
2634 #endif
2635
2636 if(i == argc) {
2637 if(!version) {
2638 options:
2639 ERROR("SYNTAX: %s [options] filesystem [directories or "
2640 "files to extract]\n", argv[0]);
2641 ERROR("\t-v[ersion]\t\tprint version, licence and "
2642 "copyright information\n");
2643 ERROR("\t-d[est] <pathname>\tunsquash to <pathname>, "
2644 "default \"squashfs-root\"\n");
2645 ERROR("\t-n[o-progress]\t\tdon't display the progress "
2646 "bar\n");
2647 ERROR("\t-no[-xattrs]\t\tdon't extract xattrs in file system"
2648 NOXOPT_STR"\n");
2649 ERROR("\t-x[attrs]\t\textract xattrs in file system"
2650 XOPT_STR "\n");
2651 ERROR("\t-u[ser-xattrs]\t\tonly extract user xattrs in "
2652 "file system.\n\t\t\t\tEnables extracting "
2653 "xattrs\n");
2654 ERROR("\t-p[rocessors] <number>\tuse <number> "
2655 "processors. By default will use\n");
2656 ERROR("\t\t\t\tnumber of processors available\n");
2657 ERROR("\t-i[nfo]\t\t\tprint files as they are "
2658 "unsquashed\n");
2659 ERROR("\t-li[nfo]\t\tprint files as they are "
2660 "unsquashed with file\n");
2661 ERROR("\t\t\t\tattributes (like ls -l output)\n");
2662 ERROR("\t-l[s]\t\t\tlist filesystem, but don't unsquash"
2663 "\n");
2664 ERROR("\t-ll[s]\t\t\tlist filesystem with file "
2665 "attributes (like\n");
2666 ERROR("\t\t\t\tls -l output), but don't unsquash\n");
2667 ERROR("\t-f[orce]\t\tif file already exists then "
2668 "overwrite\n");
2669 ERROR("\t-s[tat]\t\t\tdisplay filesystem superblock "
2670 "information\n");
2671 ERROR("\t-e[f] <extract file>\tlist of directories or "
2672 "files to extract.\n\t\t\t\tOne per line\n");
2673 ERROR("\t-da[ta-queue] <size>\tSet data queue to "
2674 "<size> Mbytes. Default %d\n\t\t\t\tMbytes\n",
2675 DATA_BUFFER_DEFAULT);
2676 ERROR("\t-fr[ag-queue] <size>\tSet fragment queue to "
2677 "<size> Mbytes. Default\n\t\t\t\t%d Mbytes\n",
2678 FRAGMENT_BUFFER_DEFAULT);
2679 ERROR("\t-r[egex]\t\ttreat extract names as POSIX "
2680 "regular expressions\n");
2681 ERROR("\t\t\t\trather than use the default shell "
2682 "wildcard\n\t\t\t\texpansion (globbing)\n");
2683 ERROR("\nDecompressors available:\n");
2684 display_compressors("", "");
2685 }
2686 exit(1);
2687 }
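	/*
	 * Typical invocations, using hypothetical file names:
	 *
	 *	unsquashfs image.squashfs               extract to ./squashfs-root
	 *	unsquashfs -d /tmp/out image.squashfs   extract to /tmp/out
	 *	unsquashfs -lls image.squashfs          long listing only, no extraction
	 *	unsquashfs -e files.txt image.squashfs  extract only the paths
	 *	                                        listed in files.txt
	 */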
2688
2689 for(n = i + 1; n < argc; n++)
2690 path = add_path(path, argv[n], argv[n]);
2691
2692 if((fd = open(argv[i], O_RDONLY)) == -1) {
2693 ERROR("Could not open %s, because %s\n", argv[i],
2694 strerror(errno));
2695 exit(1);
2696 }
2697
2698 if(read_super(argv[i]) == FALSE)
2699 exit(1);
2700
2701 if(stat_sys) {
2702 squashfs_stat(argv[i]);
2703 exit(0);
2704 }
2705
2706 if(!check_compression(comp))
2707 exit(1);
2708
2709 block_size = sBlk.s.block_size;
2710 block_log = sBlk.s.block_log;
2711
2712 /*
2713 * Sanity check block size and block log.
2714 *
2715 * Check they're within correct limits
2716 */
2717 if(block_size > SQUASHFS_FILE_MAX_SIZE ||
2718 block_log > SQUASHFS_FILE_MAX_LOG)
2719 EXIT_UNSQUASH("Block size or block_log too large."
2720 " File system is corrupt.\n");
2721
2722 /*
2723 * Check block_size and block_log match
2724 */
2725 if(block_size != (1 << block_log))
2726 EXIT_UNSQUASH("Block size and block_log do not match."
2727 " File system is corrupt.\n");
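	/*
	 * For example, a filesystem built with the common 128 Kbyte block
	 * size must have block_log 17, since 1 << 17 == 131072.
	 */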
2728
2729 /*
2730 * convert from queue size in Mbytes to queue size in
2731 * blocks.
2732 *
2733 * In doing so, check that the user supplied values do not
2734 * overflow a signed int
2735 */
2736 if(shift_overflow(fragment_buffer_size, 20 - block_log))
2737 EXIT_UNSQUASH("Fragment queue size is too large\n");
2738 else
2739 fragment_buffer_size <<= 20 - block_log;
2740
2741 if(shift_overflow(data_buffer_size, 20 - block_log))
2742 EXIT_UNSQUASH("Data queue size is too large\n");
2743 else
2744 data_buffer_size <<= 20 - block_log;
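	/*
	 * Illustration with hypothetical values: a 256 Mbyte queue and
	 * block_log 17 (128 Kbyte blocks) gives 256 << (20 - 17) = 2048
	 * blocks.
	 */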
2745
2746 initialise_threads(fragment_buffer_size, data_buffer_size);
2747
2748 fragment_data = malloc(block_size);
2749 if(fragment_data == NULL)
2750 EXIT_UNSQUASH("failed to allocate fragment_data\n");
2751
2752 file_data = malloc(block_size);
2753 if(file_data == NULL)
2754 		EXIT_UNSQUASH("failed to allocate file_data\n");
2755
2756 data = malloc(block_size);
2757 if(data == NULL)
2758 EXIT_UNSQUASH("failed to allocate data\n");
2759
2760 created_inode = malloc(sBlk.s.inodes * sizeof(char *));
2761 if(created_inode == NULL)
2762 EXIT_UNSQUASH("failed to allocate created_inode\n");
2763
2764 memset(created_inode, 0, sBlk.s.inodes * sizeof(char *));
2765
2766 if(s_ops.read_uids_guids() == FALSE)
2767 		EXIT_UNSQUASH("failed to read uid/gid table\n");
2768
2769 if(s_ops.read_fragment_table(&directory_table_end) == FALSE)
2770 EXIT_UNSQUASH("failed to read fragment table\n");
2771
2772 if(read_inode_table(sBlk.s.inode_table_start,
2773 sBlk.s.directory_table_start) == FALSE)
2774 EXIT_UNSQUASH("failed to read inode table\n");
2775
2776 if(read_directory_table(sBlk.s.directory_table_start,
2777 directory_table_end) == FALSE)
2778 EXIT_UNSQUASH("failed to read directory table\n");
2779
2780 if(no_xattrs)
2781 sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK;
2782
2783 if(read_xattrs_from_disk(fd, &sBlk.s) == 0)
2784 EXIT_UNSQUASH("failed to read the xattr table\n");
2785
2786 if(path) {
2787 paths = init_subdir();
2788 paths = add_subdir(paths, path);
2789 }
2790
2791 pre_scan(dest, SQUASHFS_INODE_BLK(sBlk.s.root_inode),
2792 SQUASHFS_INODE_OFFSET(sBlk.s.root_inode), paths);
2793
2794 memset(created_inode, 0, sBlk.s.inodes * sizeof(char *));
2795 inode_number = 1;
2796
2797 printf("%d inodes (%d blocks) to write\n\n", total_inodes,
2798 total_inodes - total_files + total_blocks);
2799
2800 enable_progress_bar();
2801
2802 dir_scan(dest, SQUASHFS_INODE_BLK(sBlk.s.root_inode),
2803 SQUASHFS_INODE_OFFSET(sBlk.s.root_inode), paths);
2804
2805 queue_put(to_writer, NULL);
2806 queue_get(from_writer);
2807
2808 disable_progress_bar();
2809
2810 if(!lsonly) {
2811 printf("\n");
2812 printf("created %d files\n", file_count);
2813 printf("created %d directories\n", dir_count);
2814 printf("created %d symlinks\n", sym_count);
2815 printf("created %d devices\n", dev_count);
2816 printf("created %d fifos\n", fifo_count);
2817 }
2818
2819 return 0;
2820 }
2821