1 /*
2 * Create a squashfs filesystem. This is a highly compressed read only
3 * filesystem.
4 *
5 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
6 * 2012, 2013, 2014
7 * Phillip Lougher <phillip@squashfs.org.uk>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2,
12 * or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 *
23 * mksquashfs.c
24 */
25
26 #define FALSE 0
27 #define TRUE 1
28 #define MAX_LINE 16384
29
30 #include <pwd.h>
31 #include <grp.h>
32 #include <time.h>
33 #include <unistd.h>
34 #include <stdio.h>
35 #include <stddef.h>
36 #include <sys/types.h>
37 #include <sys/stat.h>
38 #include <fcntl.h>
39 #include <errno.h>
40 #include <dirent.h>
41 #include <string.h>
42 #include <stdlib.h>
43 #include <signal.h>
44 #include <setjmp.h>
45 #include <sys/types.h>
46 #include <sys/mman.h>
47 #include <pthread.h>
48 #include <regex.h>
49 #include <fnmatch.h>
50 #include <sys/wait.h>
51 #include <limits.h>
52 #include <ctype.h>
53
54 #ifndef FNM_EXTMATCH /* glibc extension */
55 #define FNM_EXTMATCH 0
56 #endif
57
58 #ifndef linux
59 #define __BYTE_ORDER BYTE_ORDER
60 #define __BIG_ENDIAN BIG_ENDIAN
61 #define __LITTLE_ENDIAN LITTLE_ENDIAN
62 #include <sys/sysctl.h>
63 #else
64 #include <endian.h>
65 #include <sys/sysinfo.h>
66 #endif
67
68 #include "squashfs_fs.h"
69 #include "squashfs_swap.h"
70 #include "mksquashfs.h"
71 #include "sort.h"
72 #include "pseudo.h"
73 #include "compressor.h"
74 #include "xattr.h"
75 #include "action.h"
76 #include "error.h"
77 #include "progressbar.h"
78 #include "info.h"
79 #include "caches-queues-lists.h"
80 #include "read_fs.h"
81 #include "restore.h"
82 #include "process_fragments.h"
83
84 /* ANDROID CHANGES START*/
85 #ifdef ANDROID
86 #include "android.h"
87 #include "private/android_filesystem_config.h"
88 #include "private/canned_fs_config.h"
89 int android_config = FALSE;
90 char *context_file = NULL;
91 char *mount_point = NULL;
92 char *target_out_path = NULL;
93 fs_config_func_t fs_config_func = NULL;
94 int compress_thresh_per = 0;
95 int align_4k_blocks = TRUE;
96 FILE *block_map_file = NULL;
97 #endif
98 /* ANDROID CHANGES END */
99
100 int delete = FALSE;
101 int fd;
102 struct squashfs_super_block sBlk;
103
104 /* filesystem flags for building */
105 int comp_opts = FALSE;
106 int no_xattrs = XATTR_DEF;
107 int noX = FALSE;
108 int duplicate_checking = TRUE;
109 int noF = FALSE;
110 int no_fragments = FALSE;
111 int always_use_fragments = FALSE;
112 int noI = FALSE;
113 int noD = FALSE;
114 int silent = TRUE;
115 int exportable = TRUE;
116 int sparse_files = TRUE;
117 int old_exclude = TRUE;
118 int use_regex = FALSE;
119 int nopad = FALSE;
120 int exit_on_error = FALSE;
121
122 long long global_uid = -1, global_gid = -1;
123
124 /* superblock attributes */
125 int block_size = SQUASHFS_FILE_SIZE, block_log;
126 unsigned int id_count = 0;
127 int file_count = 0, sym_count = 0, dev_count = 0, dir_count = 0, fifo_count = 0,
128 sock_count = 0;
129
130 /* ANDROID CHANGES START*/
131 #ifdef ANDROID
132 int whitelisted_count = 0;
133 #endif
134 /* ANDROID CHANGES END */
135
136 /* write position within data section */
137 long long bytes = 0, total_bytes = 0;
138
139 /* in memory directory table - possibly compressed */
140 char *directory_table = NULL;
141 unsigned int directory_bytes = 0, directory_size = 0, total_directory_bytes = 0;
142
143 /* cached directory table */
144 char *directory_data_cache = NULL;
145 unsigned int directory_cache_bytes = 0, directory_cache_size = 0;
146
147 /* in memory inode table - possibly compressed */
148 char *inode_table = NULL;
149 unsigned int inode_bytes = 0, inode_size = 0, total_inode_bytes = 0;
150
151 /* cached inode table */
152 char *data_cache = NULL;
153 unsigned int cache_bytes = 0, cache_size = 0, inode_count = 0;
154
155 /* inode lookup table */
156 squashfs_inode *inode_lookup_table = NULL;
157
158 /* in memory directory data */
159 #define I_COUNT_SIZE 128
160 #define DIR_ENTRIES 32
161 #define INODE_HASH_SIZE 65536
162 #define INODE_HASH_MASK (INODE_HASH_SIZE - 1)
163 #define INODE_HASH(dev, ino) (ino & INODE_HASH_MASK)
164
165 struct cached_dir_index {
166 struct squashfs_dir_index index;
167 char *name;
168 };
169
170 struct directory {
171 unsigned int start_block;
172 unsigned int size;
173 unsigned char *buff;
174 unsigned char *p;
175 unsigned int entry_count;
176 unsigned char *entry_count_p;
177 unsigned int i_count;
178 unsigned int i_size;
179 struct cached_dir_index *index;
180 unsigned char *index_count_p;
181 unsigned int inode_number;
182 };
183
184 struct inode_info *inode_info[INODE_HASH_SIZE];
185
186 /* hash tables used to do fast duplicate searches in duplicate check */
187 struct file_info *dupl[65536];
188 int dup_files = 0;
189
190 /* exclude file handling */
191 /* list of exclude dirs/files */
192 struct exclude_info {
193 dev_t st_dev;
194 ino_t st_ino;
195 };
196
197 #define EXCLUDE_SIZE 8192
198 int exclude = 0;
199 struct exclude_info *exclude_paths = NULL;
200 int old_excluded(char *filename, struct stat *buf);
201
202 struct path_entry {
203 char *name;
204 regex_t *preg;
205 struct pathname *paths;
206 };
207
208 struct pathname {
209 int names;
210 struct path_entry *name;
211 };
212
213 struct pathnames {
214 int count;
215 struct pathname *path[0];
216 };
217 #define PATHS_ALLOC_SIZE 10
218
219 struct pathnames *paths = NULL;
220 struct pathname *path = NULL;
221 struct pathname *stickypath = NULL;
222 int excluded(char *name, struct pathnames *paths, struct pathnames **new);
223
224 int fragments = 0;
225
226 #define FRAG_SIZE 32768
227
228 struct squashfs_fragment_entry *fragment_table = NULL;
229 int fragments_outstanding = 0;
230
231 int fragments_locked = FALSE;
232
233 /* current inode number for directories and non directories */
234 unsigned int inode_no = 1;
235 unsigned int root_inode_number = 0;
236
237 /* list of source dirs/files */
238 int source = 0;
239 char **source_path;
240
241 /* list of root directory entries read from original filesystem */
242 int old_root_entries = 0;
243 struct old_root_entry_info {
244 char *name;
245 struct inode_info inode;
246 };
247 struct old_root_entry_info *old_root_entry;
248
/* restore original filesystem state if appending to existing filesystem is
 * cancelled */
251 int appending = FALSE;
252 char *sdata_cache, *sdirectory_data_cache, *sdirectory_compressed;
253
254 long long sbytes, stotal_bytes;
255
256 unsigned int sinode_bytes, scache_bytes, sdirectory_bytes,
257 sdirectory_cache_bytes, sdirectory_compressed_bytes,
258 stotal_inode_bytes, stotal_directory_bytes,
259 sinode_count = 0, sfile_count, ssym_count, sdev_count,
260 sdir_count, sfifo_count, ssock_count, sdup_files;
261 int sfragments;
262 int threads;
263
264 /* flag whether destination file is a block device */
265 int block_device = FALSE;
266
267 /* flag indicating whether files are sorted using sort list(s) */
268 int sorted = FALSE;
269
270 /* save destination file name for deleting on error */
271 char *destination_file = NULL;
272
273 /* recovery file for abnormal exit on appending */
274 char *recovery_file = NULL;
275 int recover = TRUE;
276
277 struct id *id_hash_table[ID_ENTRIES];
278 struct id *id_table[SQUASHFS_IDS], *sid_table[SQUASHFS_IDS];
279 unsigned int uid_count = 0, guid_count = 0;
280 unsigned int sid_count = 0, suid_count = 0, sguid_count = 0;
281
282 struct cache *reader_buffer, *fragment_buffer, *reserve_cache;
283 struct cache *bwriter_buffer, *fwriter_buffer;
284 struct queue *to_reader, *to_deflate, *to_writer, *from_writer,
285 *to_frag, *locked_fragment, *to_process_frag;
286 struct seq_queue *to_main;
287 pthread_t reader_thread, writer_thread, main_thread;
288 pthread_t *deflator_thread, *frag_deflator_thread, *frag_thread;
289 pthread_t *restore_thread = NULL;
290 pthread_mutex_t fragment_mutex = PTHREAD_MUTEX_INITIALIZER;
291 pthread_mutex_t pos_mutex = PTHREAD_MUTEX_INITIALIZER;
292 pthread_mutex_t dup_mutex = PTHREAD_MUTEX_INITIALIZER;
293
294 /* user options that control parallelisation */
295 int processors = -1;
296 int bwriter_size;
297
298 /* compression operations */
299 struct compressor *comp = NULL;
300 int compressor_opt_parsed = FALSE;
301 void *stream = NULL;
302
303 /* xattr stats */
304 unsigned int xattr_bytes = 0, total_xattr_bytes = 0;
305
306 /* fragment to file mapping used when appending */
307 int append_fragments = 0;
308 struct append_file **file_mapping;
309
310 /* root of the in-core directory structure */
311 struct dir_info *root_dir;
312
313 static char *read_from_disk(long long start, unsigned int avail_bytes);
314 void add_old_root_entry(char *name, squashfs_inode inode, int inode_number,
315 int type);
316 struct file_info *duplicate(long long file_size, long long bytes,
317 unsigned int **block_list, long long *start, struct fragment **fragment,
318 struct file_buffer *file_buffer, int blocks, unsigned short checksum,
319 int checksum_flag);
320 struct dir_info *dir_scan1(char *, char *, struct pathnames *,
321 struct dir_ent *(_readdir)(struct dir_info *), int);
322 void dir_scan2(struct dir_info *dir, struct pseudo *pseudo);
323 void dir_scan3(struct dir_info *dir);
324 void dir_scan4(struct dir_info *dir);
325 void dir_scan5(struct dir_info *dir);
326 void dir_scan6(struct dir_info *dir);
327 void dir_scan7(squashfs_inode *inode, struct dir_info *dir_info);
328 struct file_info *add_non_dup(long long file_size, long long bytes,
329 unsigned int *block_list, long long start, struct fragment *fragment,
330 unsigned short checksum, unsigned short fragment_checksum,
331 int checksum_flag, int checksum_frag_flag);
332 long long generic_write_table(int, void *, int, void *, int);
333 void restorefs();
334 struct dir_info *scan1_opendir(char *pathname, char *subpath, int depth);
335 void write_filesystem_tables(struct squashfs_super_block *sBlk, int nopad);
336 unsigned short get_checksum_mem(char *buff, int bytes);
337 void check_usable_phys_mem(int total_mem);
338
339 /* ANDROID CHANGES START*/
340 #ifdef ANDROID
341 static int whitelisted(struct stat *buf);
342 static void add_whitelist_entry(char *filename, struct stat *buf);
343 static int add_whitelist(char *path);
344 static void process_whitelist_file(char *argv);
345
346 #define WHITELIST_SIZE 8192
347 int whitelist = 0;
348
349 struct whitelist_info {
350 dev_t st_dev;
351 ino_t st_ino;
352 };
353 char *whitelist_filename = NULL;
354 struct whitelist_info *whitelist_paths = NULL;
355 #endif
356 /* ANDROID CHANGES END */
357
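/*
 * Tidy up before exiting.  If appending, hand over to the restore thread
 * (or just exit if we already are the restore thread); otherwise delete
 * the partially written destination file or the recovery file.
 */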
void prep_exit()
359 {
360 if(restore_thread) {
361 if(pthread_self() == *restore_thread) {
362 /*
363 * Recursive failure when trying to restore filesystem!
364 * Nothing to do except to exit, otherwise we'll just
365 * appear to hang. The user should be able to restore
366 * from the recovery file (which is why it was added, in
367 * case of catastrophic failure in Mksquashfs)
368 */
369 exit(1);
370 } else {
371 /* signal the restore thread to restore */
372 pthread_kill(*restore_thread, SIGUSR1);
373 pthread_exit(NULL);
374 }
375 } else if(delete) {
376 if(destination_file && !block_device)
377 unlink(destination_file);
378 } else if(recovery_file)
379 unlink(recovery_file);
380 }
381
382
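/*
 * Overflow checks used when validating sizes and counts: each returns
 * TRUE if the operation (a + b, a << shift, a * multiplier) would
 * overflow the int (or long long) range.
 */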
int add_overflow(int a, int b)
384 {
385 return (INT_MAX - a) < b;
386 }
387
388
int shift_overflow(int a, int shift)
390 {
391 return (INT_MAX >> shift) < a;
392 }
393
394
int multiply_overflow(int a, int multiplier)
396 {
397 return (INT_MAX / multiplier) < a;
398 }
399
400
int multiply_overflowll(long long a, int multiplier)
402 {
403 return (LLONG_MAX / multiplier) < a;
404 }
405
406
407 #define MKINODE(A) ((squashfs_inode)(((squashfs_inode) inode_bytes << 16) \
408 + (((char *)A) - data_cache)))
409
410
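/*
 * Abort an append: roll the in-memory tables and counters back to the
 * snapshot taken before appending started, rewrite the filesystem tables
 * and exit.
 */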
void restorefs()
412 {
413 ERROR("Exiting - restoring original filesystem!\n\n");
414
415 bytes = sbytes;
416 memcpy(data_cache, sdata_cache, cache_bytes = scache_bytes);
417 memcpy(directory_data_cache, sdirectory_data_cache,
418 sdirectory_cache_bytes);
419 directory_cache_bytes = sdirectory_cache_bytes;
420 inode_bytes = sinode_bytes;
421 directory_bytes = sdirectory_bytes;
422 memcpy(directory_table + directory_bytes, sdirectory_compressed,
423 sdirectory_compressed_bytes);
424 directory_bytes += sdirectory_compressed_bytes;
425 total_bytes = stotal_bytes;
426 total_inode_bytes = stotal_inode_bytes;
427 total_directory_bytes = stotal_directory_bytes;
428 inode_count = sinode_count;
429 file_count = sfile_count;
430 sym_count = ssym_count;
431 dev_count = sdev_count;
432 dir_count = sdir_count;
433 fifo_count = sfifo_count;
434 sock_count = ssock_count;
435 dup_files = sdup_files;
436 fragments = sfragments;
437 id_count = sid_count;
438 restore_xattrs();
439 write_filesystem_tables(&sBlk, nopad);
440 exit(1);
441 }
442
443
void sighandler()
445 {
446 EXIT_MKSQUASHFS();
447 }
448
449
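/*
 * Compress size bytes from s into d.  Returns the compressed length, or
 * the original size with the "uncompressed" bit set if compression was
 * disabled for this block type, did not reduce the size, or (on ANDROID
 * builds) did not achieve the configured compression threshold.
 */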
int mangle2(void *strm, char *d, char *s, int size,
	int block_size, int uncompressed, int data_block)
452 {
453 int error, c_byte = 0;
454
455 if(!uncompressed) {
456 c_byte = compressor_compress(comp, strm, d, s, size, block_size,
457 &error);
458 if(c_byte == -1)
459 BAD_ERROR("mangle2:: %s compress failed with error "
460 "code %d\n", comp->name, error);
461 }
462
463 if(c_byte == 0 || c_byte >= size ||
464 (c_byte > (size * ((100.0 - compress_thresh_per) / 100.0)))) {
465 memcpy(d, s, size);
466 return size | (data_block ? SQUASHFS_COMPRESSED_BIT_BLOCK :
467 SQUASHFS_COMPRESSED_BIT);
468 }
469
470 return c_byte;
471 }
472
473
int mangle(char *d, char *s, int size, int block_size,
	int uncompressed, int data_block)
476 {
477 return mangle2(stream, d, s, size, block_size, uncompressed,
478 data_block);
479 }
480
481
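/*
 * Reserve req_size bytes in the uncompressed inode cache (data_cache)
 * and return a pointer to them.  Any complete SQUASHFS_METADATA_SIZE
 * blocks already in the cache are first compressed into the in-memory
 * inode table.
 */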
void *get_inode(int req_size)
483 {
484 int data_space;
485 unsigned short c_byte;
486
487 while(cache_bytes >= SQUASHFS_METADATA_SIZE) {
488 if((inode_size - inode_bytes) <
489 ((SQUASHFS_METADATA_SIZE << 1)) + 2) {
490 void *it = realloc(inode_table, inode_size +
491 (SQUASHFS_METADATA_SIZE << 1) + 2);
492 if(it == NULL)
493 MEM_ERROR();
494 inode_table = it;
495 inode_size += (SQUASHFS_METADATA_SIZE << 1) + 2;
496 }
497
498 c_byte = mangle(inode_table + inode_bytes + BLOCK_OFFSET,
499 data_cache, SQUASHFS_METADATA_SIZE,
500 SQUASHFS_METADATA_SIZE, noI, 0);
501 TRACE("Inode block @ 0x%x, size %d\n", inode_bytes, c_byte);
502 SQUASHFS_SWAP_SHORTS(&c_byte, inode_table + inode_bytes, 1);
503 inode_bytes += SQUASHFS_COMPRESSED_SIZE(c_byte) + BLOCK_OFFSET;
504 total_inode_bytes += SQUASHFS_METADATA_SIZE + BLOCK_OFFSET;
505 memmove(data_cache, data_cache + SQUASHFS_METADATA_SIZE,
506 cache_bytes - SQUASHFS_METADATA_SIZE);
507 cache_bytes -= SQUASHFS_METADATA_SIZE;
508 }
509
510 data_space = (cache_size - cache_bytes);
511 if(data_space < req_size) {
512 int realloc_size = cache_size == 0 ?
513 ((req_size + SQUASHFS_METADATA_SIZE) &
514 ~(SQUASHFS_METADATA_SIZE - 1)) : req_size -
515 data_space;
516
517 void *dc = realloc(data_cache, cache_size +
518 realloc_size);
519 if(dc == NULL)
520 MEM_ERROR();
521 cache_size += realloc_size;
522 data_cache = dc;
523 }
524
525 cache_bytes += req_size;
526
527 return data_cache + cache_bytes - req_size;
528 }
529
530
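/*
 * Read bytes from fd, retrying after EINTR and short reads.  Returns the
 * number of bytes read (less than requested on EOF) or -1 on error.
 */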
int read_bytes(int fd, void *buff, int bytes)
532 {
533 int res, count;
534
535 for(count = 0; count < bytes; count += res) {
536 res = read(fd, buff + count, bytes - count);
537 if(res < 1) {
538 if(res == 0)
539 goto bytes_read;
540 else if(errno != EINTR) {
541 ERROR("Read failed because %s\n",
542 strerror(errno));
543 return -1;
544 } else
545 res = 0;
546 }
547 }
548
549 bytes_read:
550 return count;
551 }
552
553
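/*
 * Positioned read from the destination, serialised with pos_mutex.
 * Returns 1 on success and 0 on failure.
 */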
int read_fs_bytes(int fd, long long byte, int bytes, void *buff)
555 {
556 off_t off = byte;
557 int res = 1;
558
559 TRACE("read_fs_bytes: reading from position 0x%llx, bytes %d\n",
560 byte, bytes);
561
562 pthread_cleanup_push((void *) pthread_mutex_unlock, &pos_mutex);
563 pthread_mutex_lock(&pos_mutex);
564 if(lseek(fd, off, SEEK_SET) == -1) {
565 ERROR("read_fs_bytes: Lseek on destination failed because %s, "
566 "offset=0x%llx\n", strerror(errno), off);
567 res = 0;
568 } else if(read_bytes(fd, buff, bytes) < bytes) {
569 ERROR("Read on destination failed\n");
570 res = 0;
571 }
572
573 pthread_cleanup_pop(1);
574 return res;
575 }
576
577
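/*
 * Write bytes to fd, retrying after EINTR.  Returns 0 on success and -1
 * on error.
 */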
int write_bytes(int fd, void *buff, int bytes)
579 {
580 int res, count;
581
582 for(count = 0; count < bytes; count += res) {
583 res = write(fd, buff + count, bytes - count);
584 if(res == -1) {
585 if(errno != EINTR) {
586 ERROR("Write failed because %s\n",
587 strerror(errno));
588 return -1;
589 }
590 res = 0;
591 }
592 }
593
594 return 0;
595 }
596
597
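/*
 * Positioned write to the destination, serialised with pos_mutex.  Any
 * failure is treated as fatal (typically out of space on the output).
 */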
void write_destination(int fd, long long byte, int bytes, void *buff)
599 {
600 off_t off = byte;
601
602 pthread_cleanup_push((void *) pthread_mutex_unlock, &pos_mutex);
603 pthread_mutex_lock(&pos_mutex);
604
605 if(lseek(fd, off, SEEK_SET) == -1) {
606 ERROR("write_destination: Lseek on destination "
607 "failed because %s, offset=0x%llx\n", strerror(errno),
608 off);
609 BAD_ERROR("Probably out of space on output %s\n",
610 block_device ? "block device" : "filesystem");
611 }
612
613 if(write_bytes(fd, buff, bytes) == -1)
614 BAD_ERROR("Failed to write to output %s\n",
615 block_device ? "block device" : "filesystem");
616
617 pthread_cleanup_pop(1);
618 }
619
620
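/*
 * Compress the remaining cached inode metadata into the inode table and
 * write the table to the destination.  Returns the table's start
 * position in the output.
 */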
long long write_inodes()
622 {
623 unsigned short c_byte;
624 int avail_bytes;
625 char *datap = data_cache;
626 long long start_bytes = bytes;
627
628 while(cache_bytes) {
629 if(inode_size - inode_bytes <
630 ((SQUASHFS_METADATA_SIZE << 1) + 2)) {
631 void *it = realloc(inode_table, inode_size +
632 ((SQUASHFS_METADATA_SIZE << 1) + 2));
633 if(it == NULL)
634 MEM_ERROR();
635 inode_size += (SQUASHFS_METADATA_SIZE << 1) + 2;
636 inode_table = it;
637 }
638 avail_bytes = cache_bytes > SQUASHFS_METADATA_SIZE ?
639 SQUASHFS_METADATA_SIZE : cache_bytes;
640 c_byte = mangle(inode_table + inode_bytes + BLOCK_OFFSET, datap,
641 avail_bytes, SQUASHFS_METADATA_SIZE, noI, 0);
642 TRACE("Inode block @ 0x%x, size %d\n", inode_bytes, c_byte);
643 SQUASHFS_SWAP_SHORTS(&c_byte, inode_table + inode_bytes, 1);
644 inode_bytes += SQUASHFS_COMPRESSED_SIZE(c_byte) + BLOCK_OFFSET;
645 total_inode_bytes += avail_bytes + BLOCK_OFFSET;
646 datap += avail_bytes;
647 cache_bytes -= avail_bytes;
648 }
649
650 write_destination(fd, bytes, inode_bytes, inode_table);
651 bytes += inode_bytes;
652
653 return start_bytes;
654 }
655
656
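/*
 * Compress the remaining cached directory metadata into the directory
 * table and write the table to the destination.  Returns the table's
 * start position in the output.
 */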
long long write_directories()
658 {
659 unsigned short c_byte;
660 int avail_bytes;
661 char *directoryp = directory_data_cache;
662 long long start_bytes = bytes;
663
664 while(directory_cache_bytes) {
665 if(directory_size - directory_bytes <
666 ((SQUASHFS_METADATA_SIZE << 1) + 2)) {
667 void *dt = realloc(directory_table,
668 directory_size + ((SQUASHFS_METADATA_SIZE << 1)
669 + 2));
670 if(dt == NULL)
671 MEM_ERROR();
672 directory_size += (SQUASHFS_METADATA_SIZE << 1) + 2;
673 directory_table = dt;
674 }
675 avail_bytes = directory_cache_bytes > SQUASHFS_METADATA_SIZE ?
676 SQUASHFS_METADATA_SIZE : directory_cache_bytes;
677 c_byte = mangle(directory_table + directory_bytes +
678 BLOCK_OFFSET, directoryp, avail_bytes,
679 SQUASHFS_METADATA_SIZE, noI, 0);
680 TRACE("Directory block @ 0x%x, size %d\n", directory_bytes,
681 c_byte);
682 SQUASHFS_SWAP_SHORTS(&c_byte,
683 directory_table + directory_bytes, 1);
684 directory_bytes += SQUASHFS_COMPRESSED_SIZE(c_byte) +
685 BLOCK_OFFSET;
686 total_directory_bytes += avail_bytes + BLOCK_OFFSET;
687 directoryp += avail_bytes;
688 directory_cache_bytes -= avail_bytes;
689 }
690 write_destination(fd, bytes, directory_bytes, directory_table);
691 bytes += directory_bytes;
692
693 return start_bytes;
694 }
695
696
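/*
 * Endian-swap the uid/gid table and write it out as a standard metadata
 * table.  Returns the position of the table index.
 */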
long long write_id_table()
698 {
699 unsigned int id_bytes = SQUASHFS_ID_BYTES(id_count);
700 unsigned int p[id_count];
701 int i;
702
703 TRACE("write_id_table: ids %d, id_bytes %d\n", id_count, id_bytes);
704 for(i = 0; i < id_count; i++) {
705 TRACE("write_id_table: id index %d, id %d", i, id_table[i]->id);
706 SQUASHFS_SWAP_INTS(&id_table[i]->id, p + i, 1);
707 }
708
709 return generic_write_table(id_bytes, p, 0, NULL, noI);
710 }
711
712
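/*
 * Look an id (uid or gid) up in the id hash table.  Returns NULL if it
 * has not been seen before; create_id() below adds a new entry.
 */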
struct id *get_id(unsigned int id)
714 {
715 int hash = ID_HASH(id);
716 struct id *entry = id_hash_table[hash];
717
718 for(; entry; entry = entry->next)
719 if(entry->id == id)
720 break;
721
722 return entry;
723 }
724
725
struct id *create_id(unsigned int id)
727 {
728 int hash = ID_HASH(id);
729 struct id *entry = malloc(sizeof(struct id));
730 if(entry == NULL)
731 MEM_ERROR();
732 entry->id = id;
733 entry->index = id_count ++;
734 entry->flags = 0;
735 entry->next = id_hash_table[hash];
736 id_hash_table[hash] = entry;
737 id_table[entry->index] = entry;
738 return entry;
739 }
740
741
unsigned int get_uid(unsigned int uid)
743 {
744 struct id *entry = get_id(uid);
745
746 if(entry == NULL) {
747 if(id_count == SQUASHFS_IDS)
748 BAD_ERROR("Out of uids!\n");
749 entry = create_id(uid);
750 }
751
752 if((entry->flags & ISA_UID) == 0) {
753 entry->flags |= ISA_UID;
754 uid_count ++;
755 }
756
757 return entry->index;
758 }
759
760
unsigned int get_guid(unsigned int guid)
762 {
763 struct id *entry = get_id(guid);
764
765 if(entry == NULL) {
766 if(id_count == SQUASHFS_IDS)
767 BAD_ERROR("Out of gids!\n");
768 entry = create_id(guid);
769 }
770
771 if((entry->flags & ISA_GID) == 0) {
772 entry->flags |= ISA_GID;
773 guid_count ++;
774 }
775
776 return entry->index;
777 }
778
779
780 #define ALLOC_SIZE 128
781
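/*
 * Build the full source pathname of dir_ent (its directory's pathname
 * plus the entry name) into the passed buffer, growing the buffer in
 * ALLOC_SIZE chunks as required.
 */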
char *_pathname(struct dir_ent *dir_ent, char *pathname, int *size)
783 {
784 if(pathname == NULL) {
785 pathname = malloc(ALLOC_SIZE);
786 if(pathname == NULL)
787 MEM_ERROR();
788 }
789
790 for(;;) {
791 int res = snprintf(pathname, *size, "%s/%s",
792 dir_ent->our_dir->pathname,
793 dir_ent->source_name ? : dir_ent->name);
794
795 if(res < 0)
796 BAD_ERROR("snprintf failed in pathname\n");
797 else if(res >= *size) {
798 /*
799 * pathname is too small to contain the result, so
800 * increase it and try again
801 */
802 *size = (res + ALLOC_SIZE) & ~(ALLOC_SIZE - 1);
803 pathname = realloc(pathname, *size);
804 if(pathname == NULL)
805 MEM_ERROR();
806 } else
807 break;
808 }
809
810 return pathname;
811 }
812
813
char *pathname(struct dir_ent *dir_ent)
815 {
816 static char *pathname = NULL;
817 static int size = ALLOC_SIZE;
818
819 if (dir_ent->nonstandard_pathname)
820 return dir_ent->nonstandard_pathname;
821
822 return pathname = _pathname(dir_ent, pathname, &size);
823 }
824
825
char *pathname_reader(struct dir_ent *dir_ent)
827 {
828 static char *pathname = NULL;
829 static int size = ALLOC_SIZE;
830
831 if (dir_ent->nonstandard_pathname)
832 return dir_ent->nonstandard_pathname;
833
834 return pathname = _pathname(dir_ent, pathname, &size);
835 }
836
837
char *subpathname(struct dir_ent *dir_ent)
839 {
840 static char *subpath = NULL;
841 static int size = ALLOC_SIZE;
842 int res;
843
844 if(subpath == NULL) {
845 subpath = malloc(ALLOC_SIZE);
846 if(subpath == NULL)
847 MEM_ERROR();
848 }
849
850 for(;;) {
851 if(dir_ent->our_dir->subpath[0] != '\0')
852 res = snprintf(subpath, size, "%s/%s",
853 dir_ent->our_dir->subpath, dir_ent->name);
854 else
855 res = snprintf(subpath, size, "/%s", dir_ent->name);
856
857 if(res < 0)
858 BAD_ERROR("snprintf failed in subpathname\n");
859 else if(res >= size) {
860 /*
861 * subpath is too small to contain the result, so
862 * increase it and try again
863 */
864 size = (res + ALLOC_SIZE) & ~(ALLOC_SIZE - 1);
865 subpath = realloc(subpath, size);
866 if(subpath == NULL)
867 MEM_ERROR();
868 } else
869 break;
870 }
871
872 return subpath;
873 }
874
875
static inline unsigned int get_inode_no(struct inode_info *inode)
877 {
878 return inode->inode_number;
879 }
880
881
static inline unsigned int get_parent_no(struct dir_info *dir)
883 {
884 return dir->depth ? get_inode_no(dir->dir_ent->inode) : inode_no;
885 }
886
887
888 /* ANDROID CHANGES START*/
889 #ifdef ANDROID
890
/* Round the passed value |n| up to the smallest multiple of 4096 that is
 * greater than or equal to |n|, and return the 4K block number for that
 * value. */
static unsigned long long round_up_block(unsigned long long n) {
894 const unsigned long long kMapBlockSize = 4096;
895 return (n + kMapBlockSize - 1) / kMapBlockSize;
896 }
897
static inline void write_block_map_entry(char *sub_path, unsigned long long start_block, unsigned long long total_size,
		char * mount_point, FILE *block_map_file) {
900 if (block_map_file) {
901 /* We assign each 4K block based on what file the first byte of the block
902 * belongs to. The current file consists of the chunk of bytes in the
903 * interval [start_block, start_block + total_size), (closed on the left end
904 * and open on the right end). We then compute the first block whose first
905 * byte is equal to or greater than start_block as |round_start| and then
906 * the first block whose first byte is *past* this interval, as
907 * |round_end + 1|. This means that the blocks that should be assigned to
908 * the current file are in the interval [round_start, round_end + 1), or
909 * simply [round_start, round_end].
910 */
911 unsigned long long round_start = round_up_block(start_block);
912 unsigned long long round_end = round_up_block(start_block + total_size) - 1;
913 if (round_start && total_size && round_start <= round_end) {
914 fprintf(block_map_file, "/%s", mount_point);
915 if (sub_path[0] != '/') fprintf(block_map_file, "/");
916 if (round_start == round_end)
917 fprintf(block_map_file, "%s %lld\n", sub_path, round_start);
918 else
919 fprintf(block_map_file, "%s %lld-%lld\n", sub_path, round_start, round_end);
920 }
921 }
922 }
923 #endif
924 /* ANDROID CHANGES END */
925
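/*
 * Create the on-disk inode of the given type for dir_ent, storing it in
 * the inode metadata cache and returning its location via i_no.  The
 * inode is promoted to the extended variant where needed (xattrs,
 * multiple links, large or sparse files, indexed directories).  On
 * ANDROID builds a block map entry is also emitted for unfragmented
 * files.
 */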
int create_inode(squashfs_inode *i_no, struct dir_info *dir_info,
	struct dir_ent *dir_ent, int type, long long byte_size,
	long long start_block, unsigned int offset, unsigned int *block_list,
	struct fragment *fragment, struct directory *dir_in, long long sparse)
930 {
931 struct stat *buf = &dir_ent->inode->buf;
932 union squashfs_inode_header inode_header;
933 struct squashfs_base_inode_header *base = &inode_header.base;
934 void *inode;
935 char *filename = pathname(dir_ent);
936 int nlink = dir_ent->inode->nlink;
937 int xattr = read_xattrs(dir_ent);
938
939 switch(type) {
940 case SQUASHFS_FILE_TYPE:
941 if(dir_ent->inode->nlink > 1 ||
942 byte_size >= (1LL << 32) ||
943 start_block >= (1LL << 32) ||
944 sparse || IS_XATTR(xattr))
945 type = SQUASHFS_LREG_TYPE;
946 break;
947 case SQUASHFS_DIR_TYPE:
948 if(dir_info->dir_is_ldir || IS_XATTR(xattr))
949 type = SQUASHFS_LDIR_TYPE;
950 break;
951 case SQUASHFS_SYMLINK_TYPE:
952 if(IS_XATTR(xattr))
953 type = SQUASHFS_LSYMLINK_TYPE;
954 break;
955 case SQUASHFS_BLKDEV_TYPE:
956 if(IS_XATTR(xattr))
957 type = SQUASHFS_LBLKDEV_TYPE;
958 break;
959 case SQUASHFS_CHRDEV_TYPE:
960 if(IS_XATTR(xattr))
961 type = SQUASHFS_LCHRDEV_TYPE;
962 break;
963 case SQUASHFS_FIFO_TYPE:
964 if(IS_XATTR(xattr))
965 type = SQUASHFS_LFIFO_TYPE;
966 break;
967 case SQUASHFS_SOCKET_TYPE:
968 if(IS_XATTR(xattr))
969 type = SQUASHFS_LSOCKET_TYPE;
970 break;
971 }
972
973 base->mode = SQUASHFS_MODE(buf->st_mode);
974 base->uid = get_uid((unsigned int) global_uid == -1 ?
975 buf->st_uid : global_uid);
976 base->inode_type = type;
977 base->guid = get_guid((unsigned int) global_gid == -1 ?
978 buf->st_gid : global_gid);
979 base->mtime = buf->st_mtime;
980 base->inode_number = get_inode_no(dir_ent->inode);
981
982 if(type == SQUASHFS_FILE_TYPE) {
983 int i;
984 struct squashfs_reg_inode_header *reg = &inode_header.reg;
985 size_t off = offsetof(struct squashfs_reg_inode_header, block_list);
986 /* ANDROID CHANGES START*/
987 #ifdef ANDROID
988 unsigned long long total_size = 0;
989 char *sub_path;
990 #endif
991 /* ANDROID CHANGES END */
992
993 inode = get_inode(sizeof(*reg) + offset * sizeof(unsigned int));
994 reg->file_size = byte_size;
995 reg->start_block = start_block;
996 reg->fragment = fragment->index;
997 reg->offset = fragment->offset;
998 SQUASHFS_SWAP_REG_INODE_HEADER(reg, inode);
999 SQUASHFS_SWAP_INTS(block_list, inode + off, offset);
1000 TRACE("File inode, file_size %lld, start_block 0x%llx, blocks "
1001 "%d, fragment %d, offset %d, size %d\n", byte_size,
1002 start_block, offset, fragment->index, fragment->offset,
1003 fragment->size);
1004 for(i = 0; i < offset; i++) {
1005 TRACE("Block %d, size %d\n", i, block_list[i]);
1006 total_size += SQUASHFS_COMPRESSED_SIZE_BLOCK(block_list[i]);
1007 }
1008 /* ANDROID CHANGES START*/
1009 #ifdef ANDROID
1010 sub_path = subpathname(dir_ent);
1011 if (block_map_file && fragment->index == -1) {
1012 write_block_map_entry(sub_path, start_block, total_size, mount_point, block_map_file);
1013 }
1014 #endif
1015 /* ANDROID CHANGES END */
1016 }
1017 else if(type == SQUASHFS_LREG_TYPE) {
1018 /* ANDROID CHANGES START*/
1019 #ifdef ANDROID
1020 unsigned long long total_size = 0;
1021 char *sub_path;
1022 #endif
1023 /* ANDROID CHANGES END */
1024 int i;
1025 struct squashfs_lreg_inode_header *reg = &inode_header.lreg;
1026 size_t off = offsetof(struct squashfs_lreg_inode_header, block_list);
1027
1028 inode = get_inode(sizeof(*reg) + offset * sizeof(unsigned int));
1029 reg->nlink = nlink;
1030 reg->file_size = byte_size;
1031 reg->start_block = start_block;
1032 reg->fragment = fragment->index;
1033 reg->offset = fragment->offset;
1034 if(sparse && sparse >= byte_size)
1035 sparse = byte_size - 1;
1036 reg->sparse = sparse;
1037 reg->xattr = xattr;
1038 SQUASHFS_SWAP_LREG_INODE_HEADER(reg, inode);
1039 SQUASHFS_SWAP_INTS(block_list, inode + off, offset);
1040 TRACE("Long file inode, file_size %lld, start_block 0x%llx, "
1041 "blocks %d, fragment %d, offset %d, size %d, nlink %d"
1042 "\n", byte_size, start_block, offset, fragment->index,
1043 fragment->offset, fragment->size, nlink);
1044 for(i = 0; i < offset; i++) {
1045 TRACE("Block %d, size %d\n", i, block_list[i]);
1046 total_size += SQUASHFS_COMPRESSED_SIZE_BLOCK(block_list[i]);
1047 }
1048 /* ANDROID CHANGES START*/
1049 #ifdef ANDROID
1050 sub_path = subpathname(dir_ent);
1051 if (block_map_file && fragment->index == -1) {
1052 write_block_map_entry(sub_path, start_block, total_size, mount_point, block_map_file);
1053 }
1054 #endif
1055 /* ANDROID CHANGES END */
1056 }
1057 else if(type == SQUASHFS_LDIR_TYPE) {
1058 int i;
1059 unsigned char *p;
1060 struct squashfs_ldir_inode_header *dir = &inode_header.ldir;
1061 struct cached_dir_index *index = dir_in->index;
1062 unsigned int i_count = dir_in->i_count;
1063 unsigned int i_size = dir_in->i_size;
1064
1065 if(byte_size >= 1 << 27)
1066 BAD_ERROR("directory greater than 2^27-1 bytes!\n");
1067
1068 inode = get_inode(sizeof(*dir) + i_size);
1069 dir->inode_type = SQUASHFS_LDIR_TYPE;
1070 dir->nlink = dir_ent->dir->directory_count + 2;
1071 dir->file_size = byte_size;
1072 dir->offset = offset;
1073 dir->start_block = start_block;
1074 dir->i_count = i_count;
1075 dir->parent_inode = get_parent_no(dir_ent->our_dir);
1076 dir->xattr = xattr;
1077
1078 SQUASHFS_SWAP_LDIR_INODE_HEADER(dir, inode);
1079 p = inode + offsetof(struct squashfs_ldir_inode_header, index);
1080 for(i = 0; i < i_count; i++) {
1081 SQUASHFS_SWAP_DIR_INDEX(&index[i].index, p);
1082 p += offsetof(struct squashfs_dir_index, name);
1083 memcpy(p, index[i].name, index[i].index.size + 1);
1084 p += index[i].index.size + 1;
1085 }
1086 TRACE("Long directory inode, file_size %lld, start_block "
1087 "0x%llx, offset 0x%x, nlink %d\n", byte_size,
1088 start_block, offset, dir_ent->dir->directory_count + 2);
1089 }
1090 else if(type == SQUASHFS_DIR_TYPE) {
1091 struct squashfs_dir_inode_header *dir = &inode_header.dir;
1092
1093 inode = get_inode(sizeof(*dir));
1094 dir->nlink = dir_ent->dir->directory_count + 2;
1095 dir->file_size = byte_size;
1096 dir->offset = offset;
1097 dir->start_block = start_block;
1098 dir->parent_inode = get_parent_no(dir_ent->our_dir);
1099 SQUASHFS_SWAP_DIR_INODE_HEADER(dir, inode);
1100 TRACE("Directory inode, file_size %lld, start_block 0x%llx, "
1101 "offset 0x%x, nlink %d\n", byte_size, start_block,
1102 offset, dir_ent->dir->directory_count + 2);
1103 }
1104 else if(type == SQUASHFS_CHRDEV_TYPE || type == SQUASHFS_BLKDEV_TYPE) {
1105 struct squashfs_dev_inode_header *dev = &inode_header.dev;
1106 unsigned int major = major(buf->st_rdev);
1107 unsigned int minor = minor(buf->st_rdev);
1108
1109 if(major > 0xfff) {
1110 ERROR("Major %d out of range in device node %s, "
1111 "truncating to %d\n", major, filename,
1112 major & 0xfff);
1113 major &= 0xfff;
1114 }
1115 if(minor > 0xfffff) {
1116 ERROR("Minor %d out of range in device node %s, "
1117 "truncating to %d\n", minor, filename,
1118 minor & 0xfffff);
1119 minor &= 0xfffff;
1120 }
1121 inode = get_inode(sizeof(*dev));
1122 dev->nlink = nlink;
1123 dev->rdev = (major << 8) | (minor & 0xff) |
1124 ((minor & ~0xff) << 12);
1125 SQUASHFS_SWAP_DEV_INODE_HEADER(dev, inode);
1126 TRACE("Device inode, rdev 0x%x, nlink %d\n", dev->rdev, nlink);
1127 }
1128 else if(type == SQUASHFS_LCHRDEV_TYPE || type == SQUASHFS_LBLKDEV_TYPE) {
1129 struct squashfs_ldev_inode_header *dev = &inode_header.ldev;
1130 unsigned int major = major(buf->st_rdev);
1131 unsigned int minor = minor(buf->st_rdev);
1132
1133 if(major > 0xfff) {
1134 ERROR("Major %d out of range in device node %s, "
1135 "truncating to %d\n", major, filename,
1136 major & 0xfff);
1137 major &= 0xfff;
1138 }
1139 if(minor > 0xfffff) {
1140 ERROR("Minor %d out of range in device node %s, "
1141 "truncating to %d\n", minor, filename,
1142 minor & 0xfffff);
1143 minor &= 0xfffff;
1144 }
1145 inode = get_inode(sizeof(*dev));
1146 dev->nlink = nlink;
1147 dev->rdev = (major << 8) | (minor & 0xff) |
1148 ((minor & ~0xff) << 12);
1149 dev->xattr = xattr;
1150 SQUASHFS_SWAP_LDEV_INODE_HEADER(dev, inode);
1151 TRACE("Device inode, rdev 0x%x, nlink %d\n", dev->rdev, nlink);
1152 }
1153 else if(type == SQUASHFS_SYMLINK_TYPE) {
1154 struct squashfs_symlink_inode_header *symlink = &inode_header.symlink;
1155 int byte = strlen(dir_ent->inode->symlink);
1156 size_t off = offsetof(struct squashfs_symlink_inode_header, symlink);
1157
1158 inode = get_inode(sizeof(*symlink) + byte);
1159 symlink->nlink = nlink;
1160 symlink->symlink_size = byte;
1161 SQUASHFS_SWAP_SYMLINK_INODE_HEADER(symlink, inode);
1162 strncpy(inode + off, dir_ent->inode->symlink, byte);
1163 TRACE("Symbolic link inode, symlink_size %d, nlink %d\n", byte,
1164 nlink);
1165 }
1166 else if(type == SQUASHFS_LSYMLINK_TYPE) {
1167 struct squashfs_symlink_inode_header *symlink = &inode_header.symlink;
1168 int byte = strlen(dir_ent->inode->symlink);
1169 size_t off = offsetof(struct squashfs_symlink_inode_header, symlink);
1170
1171 inode = get_inode(sizeof(*symlink) + byte +
1172 sizeof(unsigned int));
1173 symlink->nlink = nlink;
1174 symlink->symlink_size = byte;
1175 SQUASHFS_SWAP_SYMLINK_INODE_HEADER(symlink, inode);
1176 strncpy(inode + off, dir_ent->inode->symlink, byte);
1177 SQUASHFS_SWAP_INTS(&xattr, inode + off + byte, 1);
1178 TRACE("Symbolic link inode, symlink_size %d, nlink %d\n", byte,
1179 nlink);
1180 }
1181 else if(type == SQUASHFS_FIFO_TYPE || type == SQUASHFS_SOCKET_TYPE) {
1182 struct squashfs_ipc_inode_header *ipc = &inode_header.ipc;
1183
1184 inode = get_inode(sizeof(*ipc));
1185 ipc->nlink = nlink;
1186 SQUASHFS_SWAP_IPC_INODE_HEADER(ipc, inode);
1187 TRACE("ipc inode, type %s, nlink %d\n", type ==
1188 SQUASHFS_FIFO_TYPE ? "fifo" : "socket", nlink);
1189 }
1190 else if(type == SQUASHFS_LFIFO_TYPE || type == SQUASHFS_LSOCKET_TYPE) {
1191 struct squashfs_lipc_inode_header *ipc = &inode_header.lipc;
1192
1193 inode = get_inode(sizeof(*ipc));
1194 ipc->nlink = nlink;
1195 ipc->xattr = xattr;
1196 SQUASHFS_SWAP_LIPC_INODE_HEADER(ipc, inode);
1197 TRACE("ipc inode, type %s, nlink %d\n", type ==
1198 SQUASHFS_FIFO_TYPE ? "fifo" : "socket", nlink);
1199 } else
1200 BAD_ERROR("Unrecognised inode %d in create_inode\n", type);
1201
1202 *i_no = MKINODE(inode);
1203 inode_count ++;
1204
1205 TRACE("Created inode 0x%llx, type %d, uid %d, guid %d\n", *i_no, type,
1206 base->uid, base->guid);
1207
1208 return TRUE;
1209 }
1210
1211
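/*
 * Add an entry (name, inode location, type) to the in-memory directory
 * being built.  A new directory header is started when the 256 entry
 * limit is reached, the inode start block changes, or the inode number
 * delta no longer fits in a signed 16-bit value.
 */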
void add_dir(squashfs_inode inode, unsigned int inode_number, char *name,
	int type, struct directory *dir)
1214 {
1215 unsigned char *buff;
1216 struct squashfs_dir_entry idir;
1217 unsigned int start_block = inode >> 16;
1218 unsigned int offset = inode & 0xffff;
1219 unsigned int size = strlen(name);
1220 size_t name_off = offsetof(struct squashfs_dir_entry, name);
1221
1222 if(size > SQUASHFS_NAME_LEN) {
1223 size = SQUASHFS_NAME_LEN;
1224 ERROR("Filename is greater than %d characters, truncating! ..."
1225 "\n", SQUASHFS_NAME_LEN);
1226 }
1227
1228 if(dir->p + sizeof(struct squashfs_dir_entry) + size +
1229 sizeof(struct squashfs_dir_header)
1230 >= dir->buff + dir->size) {
1231 buff = realloc(dir->buff, dir->size += SQUASHFS_METADATA_SIZE);
1232 if(buff == NULL)
1233 MEM_ERROR();
1234
1235 dir->p = (dir->p - dir->buff) + buff;
1236 if(dir->entry_count_p)
1237 dir->entry_count_p = (dir->entry_count_p - dir->buff +
1238 buff);
1239 dir->index_count_p = dir->index_count_p - dir->buff + buff;
1240 dir->buff = buff;
1241 }
1242
1243 if(dir->entry_count == 256 || start_block != dir->start_block ||
1244 ((dir->entry_count_p != NULL) &&
1245 ((dir->p + sizeof(struct squashfs_dir_entry) + size -
1246 dir->index_count_p) > SQUASHFS_METADATA_SIZE)) ||
1247 ((long long) inode_number - dir->inode_number) > 32767
1248 || ((long long) inode_number - dir->inode_number)
1249 < -32768) {
1250 if(dir->entry_count_p) {
1251 struct squashfs_dir_header dir_header;
1252
1253 if((dir->p + sizeof(struct squashfs_dir_entry) + size -
1254 dir->index_count_p) >
1255 SQUASHFS_METADATA_SIZE) {
1256 if(dir->i_count % I_COUNT_SIZE == 0) {
1257 dir->index = realloc(dir->index,
1258 (dir->i_count + I_COUNT_SIZE) *
1259 sizeof(struct cached_dir_index));
1260 if(dir->index == NULL)
1261 MEM_ERROR();
1262 }
1263 dir->index[dir->i_count].index.index =
1264 dir->p - dir->buff;
1265 dir->index[dir->i_count].index.size = size - 1;
1266 dir->index[dir->i_count++].name = name;
1267 dir->i_size += sizeof(struct squashfs_dir_index)
1268 + size;
1269 dir->index_count_p = dir->p;
1270 }
1271
1272 dir_header.count = dir->entry_count - 1;
1273 dir_header.start_block = dir->start_block;
1274 dir_header.inode_number = dir->inode_number;
1275 SQUASHFS_SWAP_DIR_HEADER(&dir_header,
1276 dir->entry_count_p);
1277
1278 }
1279
1280
1281 dir->entry_count_p = dir->p;
1282 dir->start_block = start_block;
1283 dir->entry_count = 0;
1284 dir->inode_number = inode_number;
1285 dir->p += sizeof(struct squashfs_dir_header);
1286 }
1287
1288 idir.offset = offset;
1289 idir.type = type;
1290 idir.size = size - 1;
1291 idir.inode_number = ((long long) inode_number - dir->inode_number);
1292 SQUASHFS_SWAP_DIR_ENTRY(&idir, dir->p);
1293 strncpy((char *) dir->p + name_off, name, size);
1294 dir->p += sizeof(struct squashfs_dir_entry) + size;
1295 dir->entry_count ++;
1296 }
1297
1298
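/*
 * Flush a completed directory into the directory metadata cache,
 * compressing any full metadata blocks into the directory table, and
 * create the directory's own inode.
 */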
void write_dir(squashfs_inode *inode, struct dir_info *dir_info,
	struct directory *dir)
1301 {
1302 unsigned int dir_size = dir->p - dir->buff;
1303 int data_space = directory_cache_size - directory_cache_bytes;
1304 unsigned int directory_block, directory_offset, i_count, index;
1305 unsigned short c_byte;
1306
1307 if(data_space < dir_size) {
1308 int realloc_size = directory_cache_size == 0 ?
1309 ((dir_size + SQUASHFS_METADATA_SIZE) &
1310 ~(SQUASHFS_METADATA_SIZE - 1)) : dir_size - data_space;
1311
1312 void *dc = realloc(directory_data_cache,
1313 directory_cache_size + realloc_size);
1314 if(dc == NULL)
1315 MEM_ERROR();
1316 directory_cache_size += realloc_size;
1317 directory_data_cache = dc;
1318 }
1319
1320 if(dir_size) {
1321 struct squashfs_dir_header dir_header;
1322
1323 dir_header.count = dir->entry_count - 1;
1324 dir_header.start_block = dir->start_block;
1325 dir_header.inode_number = dir->inode_number;
1326 SQUASHFS_SWAP_DIR_HEADER(&dir_header, dir->entry_count_p);
1327 memcpy(directory_data_cache + directory_cache_bytes, dir->buff,
1328 dir_size);
1329 }
1330 directory_offset = directory_cache_bytes;
1331 directory_block = directory_bytes;
1332 directory_cache_bytes += dir_size;
1333 i_count = 0;
1334 index = SQUASHFS_METADATA_SIZE - directory_offset;
1335
1336 while(1) {
1337 while(i_count < dir->i_count &&
1338 dir->index[i_count].index.index < index)
1339 dir->index[i_count++].index.start_block =
1340 directory_bytes;
1341 index += SQUASHFS_METADATA_SIZE;
1342
1343 if(directory_cache_bytes < SQUASHFS_METADATA_SIZE)
1344 break;
1345
1346 if((directory_size - directory_bytes) <
1347 ((SQUASHFS_METADATA_SIZE << 1) + 2)) {
1348 void *dt = realloc(directory_table,
1349 directory_size + (SQUASHFS_METADATA_SIZE << 1)
1350 + 2);
1351 if(dt == NULL)
1352 MEM_ERROR();
1353 directory_size += SQUASHFS_METADATA_SIZE << 1;
1354 directory_table = dt;
1355 }
1356
1357 c_byte = mangle(directory_table + directory_bytes +
1358 BLOCK_OFFSET, directory_data_cache,
1359 SQUASHFS_METADATA_SIZE, SQUASHFS_METADATA_SIZE,
1360 noI, 0);
1361 TRACE("Directory block @ 0x%x, size %d\n", directory_bytes,
1362 c_byte);
1363 SQUASHFS_SWAP_SHORTS(&c_byte,
1364 directory_table + directory_bytes, 1);
1365 directory_bytes += SQUASHFS_COMPRESSED_SIZE(c_byte) +
1366 BLOCK_OFFSET;
1367 total_directory_bytes += SQUASHFS_METADATA_SIZE + BLOCK_OFFSET;
1368 memmove(directory_data_cache, directory_data_cache +
1369 SQUASHFS_METADATA_SIZE, directory_cache_bytes -
1370 SQUASHFS_METADATA_SIZE);
1371 directory_cache_bytes -= SQUASHFS_METADATA_SIZE;
1372 }
1373
1374 create_inode(inode, dir_info, dir_info->dir_ent, SQUASHFS_DIR_TYPE,
1375 dir_size + 3, directory_block, directory_offset, NULL, NULL,
1376 dir, 0);
1377
1378 #ifdef SQUASHFS_TRACE
1379 {
1380 unsigned char *dirp;
1381 int count;
1382
1383 TRACE("Directory contents of inode 0x%llx\n", *inode);
1384 dirp = dir->buff;
1385 while(dirp < dir->p) {
1386 char buffer[SQUASHFS_NAME_LEN + 1];
1387 struct squashfs_dir_entry idir, *idirp;
1388 struct squashfs_dir_header dirh;
1389 SQUASHFS_SWAP_DIR_HEADER((struct squashfs_dir_header *) dirp,
1390 &dirh);
1391 count = dirh.count + 1;
1392 dirp += sizeof(struct squashfs_dir_header);
1393
1394 TRACE("\tStart block 0x%x, count %d\n",
1395 dirh.start_block, count);
1396
1397 while(count--) {
1398 idirp = (struct squashfs_dir_entry *) dirp;
1399 SQUASHFS_SWAP_DIR_ENTRY(idirp, &idir);
1400 strncpy(buffer, idirp->name, idir.size + 1);
1401 buffer[idir.size + 1] = '\0';
1402 TRACE("\t\tname %s, inode offset 0x%x, type "
1403 "%d\n", buffer, idir.offset, idir.type);
1404 dirp += sizeof(struct squashfs_dir_entry) + idir.size +
1405 1;
1406 }
1407 }
1408 }
1409 #endif
1410 dir_count ++;
1411 }
1412
1413
static struct file_buffer *get_fragment(struct fragment *fragment)
1415 {
1416 struct squashfs_fragment_entry *disk_fragment;
1417 struct file_buffer *buffer, *compressed_buffer;
1418 long long start_block;
1419 int res, size, index = fragment->index;
1420 char locked;
1421
1422 /*
1423 * Lookup fragment block in cache.
1424 * If the fragment block doesn't exist, then get the compressed version
1425 * from the writer cache or off disk, and decompress it.
1426 *
1427 * This routine has two things which complicate the code:
1428 *
1429 * 1. Multiple threads can simultaneously lookup/create the
1430 * same buffer. This means a buffer needs to be "locked"
1431 * when it is being filled in, to prevent other threads from
1432 * using it when it is not ready. This is because we now do
1433 * fragment duplicate checking in parallel.
1434 * 2. We have two caches which need to be checked for the
1435 * presence of fragment blocks: the normal fragment cache
1436 * and a "reserve" cache. The reserve cache is used to
1437 * prevent an unnecessary pipeline stall when the fragment cache
1438 * is full of fragments waiting to be compressed.
1439 */
1440
1441 if(fragment->index == SQUASHFS_INVALID_FRAG)
1442 return NULL;
1443
1444 pthread_cleanup_push((void *) pthread_mutex_unlock, &dup_mutex);
1445 pthread_mutex_lock(&dup_mutex);
1446
1447 again:
1448 buffer = cache_lookup_nowait(fragment_buffer, index, &locked);
1449 if(buffer) {
1450 pthread_mutex_unlock(&dup_mutex);
1451 if(locked)
1452 /* got a buffer being filled in. Wait for it */
1453 cache_wait_unlock(buffer);
1454 goto finished;
1455 }
1456
1457 /* not in fragment cache, is it in the reserve cache? */
1458 buffer = cache_lookup_nowait(reserve_cache, index, &locked);
1459 if(buffer) {
1460 pthread_mutex_unlock(&dup_mutex);
1461 if(locked)
1462 /* got a buffer being filled in. Wait for it */
1463 cache_wait_unlock(buffer);
1464 goto finished;
1465 }
1466
1467 /* in neither cache, try to get it from the fragment cache */
1468 buffer = cache_get_nowait(fragment_buffer, index);
1469 if(!buffer) {
1470 /*
1471 * no room, get it from the reserve cache, this is
1472 * dimensioned so it will always have space (no more than
1473 * processors + 1 can have an outstanding reserve buffer)
1474 */
1475 buffer = cache_get_nowait(reserve_cache, index);
1476 if(!buffer) {
1477 /* failsafe */
1478 ERROR("no space in reserve cache\n");
1479 goto again;
1480 }
1481 }
1482
1483 pthread_mutex_unlock(&dup_mutex);
1484
1485 compressed_buffer = cache_lookup(fwriter_buffer, index);
1486
1487 pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex);
1488 pthread_mutex_lock(&fragment_mutex);
1489 disk_fragment = &fragment_table[index];
1490 size = SQUASHFS_COMPRESSED_SIZE_BLOCK(disk_fragment->size);
1491 start_block = disk_fragment->start_block;
1492 pthread_cleanup_pop(1);
1493
1494 if(SQUASHFS_COMPRESSED_BLOCK(disk_fragment->size)) {
1495 int error;
1496 char *data;
1497
1498 if(compressed_buffer)
1499 data = compressed_buffer->data;
1500 else {
1501 data = read_from_disk(start_block, size);
1502 if(data == NULL) {
1503 ERROR("Failed to read fragment from output"
1504 " filesystem\n");
1505 BAD_ERROR("Output filesystem corrupted?\n");
1506 }
1507 }
1508
1509 res = compressor_uncompress(comp, buffer->data, data, size,
1510 block_size, &error);
1511 if(res == -1)
1512 BAD_ERROR("%s uncompress failed with error code %d\n",
1513 comp->name, error);
1514 } else if(compressed_buffer)
1515 memcpy(buffer->data, compressed_buffer->data, size);
1516 else {
1517 res = read_fs_bytes(fd, start_block, size, buffer->data);
1518 if(res == 0) {
1519 ERROR("Failed to read fragment from output "
1520 "filesystem\n");
1521 BAD_ERROR("Output filesystem corrupted?\n");
1522 }
1523 }
1524
1525 cache_unlock(buffer);
1526 cache_block_put(compressed_buffer);
1527
1528 finished:
1529 pthread_cleanup_pop(0);
1530
1531 return buffer;
1532 }
1533
1534
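/*
 * Return the checksum of this file's fragment data, reading the fragment
 * block and caching the checksums of all files that share it on first
 * use.
 */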
unsigned short get_fragment_checksum(struct file_info *file)
1536 {
1537 struct file_buffer *frag_buffer;
1538 struct append_file *append;
1539 int res, index = file->fragment->index;
1540 unsigned short checksum;
1541
1542 if(index == SQUASHFS_INVALID_FRAG)
1543 return 0;
1544
1545 pthread_cleanup_push((void *) pthread_mutex_unlock, &dup_mutex);
1546 pthread_mutex_lock(&dup_mutex);
1547 res = file->have_frag_checksum;
1548 checksum = file->fragment_checksum;
1549 pthread_cleanup_pop(1);
1550
1551 if(res)
1552 return checksum;
1553
1554 frag_buffer = get_fragment(file->fragment);
1555
1556 pthread_cleanup_push((void *) pthread_mutex_unlock, &dup_mutex);
1557
1558 for(append = file_mapping[index]; append; append = append->next) {
1559 int offset = append->file->fragment->offset;
1560 int size = append->file->fragment->size;
1561 unsigned short cksum =
1562 get_checksum_mem(frag_buffer->data + offset, size);
1563
1564 if(file == append->file)
1565 checksum = cksum;
1566
1567 pthread_mutex_lock(&dup_mutex);
1568 append->file->fragment_checksum = cksum;
1569 append->file->have_frag_checksum = TRUE;
1570 pthread_mutex_unlock(&dup_mutex);
1571 }
1572
1573 cache_block_put(frag_buffer);
1574 pthread_cleanup_pop(0);
1575
1576 return checksum;
1577 }
1578
1579
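/*
 * While fragments are locked, completed fragment blocks are queued on
 * locked_fragment rather than being written; unlock_fragments() assigns
 * them disk positions and sends them to the writer thread.
 */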
void lock_fragments()
1581 {
1582 pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex);
1583 pthread_mutex_lock(&fragment_mutex);
1584 fragments_locked = TRUE;
1585 pthread_cleanup_pop(1);
1586 }
1587
1588
void unlock_fragments()
1590 {
1591 int frg, size;
1592 struct file_buffer *write_buffer;
1593
1594 pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex);
1595 pthread_mutex_lock(&fragment_mutex);
1596
1597 /*
1598 * Note queue_empty() is inherently racy with respect to concurrent
1599 * queue get and pushes. We avoid this because we're holding the
1600 * fragment_mutex which ensures no other threads can be using the
1601 * queue at this time.
1602 */
1603 while(!queue_empty(locked_fragment)) {
1604 write_buffer = queue_get(locked_fragment);
1605 frg = write_buffer->block;
1606 size = SQUASHFS_COMPRESSED_SIZE_BLOCK(fragment_table[frg].size);
1607 fragment_table[frg].start_block = bytes;
1608 write_buffer->block = bytes;
1609 bytes += size;
1610 fragments_outstanding --;
1611 queue_put(to_writer, write_buffer);
1612 TRACE("fragment_locked writing fragment %d, compressed size %d"
1613 "\n", frg, size);
1614 }
1615 fragments_locked = FALSE;
1616 pthread_cleanup_pop(1);
1617 }
1618
1619 /* Called with the fragment_mutex locked */
void add_pending_fragment(struct file_buffer *write_buffer, int c_byte,
	int fragment)
1622 {
1623 fragment_table[fragment].size = c_byte;
1624 write_buffer->block = fragment;
1625
1626 queue_put(locked_fragment, write_buffer);
1627 }
1628
1629
void write_fragment(struct file_buffer *fragment)
1631 {
1632 if(fragment == NULL)
1633 return;
1634
1635 pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex);
1636 pthread_mutex_lock(&fragment_mutex);
1637 fragment_table[fragment->block].unused = 0;
1638 fragments_outstanding ++;
1639 queue_put(to_frag, fragment);
1640 pthread_cleanup_pop(1);
1641 }
1642
1643
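/*
 * Get a buffer for a new fragment block and allocate its entry in
 * fragment_table, growing the table in FRAG_SIZE steps.
 */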
struct file_buffer *allocate_fragment()
1645 {
1646 struct file_buffer *fragment = cache_get(fragment_buffer, fragments);
1647
1648 pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex);
1649 pthread_mutex_lock(&fragment_mutex);
1650
1651 if(fragments % FRAG_SIZE == 0) {
1652 void *ft = realloc(fragment_table, (fragments +
1653 FRAG_SIZE) * sizeof(struct squashfs_fragment_entry));
1654 if(ft == NULL)
1655 MEM_ERROR();
1656 fragment_table = ft;
1657 }
1658
1659 fragment->size = 0;
1660 fragment->block = fragments ++;
1661
1662 pthread_cleanup_pop(1);
1663
1664 return fragment;
1665 }
1666
1667
1668 static struct fragment empty_fragment = {SQUASHFS_INVALID_FRAG, 0, 0};
1669
1670
void free_fragment(struct fragment *fragment)
1672 {
1673 if(fragment != &empty_fragment)
1674 free(fragment);
1675 }
1676
1677
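/*
 * Append the file tail held in file_buffer to the fragment block
 * selected by the fragment actions, queueing the current block for
 * compression first if the tail will not fit.  Returns the fragment
 * index/offset/size to store in the file's inode.
 */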
struct fragment *get_and_fill_fragment(struct file_buffer *file_buffer,
	struct dir_ent *dir_ent)
1680 {
1681 struct fragment *ffrg;
1682 struct file_buffer **fragment;
1683
1684 if(file_buffer == NULL || file_buffer->size == 0)
1685 return &empty_fragment;
1686
1687 fragment = eval_frag_actions(root_dir, dir_ent);
1688
1689 if((*fragment) && (*fragment)->size + file_buffer->size > block_size) {
1690 write_fragment(*fragment);
1691 *fragment = NULL;
1692 }
1693
1694 ffrg = malloc(sizeof(struct fragment));
1695 if(ffrg == NULL)
1696 MEM_ERROR();
1697
1698 if(*fragment == NULL)
1699 *fragment = allocate_fragment();
1700
1701 ffrg->index = (*fragment)->block;
1702 ffrg->offset = (*fragment)->size;
1703 ffrg->size = file_buffer->size;
1704 memcpy((*fragment)->data + (*fragment)->size, file_buffer->data,
1705 file_buffer->size);
1706 (*fragment)->size += file_buffer->size;
1707
1708 return ffrg;
1709 }
1710
1711
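/*
 * Write a metadata table: buffer is compressed and written in
 * SQUASHFS_METADATA_SIZE blocks, optionally followed by the raw buffer2,
 * and then by a list of the block positions.  Returns the position
 * immediately after the compressed blocks, which is the value recorded
 * in the superblock.
 */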
long long generic_write_table(int length, void *buffer, int length2,
	void *buffer2, int uncompressed)
1714 {
1715 int meta_blocks = (length + SQUASHFS_METADATA_SIZE - 1) /
1716 SQUASHFS_METADATA_SIZE;
1717 long long *list, start_bytes;
1718 int compressed_size, i, list_size = meta_blocks * sizeof(long long);
1719 unsigned short c_byte;
1720 char cbuffer[(SQUASHFS_METADATA_SIZE << 2) + 2];
1721
1722 #ifdef SQUASHFS_TRACE
1723 long long obytes = bytes;
1724 int olength = length;
1725 #endif
1726
1727 list = malloc(list_size);
1728 if(list == NULL)
1729 MEM_ERROR();
1730
1731 for(i = 0; i < meta_blocks; i++) {
1732 int avail_bytes = length > SQUASHFS_METADATA_SIZE ?
1733 SQUASHFS_METADATA_SIZE : length;
1734 c_byte = mangle(cbuffer + BLOCK_OFFSET, buffer + i *
1735 SQUASHFS_METADATA_SIZE , avail_bytes,
1736 SQUASHFS_METADATA_SIZE, uncompressed, 0);
1737 SQUASHFS_SWAP_SHORTS(&c_byte, cbuffer, 1);
1738 list[i] = bytes;
1739 compressed_size = SQUASHFS_COMPRESSED_SIZE(c_byte) +
1740 BLOCK_OFFSET;
1741 TRACE("block %d @ 0x%llx, compressed size %d\n", i, bytes,
1742 compressed_size);
1743 write_destination(fd, bytes, compressed_size, cbuffer);
1744 bytes += compressed_size;
1745 total_bytes += avail_bytes;
1746 length -= avail_bytes;
1747 }
1748
1749 start_bytes = bytes;
1750 if(length2) {
1751 write_destination(fd, bytes, length2, buffer2);
1752 bytes += length2;
1753 total_bytes += length2;
1754 }
1755
1756 SQUASHFS_INSWAP_LONG_LONGS(list, meta_blocks);
1757 write_destination(fd, bytes, list_size, list);
1758 bytes += list_size;
1759 total_bytes += list_size;
1760
1761 TRACE("generic_write_table: total uncompressed %d compressed %lld\n",
1762 olength, bytes - obytes);
1763
1764 free(list);
1765
1766 return start_bytes;
1767 }
1768
1769
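/*
 * Endian-swap the fragment table and write it out as a standard metadata
 * table.
 */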
long long write_fragment_table()
1771 {
1772 unsigned int frag_bytes = SQUASHFS_FRAGMENT_BYTES(fragments);
1773 int i;
1774
1775 TRACE("write_fragment_table: fragments %d, frag_bytes %d\n", fragments,
1776 frag_bytes);
1777 for(i = 0; i < fragments; i++) {
1778 TRACE("write_fragment_table: fragment %d, start_block 0x%llx, "
1779 "size %d\n", i, fragment_table[i].start_block,
1780 fragment_table[i].size);
1781 SQUASHFS_INSWAP_FRAGMENT_ENTRY(&fragment_table[i]);
1782 }
1783
1784 return generic_write_table(frag_bytes, fragment_table, 0, NULL, noF);
1785 }
1786
1787
1788 char read_from_file_buffer[SQUASHFS_FILE_MAX_SIZE];
1789 static char *read_from_disk(long long start, unsigned int avail_bytes)
1790 {
1791 int res;
1792
1793 res = read_fs_bytes(fd, start, avail_bytes, read_from_file_buffer);
1794 if(res == 0)
1795 return NULL;
1796
1797 return read_from_file_buffer;
1798 }
1799
1800
1801 char read_from_file_buffer2[SQUASHFS_FILE_MAX_SIZE];
1802 char *read_from_disk2(long long start, unsigned int avail_bytes)
1803 {
1804 int res;
1805
1806 res = read_fs_bytes(fd, start, avail_bytes, read_from_file_buffer2);
1807 if(res == 0)
1808 return NULL;
1809
1810 return read_from_file_buffer2;
1811 }
1812
1813
1814 /*
1815 * Compute 16 bit BSD checksum over the data
1816 */
1817 unsigned short get_checksum(char *buff, int bytes, unsigned short chksum)
1818 {
1819 unsigned char *b = (unsigned char *) buff;
1820
1821 while(bytes --) {
1822 chksum = (chksum & 1) ? (chksum >> 1) | 0x8000 : chksum >> 1;
1823 chksum += *b++;
1824 }
1825
1826 return chksum;
1827 }
1828
1829
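/*
 * Checksum a file's already written data blocks, taking each block from the
 * block writer cache if it is still there, otherwise reading it back from
 * the output filesystem.
 */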
1830 unsigned short get_checksum_disk(long long start, long long l,
1831 unsigned int *blocks)
1832 {
1833 unsigned short chksum = 0;
1834 unsigned int bytes;
1835 struct file_buffer *write_buffer;
1836 int i;
1837
1838 for(i = 0; l; i++) {
1839 bytes = SQUASHFS_COMPRESSED_SIZE_BLOCK(blocks[i]);
1840 if(bytes == 0) /* sparse block */
1841 continue;
1842 write_buffer = cache_lookup(bwriter_buffer, start);
1843 if(write_buffer) {
1844 chksum = get_checksum(write_buffer->data, bytes,
1845 chksum);
1846 cache_block_put(write_buffer);
1847 } else {
1848 void *data = read_from_disk(start, bytes);
1849 if(data == NULL) {
1850 ERROR("Failed to checksum data from output"
1851 " filesystem\n");
1852 BAD_ERROR("Output filesystem corrupted?\n");
1853 }
1854
1855 chksum = get_checksum(data, bytes, chksum);
1856 }
1857
1858 l -= bytes;
1859 start += bytes;
1860 }
1861
1862 return chksum;
1863 }
1864
1865
1866 unsigned short get_checksum_mem(char *buff, int bytes)
1867 {
1868 return get_checksum(buff, bytes, 0);
1869 }
1870
1871
1872 unsigned short get_checksum_mem_buffer(struct file_buffer *file_buffer)
1873 {
1874 if(file_buffer == NULL)
1875 return 0;
1876 else
1877 return get_checksum(file_buffer->data, file_buffer->size, 0);
1878 }
1879
1880
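/*
 * Duplicate tracking.  Files already written to the filesystem are kept in
 * the dupl[] hash table, hashed on the bottom 16 bits of the file size, so
 * that later files of the same size can be checked against them.
 */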
1881 #define DUP_HASH(a) (a & 0xffff)
1882 void add_file(long long start, long long file_size, long long file_bytes,
1883 unsigned int *block_listp, int blocks, unsigned int fragment,
1884 int offset, int bytes)
1885 {
1886 struct fragment *frg;
1887 unsigned int *block_list = block_listp;
1888 struct file_info *dupl_ptr = dupl[DUP_HASH(file_size)];
1889 struct append_file *append_file;
1890 struct file_info *file;
1891
1892 if(!duplicate_checking || file_size == 0)
1893 return;
1894
1895 for(; dupl_ptr; dupl_ptr = dupl_ptr->next) {
1896 if(file_size != dupl_ptr->file_size)
1897 continue;
1898 if(blocks != 0 && start != dupl_ptr->start)
1899 continue;
1900 if(fragment != dupl_ptr->fragment->index)
1901 continue;
1902 if(fragment != SQUASHFS_INVALID_FRAG && (offset !=
1903 dupl_ptr->fragment->offset || bytes !=
1904 dupl_ptr->fragment->size))
1905 continue;
1906 return;
1907 }
1908
1909 frg = malloc(sizeof(struct fragment));
1910 if(frg == NULL)
1911 MEM_ERROR();
1912
1913 frg->index = fragment;
1914 frg->offset = offset;
1915 frg->size = bytes;
1916
1917 file = add_non_dup(file_size, file_bytes, block_list, start, frg, 0, 0,
1918 FALSE, FALSE);
1919
1920 if(fragment == SQUASHFS_INVALID_FRAG)
1921 return;
1922
1923 append_file = malloc(sizeof(struct append_file));
1924 if(append_file == NULL)
1925 MEM_ERROR();
1926
1927 append_file->file = file;
1928 append_file->next = file_mapping[fragment];
1929 file_mapping[fragment] = append_file;
1930 }
1931
1932
1933 int pre_duplicate(long long file_size)
1934 {
1935 struct file_info *dupl_ptr = dupl[DUP_HASH(file_size)];
1936
1937 for(; dupl_ptr; dupl_ptr = dupl_ptr->next)
1938 if(dupl_ptr->file_size == file_size)
1939 return TRUE;
1940
1941 return FALSE;
1942 }
1943
1944
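/*
 * Record a newly written (non duplicate) file in the duplicate hash table.
 * The table is updated under dup_mutex (presumably because it is also
 * consulted by the fragment processing threads).
 */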
1945 struct file_info *add_non_dup(long long file_size, long long bytes,
1946 unsigned int *block_list, long long start, struct fragment *fragment,
1947 unsigned short checksum, unsigned short fragment_checksum,
1948 int checksum_flag, int checksum_frag_flag)
1949 {
1950 struct file_info *dupl_ptr = malloc(sizeof(struct file_info));
1951
1952 if(dupl_ptr == NULL)
1953 MEM_ERROR();
1954
1955 dupl_ptr->file_size = file_size;
1956 dupl_ptr->bytes = bytes;
1957 dupl_ptr->block_list = block_list;
1958 dupl_ptr->start = start;
1959 dupl_ptr->fragment = fragment;
1960 dupl_ptr->checksum = checksum;
1961 dupl_ptr->fragment_checksum = fragment_checksum;
1962 dupl_ptr->have_frag_checksum = checksum_frag_flag;
1963 dupl_ptr->have_checksum = checksum_flag;
1964
1965 pthread_cleanup_push((void *) pthread_mutex_unlock, &dup_mutex);
1966 pthread_mutex_lock(&dup_mutex);
1967 dupl_ptr->next = dupl[DUP_HASH(file_size)];
1968 dupl[DUP_HASH(file_size)] = dupl_ptr;
1969 dup_files ++;
1970 pthread_cleanup_pop(1);
1971
1972 return dupl_ptr;
1973 }
1974
1975
1976 struct fragment *frag_duplicate(struct file_buffer *file_buffer, char *dont_put)
1977 {
1978 struct file_info *dupl_ptr;
1979 struct file_buffer *buffer;
1980 struct file_info *dupl_start = file_buffer->dupl_start;
1981 long long file_size = file_buffer->file_size;
1982 unsigned short checksum = file_buffer->checksum;
1983 int res;
1984
1985 if(file_buffer->duplicate) {
1986 TRACE("Found duplicate file, fragment %d, size %d, offset %d, "
1987 "checksum 0x%x\n", dupl_start->fragment->index,
1988 file_size, dupl_start->fragment->offset, checksum);
1989 *dont_put = TRUE;
1990 return dupl_start->fragment;
1991 } else {
1992 *dont_put = FALSE;
1993 dupl_ptr = dupl[DUP_HASH(file_size)];
1994 }
1995
1996 for(; dupl_ptr && dupl_ptr != dupl_start; dupl_ptr = dupl_ptr->next) {
1997 if(file_size == dupl_ptr->file_size && file_size ==
1998 dupl_ptr->fragment->size) {
1999 if(get_fragment_checksum(dupl_ptr) == checksum) {
2000 buffer = get_fragment(dupl_ptr->fragment);
2001 res = memcmp(file_buffer->data, buffer->data +
2002 dupl_ptr->fragment->offset, file_size);
2003 cache_block_put(buffer);
2004 if(res == 0)
2005 break;
2006 }
2007 }
2008 }
2009
2010 if(!dupl_ptr || dupl_ptr == dupl_start)
2011 return NULL;
2012
2013 TRACE("Found duplicate file, fragment %d, size %d, offset %d, "
2014 "checksum 0x%x\n", dupl_ptr->fragment->index, file_size,
2015 dupl_ptr->fragment->offset, checksum);
2016
2017 return dupl_ptr->fragment;
2018 }
2019
2020
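/*
 * Check whether the file just written is a duplicate of one already in the
 * filesystem.  Candidates matching on file size are compared on block list,
 * block and fragment checksums, and finally byte-for-byte against the
 * stored data and fragment tail.  If a match is found NULL is returned and
 * the caller reuses the existing blocks and fragment; otherwise the file is
 * added to the duplicate table and its new entry is returned.
 */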
2021 struct file_info *duplicate(long long file_size, long long bytes,
2022 unsigned int **block_list, long long *start, struct fragment **fragment,
2023 struct file_buffer *file_buffer, int blocks, unsigned short checksum,
2024 int checksum_flag)
2025 {
2026 struct file_info *dupl_ptr = dupl[DUP_HASH(file_size)];
2027 int frag_bytes = file_buffer ? file_buffer->size : 0;
2028 unsigned short fragment_checksum = file_buffer ?
2029 file_buffer->checksum : 0;
2030
2031 for(; dupl_ptr; dupl_ptr = dupl_ptr->next)
2032 if(file_size == dupl_ptr->file_size && bytes == dupl_ptr->bytes
2033 && frag_bytes == dupl_ptr->fragment->size) {
2034 long long target_start, dup_start = dupl_ptr->start;
2035 int block;
2036
2037 if(memcmp(*block_list, dupl_ptr->block_list, blocks *
2038 sizeof(unsigned int)) != 0)
2039 continue;
2040
2041 if(checksum_flag == FALSE) {
2042 checksum = get_checksum_disk(*start, bytes,
2043 *block_list);
2044 checksum_flag = TRUE;
2045 }
2046
2047 if(!dupl_ptr->have_checksum) {
2048 dupl_ptr->checksum =
2049 get_checksum_disk(dupl_ptr->start,
2050 dupl_ptr->bytes, dupl_ptr->block_list);
2051 dupl_ptr->have_checksum = TRUE;
2052 }
2053
2054 if(checksum != dupl_ptr->checksum ||
2055 fragment_checksum !=
2056 get_fragment_checksum(dupl_ptr))
2057 continue;
2058
2059 target_start = *start;
2060 for(block = 0; block < blocks; block ++) {
2061 int size = SQUASHFS_COMPRESSED_SIZE_BLOCK
2062 ((*block_list)[block]);
2063 struct file_buffer *target_buffer = NULL;
2064 struct file_buffer *dup_buffer = NULL;
2065 char *target_data, *dup_data;
2066 int res;
2067
2068 if(size == 0)
2069 continue;
2070 target_buffer = cache_lookup(bwriter_buffer,
2071 target_start);
2072 if(target_buffer)
2073 target_data = target_buffer->data;
2074 else {
2075 target_data =
2076 read_from_disk(target_start,
2077 size);
2078 if(target_data == NULL) {
2079 ERROR("Failed to read data from"
2080 " output filesystem\n");
2081 BAD_ERROR("Output filesystem"
2082 " corrupted?\n");
2083 }
2084 }
2085
2086 dup_buffer = cache_lookup(bwriter_buffer,
2087 dup_start);
2088 if(dup_buffer)
2089 dup_data = dup_buffer->data;
2090 else {
2091 dup_data = read_from_disk2(dup_start,
2092 size);
2093 if(dup_data == NULL) {
2094 ERROR("Failed to read data from"
2095 " output filesystem\n");
2096 BAD_ERROR("Output filesystem"
2097 " corrupted?\n");
2098 }
2099 }
2100
2101 res = memcmp(target_data, dup_data, size);
2102 cache_block_put(target_buffer);
2103 cache_block_put(dup_buffer);
2104 if(res != 0)
2105 break;
2106 target_start += size;
2107 dup_start += size;
2108 }
2109 if(block == blocks) {
2110 struct file_buffer *frag_buffer =
2111 get_fragment(dupl_ptr->fragment);
2112
2113 if(frag_bytes == 0 ||
2114 memcmp(file_buffer->data,
2115 frag_buffer->data +
2116 dupl_ptr->fragment->offset,
2117 frag_bytes) == 0) {
2118 TRACE("Found duplicate file, start "
2119 "0x%llx, size %lld, checksum "
2120 "0x%x, fragment %d, size %d, "
2121 "offset %d, checksum 0x%x\n",
2122 dupl_ptr->start,
2123 dupl_ptr->bytes,
2124 dupl_ptr->checksum,
2125 dupl_ptr->fragment->index,
2126 frag_bytes,
2127 dupl_ptr->fragment->offset,
2128 fragment_checksum);
2129 *block_list = dupl_ptr->block_list;
2130 *start = dupl_ptr->start;
2131 *fragment = dupl_ptr->fragment;
2132 cache_block_put(frag_buffer);
2133 return 0;
2134 }
2135 cache_block_put(frag_buffer);
2136 }
2137 }
2138
2139
2140 return add_non_dup(file_size, bytes, *block_list, *start, *fragment,
2141 checksum, fragment_checksum, checksum_flag, TRUE);
2142 }
2143
2144
2145 static inline int is_fragment(struct inode_info *inode)
2146 {
2147 off_t file_size = inode->buf.st_size;
2148
2149 /*
2150 * If this block is to be compressed differently to the
2151 * fragment compression then it cannot be a fragment
2152 */
2153 if(inode->noF != noF)
2154 return FALSE;
2155
2156 return !inode->no_fragments && file_size && (file_size < block_size ||
2157 (inode->always_use_fragments && file_size & (block_size - 1)));
2158 }
2159
2160
2161 void put_file_buffer(struct file_buffer *file_buffer)
2162 {
2163 /*
2164 * Decide where to send the file buffer:
2165 * - compressible non-fragment blocks go to the deflate threads,
2166 * - fragments go to the process fragment threads,
2167 * - all others go directly to the main thread
2168 */
2169 if(file_buffer->error) {
2170 file_buffer->fragment = 0;
2171 seq_queue_put(to_main, file_buffer);
2172 } else if (file_buffer->file_size == 0)
2173 seq_queue_put(to_main, file_buffer);
2174 else if(file_buffer->fragment)
2175 queue_put(to_process_frag, file_buffer);
2176 else
2177 queue_put(to_deflate, file_buffer);
2178 }
2179
2180
2181 static int seq = 0;
2182 void reader_read_process(struct dir_ent *dir_ent)
2183 {
2184 long long bytes = 0;
2185 struct inode_info *inode = dir_ent->inode;
2186 struct file_buffer *prev_buffer = NULL, *file_buffer;
2187 int status, byte, res, child;
2188 int file = pseudo_exec_file(get_pseudo_file(inode->pseudo_id), &child);
2189
2190 if(!file) {
2191 file_buffer = cache_get_nohash(reader_buffer);
2192 file_buffer->sequence = seq ++;
2193 goto read_err;
2194 }
2195
2196 while(1) {
2197 file_buffer = cache_get_nohash(reader_buffer);
2198 file_buffer->sequence = seq ++;
2199 file_buffer->noD = inode->noD;
2200
2201 byte = read_bytes(file, file_buffer->data, block_size);
2202 if(byte == -1)
2203 goto read_err2;
2204
2205 file_buffer->size = byte;
2206 file_buffer->file_size = -1;
2207 file_buffer->error = FALSE;
2208 file_buffer->fragment = FALSE;
2209 bytes += byte;
2210
2211 if(byte == 0)
2212 break;
2213
2214 /*
2215 * Update progress bar size. This is done
2216 * on every block rather than waiting for all blocks to be
2217 * read in case write_file_process() is running in parallel
2218 * with this. Otherwise the current progress bar position
2219 * may get ahead of the progress bar size.
2220 */
2221 progress_bar_size(1);
2222
2223 if(prev_buffer)
2224 put_file_buffer(prev_buffer);
2225 prev_buffer = file_buffer;
2226 }
2227
2228 /*
2229 * Update inode file size now that the size of the dynamic pseudo file
2230 * is known. This is needed for the -info option.
2231 */
2232 inode->buf.st_size = bytes;
2233
2234 res = waitpid(child, &status, 0);
2235 close(file);
2236
2237 if(res == -1 || !WIFEXITED(status) || WEXITSTATUS(status) != 0)
2238 goto read_err;
2239
2240 if(prev_buffer == NULL)
2241 prev_buffer = file_buffer;
2242 else {
2243 cache_block_put(file_buffer);
2244 seq --;
2245 }
2246 prev_buffer->file_size = bytes;
2247 prev_buffer->fragment = is_fragment(inode);
2248 put_file_buffer(prev_buffer);
2249
2250 return;
2251
2252 read_err2:
2253 close(file);
2254 read_err:
2255 if(prev_buffer) {
2256 cache_block_put(file_buffer);
2257 seq --;
2258 file_buffer = prev_buffer;
2259 }
2260 file_buffer->error = TRUE;
2261 put_file_buffer(file_buffer);
2262 }
2263
2264
2265 void reader_read_file(struct dir_ent *dir_ent)
2266 {
2267 struct stat *buf = &dir_ent->inode->buf, buf2;
2268 struct file_buffer *file_buffer;
2269 int blocks, file, res;
2270 long long bytes, read_size;
2271 struct inode_info *inode = dir_ent->inode;
2272
2273 if(inode->read)
2274 return;
2275
2276 inode->read = TRUE;
2277 again:
2278 bytes = 0;
2279 read_size = buf->st_size;
2280 blocks = (read_size + block_size - 1) >> block_log;
2281
2282 file = open(pathname_reader(dir_ent), O_RDONLY);
2283 if(file == -1) {
2284 file_buffer = cache_get_nohash(reader_buffer);
2285 file_buffer->sequence = seq ++;
2286 goto read_err2;
2287 }
2288
2289 do {
2290 file_buffer = cache_get_nohash(reader_buffer);
2291 file_buffer->file_size = read_size;
2292 file_buffer->sequence = seq ++;
2293 file_buffer->noD = inode->noD;
2294 file_buffer->error = FALSE;
2295
2296 /*
2297 * Always try to read block_size bytes from the file rather
2298 * than expected bytes (which will be less than the block_size
2299 * at the file tail) to check that the file hasn't grown
2300 * since being stat()ed. If it is longer (or shorter) than
2301 * expected, then restat, and try again. Note the special
2302 * case where the file is an exact multiple of the block_size
2303 * is dealt with later.
2304 */
2305 file_buffer->size = read_bytes(file, file_buffer->data,
2306 block_size);
2307 if(file_buffer->size == -1)
2308 goto read_err;
2309
2310 bytes += file_buffer->size;
2311
2312 if(blocks > 1) {
2313 /* non-tail block should be exactly block_size */
2314 if(file_buffer->size < block_size)
2315 goto restat;
2316
2317 file_buffer->fragment = FALSE;
2318 put_file_buffer(file_buffer);
2319 }
2320 } while(-- blocks > 0);
2321
2322 /* Overall size including tail should match */
2323 if(read_size != bytes)
2324 goto restat;
2325
2326 if(read_size && read_size % block_size == 0) {
2327 /*
2328 * Special case where we've not tried to read past the end of
2329 * the file. We expect to get EOF, i.e. the file isn't larger
2330 * than we expect.
2331 */
2332 char buffer;
2333 int res;
2334
2335 res = read_bytes(file, &buffer, 1);
2336 if(res == -1)
2337 goto read_err;
2338
2339 if(res != 0)
2340 goto restat;
2341 }
2342
2343 file_buffer->fragment = is_fragment(inode);
2344 put_file_buffer(file_buffer);
2345
2346 close(file);
2347
2348 return;
2349
2350 restat:
2351 res = fstat(file, &buf2);
2352 if(res == -1) {
2353 ERROR("Cannot stat dir/file %s because %s\n",
2354 pathname_reader(dir_ent), strerror(errno));
2355 goto read_err;
2356 }
2357
2358 if(read_size != buf2.st_size) {
2359 close(file);
2360 memcpy(buf, &buf2, sizeof(struct stat));
2361 file_buffer->error = 2;
2362 put_file_buffer(file_buffer);
2363 goto again;
2364 }
2365 read_err:
2366 close(file);
2367 read_err2:
2368 file_buffer->error = TRUE;
2369 put_file_buffer(file_buffer);
2370 }
2371
2372
2373 void reader_scan(struct dir_info *dir) {
2374 struct dir_ent *dir_ent = dir->list;
2375
2376 for(; dir_ent; dir_ent = dir_ent->next) {
2377 struct stat *buf = &dir_ent->inode->buf;
2378 if(dir_ent->inode->root_entry)
2379 continue;
2380
2381 if(IS_PSEUDO_PROCESS(dir_ent->inode)) {
2382 reader_read_process(dir_ent);
2383 continue;
2384 }
2385
2386 switch(buf->st_mode & S_IFMT) {
2387 case S_IFREG:
2388 reader_read_file(dir_ent);
2389 break;
2390 case S_IFDIR:
2391 reader_scan(dir_ent->dir);
2392 break;
2393 }
2394 }
2395 }
2396
2397
2398 void *reader(void *arg)
2399 {
2400 if(!sorted)
2401 reader_scan(queue_get(to_reader));
2402 else {
2403 int i;
2404 struct priority_entry *entry;
2405
2406 queue_get(to_reader);
2407 for(i = 65535; i >= 0; i--)
2408 for(entry = priority_list[i]; entry;
2409 entry = entry->next)
2410 reader_read_file(entry->dir);
2411 }
2412
2413 pthread_exit(NULL);
2414 }
2415
2416
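/*
 * Writer thread.  This is the only thread which writes data blocks to the
 * destination: it seeks to each queued buffer's block position and writes
 * it out under pos_mutex.  A NULL buffer on the queue is a synchronisation
 * request, acknowledged by sending NULL back on from_writer.
 */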
2417 void *writer(void *arg)
2418 {
2419 while(1) {
2420 struct file_buffer *file_buffer = queue_get(to_writer);
2421 off_t off;
2422
2423 if(file_buffer == NULL) {
2424 queue_put(from_writer, NULL);
2425 continue;
2426 }
2427
2428 off = file_buffer->block;
2429
2430 pthread_cleanup_push((void *) pthread_mutex_unlock, &pos_mutex);
2431 pthread_mutex_lock(&pos_mutex);
2432
2433 if(lseek(fd, off, SEEK_SET) == -1) {
2434 ERROR("writer: Lseek on destination failed because "
2435 "%s, offset=0x%llx\n", strerror(errno), off);
2436 BAD_ERROR("Probably out of space on output "
2437 "%s\n", block_device ? "block device" :
2438 "filesystem");
2439 }
2440
2441 if(write_bytes(fd, file_buffer->data,
2442 file_buffer->size) == -1)
2443 BAD_ERROR("Failed to write to output %s\n",
2444 block_device ? "block device" : "filesystem");
2445
2446 pthread_cleanup_pop(1);
2447
2448 cache_block_put(file_buffer);
2449 }
2450 }
2451
2452
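/*
 * Return TRUE if the block is entirely zero filled (and so can be stored as
 * a sparse block).  The bulk of the buffer is scanned a long at a time,
 * with any remaining tail bytes checked individually.
 */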
2453 int all_zero(struct file_buffer *file_buffer)
2454 {
2455 int i;
2456 long entries = file_buffer->size / sizeof(long);
2457 long *p = (long *) file_buffer->data;
2458
2459 for(i = 0; i < entries && p[i] == 0; i++);
2460
2461 if(i == entries) {
2462 for(i = file_buffer->size & ~(sizeof(long) - 1);
2463 i < file_buffer->size && file_buffer->data[i] == 0;
2464 i++);
2465
2466 return i == file_buffer->size;
2467 }
2468
2469 return 0;
2470 }
2471
2472
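/*
 * Block deflator thread.  Takes uncompressed data blocks off the to_deflate
 * queue, compresses them into a block writer cache buffer and passes the
 * result to the main thread in sequence order.  Entirely zero blocks are
 * marked sparse (c_byte of 0) when sparse file handling is enabled.
 */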
2473 void *deflator(void *arg)
2474 {
2475 struct file_buffer *write_buffer = cache_get_nohash(bwriter_buffer);
2476 void *stream = NULL;
2477 int res;
2478
2479 res = compressor_init(comp, &stream, block_size, 1);
2480 if(res)
2481 BAD_ERROR("deflator:: compressor_init failed\n");
2482
2483 while(1) {
2484 struct file_buffer *file_buffer = queue_get(to_deflate);
2485
2486 if(sparse_files && all_zero(file_buffer)) {
2487 file_buffer->c_byte = 0;
2488 seq_queue_put(to_main, file_buffer);
2489 } else {
2490 write_buffer->c_byte = mangle2(stream,
2491 write_buffer->data, file_buffer->data,
2492 file_buffer->size, block_size,
2493 file_buffer->noD, 1);
2494 write_buffer->sequence = file_buffer->sequence;
2495 write_buffer->file_size = file_buffer->file_size;
2496 write_buffer->block = file_buffer->block;
2497 write_buffer->size = SQUASHFS_COMPRESSED_SIZE_BLOCK
2498 (write_buffer->c_byte);
2499 write_buffer->fragment = FALSE;
2500 write_buffer->error = FALSE;
2501 cache_block_put(file_buffer);
2502 seq_queue_put(to_main, write_buffer);
2503 write_buffer = cache_get_nohash(bwriter_buffer);
2504 }
2505 }
2506 }
2507
2508
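/*
 * Fragment deflator thread.  Compresses completed fragment blocks; if
 * fragments are not locked the block is given its position in the output
 * and queued to the writer, otherwise it is added to the pending fragment
 * list to be written once the fragments are unlocked.
 */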
2509 void *frag_deflator(void *arg)
2510 {
2511 void *stream = NULL;
2512 int res;
2513
2514 res = compressor_init(comp, &stream, block_size, 1);
2515 if(res)
2516 BAD_ERROR("frag_deflator:: compressor_init failed\n");
2517
2518 pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex);
2519
2520 while(1) {
2521 int c_byte, compressed_size;
2522 struct file_buffer *file_buffer = queue_get(to_frag);
2523 struct file_buffer *write_buffer =
2524 cache_get(fwriter_buffer, file_buffer->block);
2525
2526 c_byte = mangle2(stream, write_buffer->data, file_buffer->data,
2527 file_buffer->size, block_size, noF, 1);
2528 compressed_size = SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte);
2529 write_buffer->size = compressed_size;
2530 pthread_mutex_lock(&fragment_mutex);
2531 if(fragments_locked == FALSE) {
2532 fragment_table[file_buffer->block].size = c_byte;
2533 fragment_table[file_buffer->block].start_block = bytes;
2534 write_buffer->block = bytes;
2535 bytes += compressed_size;
2536 fragments_outstanding --;
2537 queue_put(to_writer, write_buffer);
2538 pthread_mutex_unlock(&fragment_mutex);
2539 TRACE("Writing fragment %lld, uncompressed size %d, "
2540 "compressed size %d\n", file_buffer->block,
2541 file_buffer->size, compressed_size);
2542 } else {
2543 add_pending_fragment(write_buffer, c_byte,
2544 file_buffer->block);
2545 pthread_mutex_unlock(&fragment_mutex);
2546 }
2547 cache_block_put(file_buffer);
2548 }
2549
2550 pthread_cleanup_pop(0);
2551 }
2552
2553
2554 struct file_buffer *get_file_buffer()
2555 {
2556 struct file_buffer *file_buffer = seq_queue_get(to_main);
2557
2558 return file_buffer;
2559 }
2560
2561
2562 void write_file_empty(squashfs_inode *inode, struct dir_ent *dir_ent,
2563 struct file_buffer *file_buffer, int *duplicate_file)
2564 {
2565 file_count ++;
2566 *duplicate_file = FALSE;
2567 cache_block_put(file_buffer);
2568 create_inode(inode, NULL, dir_ent, SQUASHFS_FILE_TYPE, 0, 0, 0,
2569 NULL, &empty_fragment, NULL, 0);
2570 }
2571
2572
2573 void write_file_frag(squashfs_inode *inode, struct dir_ent *dir_ent,
2574 struct file_buffer *file_buffer, int *duplicate_file)
2575 {
2576 int size = file_buffer->file_size;
2577 struct fragment *fragment;
2578 unsigned short checksum = file_buffer->checksum;
2579 char dont_put;
2580
2581 fragment = frag_duplicate(file_buffer, &dont_put);
2582 *duplicate_file = !fragment;
2583 if(!fragment) {
2584 fragment = get_and_fill_fragment(file_buffer, dir_ent);
2585 if(duplicate_checking)
2586 add_non_dup(size, 0, NULL, 0, fragment, 0, checksum,
2587 TRUE, TRUE);
2588 }
2589
2590 if(dont_put)
2591 free(file_buffer);
2592 else
2593 cache_block_put(file_buffer);
2594
2595 total_bytes += size;
2596 file_count ++;
2597
2598 inc_progress_bar();
2599
2600 create_inode(inode, NULL, dir_ent, SQUASHFS_FILE_TYPE, size, 0,
2601 0, NULL, fragment, NULL, 0);
2602
2603 if(!duplicate_checking)
2604 free_fragment(fragment);
2605 }
2606
2607
2608 int write_file_process(squashfs_inode *inode, struct dir_ent *dir_ent,
2609 struct file_buffer *read_buffer, int *duplicate_file)
2610 {
2611 long long read_size, file_bytes, start;
2612 struct fragment *fragment;
2613 unsigned int *block_list = NULL;
2614 int block = 0, status;
2615 long long sparse = 0;
2616 struct file_buffer *fragment_buffer = NULL;
2617
2618 *duplicate_file = FALSE;
2619
2620 lock_fragments();
2621
2622 file_bytes = 0;
2623 start = bytes;
2624 while (1) {
2625 read_size = read_buffer->file_size;
2626 if(read_buffer->fragment)
2627 fragment_buffer = read_buffer;
2628 else {
2629 block_list = realloc(block_list, (block + 1) *
2630 sizeof(unsigned int));
2631 if(block_list == NULL)
2632 MEM_ERROR();
2633 block_list[block ++] = read_buffer->c_byte;
2634 if(read_buffer->c_byte) {
2635 read_buffer->block = bytes;
2636 bytes += read_buffer->size;
2637 cache_hash(read_buffer, read_buffer->block);
2638 file_bytes += read_buffer->size;
2639 queue_put(to_writer, read_buffer);
2640 } else {
2641 sparse += read_buffer->size;
2642 cache_block_put(read_buffer);
2643 }
2644 }
2645 inc_progress_bar();
2646
2647 if(read_size != -1)
2648 break;
2649
2650 read_buffer = get_file_buffer();
2651 if(read_buffer->error)
2652 goto read_err;
2653 }
2654
2655 unlock_fragments();
2656 fragment = get_and_fill_fragment(fragment_buffer, dir_ent);
2657
2658 if(duplicate_checking)
2659 add_non_dup(read_size, file_bytes, block_list, start, fragment,
2660 0, fragment_buffer ? fragment_buffer->checksum : 0,
2661 FALSE, TRUE);
2662 cache_block_put(fragment_buffer);
2663 file_count ++;
2664 total_bytes += read_size;
2665
2666 create_inode(inode, NULL, dir_ent, SQUASHFS_FILE_TYPE, read_size, start,
2667 block, block_list, fragment, NULL, sparse);
2668
2669 if(duplicate_checking == FALSE) {
2670 free(block_list);
2671 free_fragment(fragment);
2672 }
2673
2674 return 0;
2675
2676 read_err:
2677 dec_progress_bar(block);
2678 status = read_buffer->error;
2679 bytes = start;
2680 if(!block_device) {
2681 int res;
2682
2683 queue_put(to_writer, NULL);
2684 if(queue_get(from_writer) != 0)
2685 EXIT_MKSQUASHFS();
2686 res = ftruncate(fd, bytes);
2687 if(res != 0)
2688 BAD_ERROR("Failed to truncate dest file because %s\n",
2689 strerror(errno));
2690 }
2691 unlock_fragments();
2692 free(block_list);
2693 cache_block_put(read_buffer);
2694 return status;
2695 }
2696
2697
2698 int write_file_blocks_dup(squashfs_inode *inode, struct dir_ent *dir_ent,
2699 struct file_buffer *read_buffer, int *duplicate_file)
2700 {
2701 int block, thresh;
2702 long long read_size = read_buffer->file_size;
2703 long long file_bytes, dup_start, start;
2704 struct fragment *fragment;
2705 struct file_info *dupl_ptr;
2706 int blocks = (read_size + block_size - 1) >> block_log;
2707 unsigned int *block_list, *block_listp;
2708 struct file_buffer **buffer_list;
2709 int status;
2710 long long sparse = 0;
2711 struct file_buffer *fragment_buffer = NULL;
2712
2713 block_list = malloc(blocks * sizeof(unsigned int));
2714 if(block_list == NULL)
2715 MEM_ERROR();
2716 block_listp = block_list;
2717
2718 buffer_list = malloc(blocks * sizeof(struct file_buffer *));
2719 if(buffer_list == NULL)
2720 MEM_ERROR();
2721
2722 lock_fragments();
2723
2724 file_bytes = 0;
2725 start = dup_start = bytes;
2726 thresh = blocks > bwriter_size ? blocks - bwriter_size : 0;
2727
2728 for(block = 0; block < blocks;) {
2729 if(read_buffer->fragment) {
2730 block_list[block] = 0;
2731 buffer_list[block] = NULL;
2732 fragment_buffer = read_buffer;
2733 blocks = read_size >> block_log;
2734 } else {
2735 block_list[block] = read_buffer->c_byte;
2736
2737 if(read_buffer->c_byte) {
2738 read_buffer->block = bytes;
2739 bytes += read_buffer->size;
2740 file_bytes += read_buffer->size;
2741 cache_hash(read_buffer, read_buffer->block);
2742 if(block < thresh) {
2743 buffer_list[block] = NULL;
2744 queue_put(to_writer, read_buffer);
2745 } else
2746 buffer_list[block] = read_buffer;
2747 } else {
2748 buffer_list[block] = NULL;
2749 sparse += read_buffer->size;
2750 cache_block_put(read_buffer);
2751 }
2752 }
2753 inc_progress_bar();
2754
2755 if(++block < blocks) {
2756 read_buffer = get_file_buffer();
2757 if(read_buffer->error)
2758 goto read_err;
2759 }
2760 }
2761
2762 dupl_ptr = duplicate(read_size, file_bytes, &block_listp, &dup_start,
2763 &fragment, fragment_buffer, blocks, 0, FALSE);
2764
2765 if(dupl_ptr) {
2766 *duplicate_file = FALSE;
2767 for(block = thresh; block < blocks; block ++)
2768 if(buffer_list[block])
2769 queue_put(to_writer, buffer_list[block]);
2770 fragment = get_and_fill_fragment(fragment_buffer, dir_ent);
2771 dupl_ptr->fragment = fragment;
2772 } else {
2773 *duplicate_file = TRUE;
2774 for(block = thresh; block < blocks; block ++)
2775 cache_block_put(buffer_list[block]);
2776 bytes = start;
2777 if(thresh && !block_device) {
2778 int res;
2779
2780 queue_put(to_writer, NULL);
2781 if(queue_get(from_writer) != 0)
2782 EXIT_MKSQUASHFS();
2783 res = ftruncate(fd, bytes);
2784 if(res != 0)
2785 BAD_ERROR("Failed to truncate dest file because"
2786 " %s\n", strerror(errno));
2787 }
2788 }
2789
2790 unlock_fragments();
2791 cache_block_put(fragment_buffer);
2792 free(buffer_list);
2793 file_count ++;
2794 total_bytes += read_size;
2795
2796 /*
2797 * sparse count is needed to ensure squashfs correctly reports
2798 * a smaller block count on stat calls to sparse files. This is
2799 * to ensure intelligent applications like cp correctly handle the
2800 * file as a sparse file. If the file in the original filesystem isn't
2801 * stored as a sparse file then still store it sparsely in squashfs, but
2802 * report it as non-sparse on stat calls to preserve semantics
2803 */
2804 if(sparse && (dir_ent->inode->buf.st_blocks << 9) >= read_size)
2805 sparse = 0;
2806
2807 create_inode(inode, NULL, dir_ent, SQUASHFS_FILE_TYPE, read_size,
2808 dup_start, blocks, block_listp, fragment, NULL, sparse);
2809
2810 if(*duplicate_file == TRUE)
2811 free(block_list);
2812
2813 return 0;
2814
2815 read_err:
2816 dec_progress_bar(block);
2817 status = read_buffer->error;
2818 bytes = start;
2819 if(thresh && !block_device) {
2820 int res;
2821
2822 queue_put(to_writer, NULL);
2823 if(queue_get(from_writer) != 0)
2824 EXIT_MKSQUASHFS();
2825 res = ftruncate(fd, bytes);
2826 if(res != 0)
2827 BAD_ERROR("Failed to truncate dest file because %s\n",
2828 strerror(errno));
2829 }
2830 unlock_fragments();
2831 for(blocks = thresh; blocks < block; blocks ++)
2832 cache_block_put(buffer_list[blocks]);
2833 free(buffer_list);
2834 free(block_list);
2835 cache_block_put(read_buffer);
2836 return status;
2837 }
2838
2839
2840 int write_file_blocks(squashfs_inode *inode, struct dir_ent *dir_ent,
2841 struct file_buffer *read_buffer, int *dup)
2842 {
2843 long long read_size = read_buffer->file_size;
2844 long long file_bytes, start;
2845 struct fragment *fragment;
2846 unsigned int *block_list;
2847 int block, status;
2848 int blocks = (read_size + block_size - 1) >> block_log;
2849 long long sparse = 0;
2850 struct file_buffer *fragment_buffer = NULL;
2851
2852 if(pre_duplicate(read_size))
2853 return write_file_blocks_dup(inode, dir_ent, read_buffer, dup);
2854
2855 *dup = FALSE;
2856
2857 block_list = malloc(blocks * sizeof(unsigned int));
2858 if(block_list == NULL)
2859 MEM_ERROR();
2860
2861 lock_fragments();
2862
2863 file_bytes = 0;
2864 /* ANDROID CHANGES START*/
2865 #ifdef ANDROID
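	/*
	 * Pad the output position so that this file's data blocks start on
	 * a 4K boundary (Android specific behaviour, controlled by
	 * align_4k_blocks).
	 */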
2866 if (align_4k_blocks && bytes % 4096) {
2867 bytes += 4096 - (bytes % 4096);
2868 }
2869 #endif
2870 /* ANDROID CHANGES END */
2871 start = bytes;
2872 for(block = 0; block < blocks;) {
2873 if(read_buffer->fragment) {
2874 block_list[block] = 0;
2875 fragment_buffer = read_buffer;
2876 blocks = read_size >> block_log;
2877 } else {
2878 block_list[block] = read_buffer->c_byte;
2879 if(read_buffer->c_byte) {
2880 read_buffer->block = bytes;
2881 bytes += read_buffer->size;
2882 cache_hash(read_buffer, read_buffer->block);
2883 file_bytes += read_buffer->size;
2884 queue_put(to_writer, read_buffer);
2885 } else {
2886 sparse += read_buffer->size;
2887 cache_block_put(read_buffer);
2888 }
2889 }
2890 inc_progress_bar();
2891
2892 if(++block < blocks) {
2893 read_buffer = get_file_buffer();
2894 if(read_buffer->error)
2895 goto read_err;
2896 }
2897 }
2898
2899 unlock_fragments();
2900 fragment = get_and_fill_fragment(fragment_buffer, dir_ent);
2901
2902 if(duplicate_checking)
2903 add_non_dup(read_size, file_bytes, block_list, start, fragment,
2904 0, fragment_buffer ? fragment_buffer->checksum : 0,
2905 FALSE, TRUE);
2906 cache_block_put(fragment_buffer);
2907 file_count ++;
2908 total_bytes += read_size;
2909
2910 /*
2911 * sparse count is needed to ensure squashfs correctly reports
2912 * a smaller block count on stat calls to sparse files. This is
2913 * to ensure intelligent applications like cp correctly handle the
2914 * file as a sparse file. If the file in the original filesystem isn't
2915 * stored as a sparse file then still store it sparsely in squashfs, but
2916 * report it as non-sparse on stat calls to preserve semantics
2917 */
2918 if(sparse && (dir_ent->inode->buf.st_blocks << 9) >= read_size)
2919 sparse = 0;
2920
2921 create_inode(inode, NULL, dir_ent, SQUASHFS_FILE_TYPE, read_size, start,
2922 blocks, block_list, fragment, NULL, sparse);
2923
2924 if(duplicate_checking == FALSE) {
2925 free(block_list);
2926 free_fragment(fragment);
2927 }
2928
2929 return 0;
2930
2931 read_err:
2932 dec_progress_bar(block);
2933 status = read_buffer->error;
2934 bytes = start;
2935 if(!block_device) {
2936 int res;
2937
2938 queue_put(to_writer, NULL);
2939 if(queue_get(from_writer) != 0)
2940 EXIT_MKSQUASHFS();
2941 res = ftruncate(fd, bytes);
2942 if(res != 0)
2943 BAD_ERROR("Failed to truncate dest file because %s\n",
2944 strerror(errno));
2945 }
2946 unlock_fragments();
2947 free(block_list);
2948 cache_block_put(read_buffer);
2949 return status;
2950 }
2951
2952
2953 void write_file(squashfs_inode *inode, struct dir_ent *dir, int *dup)
2954 {
2955 int status;
2956 struct file_buffer *read_buffer;
2957
2958 again:
2959 read_buffer = get_file_buffer();
2960 status = read_buffer->error;
2961
2962 if(status)
2963 cache_block_put(read_buffer);
2964 else if(read_buffer->file_size == -1)
2965 status = write_file_process(inode, dir, read_buffer, dup);
2966 else if(read_buffer->file_size == 0)
2967 write_file_empty(inode, dir, read_buffer, dup);
2968 else if(read_buffer->fragment && read_buffer->c_byte)
2969 write_file_frag(inode, dir, read_buffer, dup);
2970 else
2971 status = write_file_blocks(inode, dir, read_buffer, dup);
2972
2973 if(status == 2) {
2974 ERROR("File %s changed size while reading filesystem, "
2975 "attempting to re-read\n", pathname(dir));
2976 goto again;
2977 } else if(status == 1) {
2978 ERROR_START("Failed to read file %s", pathname(dir));
2979 ERROR_EXIT(", creating empty file\n");
2980 write_file_empty(inode, dir, NULL, dup);
2981 }
2982 }
2983
2984
2985 #define BUFF_SIZE 512
2986 char *name;
2987 char *basename_r();
2988
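/*
 * Return the final component of pathname, first converting a relative
 * pathname into an absolute one (built in a growing static buffer) using
 * the current working directory.  Returns NULL if the name resolves to
 * ".." or cannot be parsed.
 */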
2989 char *getbase(char *pathname)
2990 {
2991 static char *b_buffer = NULL;
2992 static int b_size = BUFF_SIZE;
2993 char *result;
2994
2995 if(b_buffer == NULL) {
2996 b_buffer = malloc(b_size);
2997 if(b_buffer == NULL)
2998 MEM_ERROR();
2999 }
3000
3001 while(1) {
3002 if(*pathname != '/') {
3003 result = getcwd(b_buffer, b_size);
3004 if(result == NULL && errno != ERANGE)
3005 BAD_ERROR("Getcwd failed in getbase\n");
3006
3007 /* enough room for pathname + "/" + '\0' terminator? */
3008 if(result && strlen(pathname) + 2 <=
3009 b_size - strlen(b_buffer)) {
3010 strcat(strcat(b_buffer, "/"), pathname);
3011 break;
3012 }
3013 } else if(strlen(pathname) < b_size) {
3014 strcpy(b_buffer, pathname);
3015 break;
3016 }
3017
3018 /* Buffer not large enough, realloc and try again */
3019 b_buffer = realloc(b_buffer, b_size += BUFF_SIZE);
3020 if(b_buffer == NULL)
3021 MEM_ERROR();
3022 }
3023
3024 name = b_buffer;
3025 if(((result = basename_r()) == NULL) || (strcmp(result, "..") == 0))
3026 return NULL;
3027 else
3028 return result;
3029 }
3030
3031
3032 char *basename_r()
3033 {
3034 char *s;
3035 char *p;
3036 int n = 1;
3037
3038 for(;;) {
3039 s = name;
3040 if(*name == '\0')
3041 return NULL;
3042 if(*name != '/') {
3043 while(*name != '\0' && *name != '/') name++;
3044 n = name - s;
3045 }
3046 while(*name == '/') name++;
3047 if(strncmp(s, ".", n) == 0)
3048 continue;
3049 if((*name == '\0') || (strncmp(s, "..", n) == 0) ||
3050 ((p = basename_r()) == NULL)) {
3051 s[n] = '\0';
3052 return s;
3053 }
3054 if(strcmp(p, "..") == 0)
3055 continue;
3056 return p;
3057 }
3058 }
3059
3060
3061 struct inode_info *lookup_inode3(struct stat *buf, int pseudo, int id,
3062 char *symlink, int bytes)
3063 {
3064 int ino_hash = INODE_HASH(buf->st_dev, buf->st_ino);
3065 struct inode_info *inode;
3066
3067 /*
3068 * Look-up inode in hash table, if it already exists we have a
3069 * hard-link, so increment the nlink count and return it.
3070 * Don't do the look-up for directories because we don't hard-link
3071 * directories.
3072 */
3073 if ((buf->st_mode & S_IFMT) != S_IFDIR) {
3074 for(inode = inode_info[ino_hash]; inode; inode = inode->next) {
3075 if(memcmp(buf, &inode->buf, sizeof(struct stat)) == 0) {
3076 inode->nlink ++;
3077 return inode;
3078 }
3079 }
3080 }
3081
3082 inode = malloc(sizeof(struct inode_info) + bytes);
3083 if(inode == NULL)
3084 MEM_ERROR();
3085
3086 if(bytes)
3087 memcpy(&inode->symlink, symlink, bytes);
3088 memcpy(&inode->buf, buf, sizeof(struct stat));
3089 inode->read = FALSE;
3090 inode->root_entry = FALSE;
3091 inode->pseudo_file = pseudo;
3092 inode->pseudo_id = id;
3093 inode->inode = SQUASHFS_INVALID_BLK;
3094 inode->nlink = 1;
3095 inode->inode_number = 0;
3096
3097 /*
3098 * Copy filesystem wide defaults into the inode; these filesystem
3099 * wide defaults may be altered on an individual inode basis by
3100 * user specified actions
3101 *
3102 */
3103 inode->no_fragments = no_fragments;
3104 inode->always_use_fragments = always_use_fragments;
3105
3106 /* ANDROID CHANGES START*/
3107 #ifdef ANDROID
3108 /* Check the whitelist */
3109 inode->noD = whitelisted(buf);
3110 #else
3111 inode->noD = noD;
3112 #endif
3113 /* ANDROID CHANGES END */
3114
3115 inode->noF = noF;
3116
3117 inode->next = inode_info[ino_hash];
3118 inode_info[ino_hash] = inode;
3119
3120 return inode;
3121 }
3122
3123
3124 static inline struct inode_info *lookup_inode2(struct stat *buf, int pseudo, int id)
3125 {
3126 return lookup_inode3(buf, pseudo, id, NULL, 0);
3127 }
3128
3129
3130 static inline struct inode_info *lookup_inode(struct stat *buf)
3131 {
3132 return lookup_inode2(buf, 0, 0);
3133 }
3134
3135
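/*
 * Allocate an inode number if the inode hasn't already got one.  A non-zero
 * use_this forces that number (dir_scan() passes root_inode_number for the
 * root directory), and for regular files the progress bar total is bumped
 * by the file's block count.
 */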
3136 static inline void alloc_inode_no(struct inode_info *inode, unsigned int use_this)
3137 {
3138 if (inode->inode_number == 0) {
3139 inode->inode_number = use_this ? : inode_no ++;
3140 if((inode->buf.st_mode & S_IFMT) == S_IFREG)
3141 progress_bar_size((inode->buf.st_size + block_size - 1)
3142 >> block_log);
3143 }
3144 }
3145
3146
3147 static inline struct dir_ent *create_dir_entry(char *name, char *source_name,
3148 char *nonstandard_pathname, struct dir_info *dir)
3149 {
3150 struct dir_ent *dir_ent = malloc(sizeof(struct dir_ent));
3151 if(dir_ent == NULL)
3152 MEM_ERROR();
3153
3154 dir_ent->name = name;
3155 dir_ent->source_name = source_name;
3156 dir_ent->nonstandard_pathname = nonstandard_pathname;
3157 dir_ent->our_dir = dir;
3158 dir_ent->inode = NULL;
3159 dir_ent->next = NULL;
3160 /* ANDROID CHANGES START*/
3161 #ifdef ANDROID
3162 dir_ent->capabilities = 0;
3163 #endif
3164 /* ANDROID CHANGES END */
3165
3166 return dir_ent;
3167 }
3168
3169
3170 static inline void add_dir_entry(struct dir_ent *dir_ent, struct dir_info *sub_dir,
3171 struct inode_info *inode_info)
3172 {
3173 struct dir_info *dir = dir_ent->our_dir;
3174
3175 if(sub_dir)
3176 sub_dir->dir_ent = dir_ent;
3177
3178 /* ANDROID CHANGES START*/
3179 #ifdef ANDROID
3180 if (android_config) {
3181 if (mount_point) {
3182 char *mounted_path;
3183 char *rel_path;
3184
3185 alloc_mounted_path(mount_point, subpathname(dir_ent), &mounted_path);
3186 rel_path = mounted_path;
3187 while (rel_path && *rel_path == '/')
3188 rel_path++;
3189 android_fs_config(fs_config_func, rel_path, &inode_info->buf, target_out_path, &dir_ent->capabilities);
3190 free(mounted_path);
3191 } else {
3192 android_fs_config(fs_config_func, pathname(dir_ent), &inode_info->buf, target_out_path, &dir_ent->capabilities);
3193 }
3194 }
3195 #endif
3196 /* ANDROID CHANGES END */
3197
3198 dir_ent->inode = inode_info;
3199 dir_ent->dir = sub_dir;
3200
3201 dir_ent->next = dir->list;
3202 dir->list = dir_ent;
3203 dir->count++;
3204 }
3205
3206 static inline void add_dir_entry2(char *name, char *source_name,
3207 char *nonstandard_pathname, struct dir_info *sub_dir,
3208 struct inode_info *inode_info, struct dir_info *dir)
3209 {
3210 struct dir_ent *dir_ent = create_dir_entry(name, source_name,
3211 nonstandard_pathname, dir);
3212
3213
3214 add_dir_entry(dir_ent, sub_dir, inode_info);
3215 }
3216
3217
3218 static inline void free_dir_entry(struct dir_ent *dir_ent)
3219 {
3220 if(dir_ent->name)
3221 free(dir_ent->name);
3222
3223 if(dir_ent->source_name)
3224 free(dir_ent->source_name);
3225
3226 if(dir_ent->nonstandard_pathname)
3227 free(dir_ent->nonstandard_pathname);
3228
3229 /* if this entry has been associated with an inode, then we need
3230 * to update the inode nlink count. Orphaned inodes are harmless, and
3231 * it is easier to leave them than to go to the bother of deleting them */
3232 if(dir_ent->inode && !dir_ent->inode->root_entry)
3233 dir_ent->inode->nlink --;
3234
3235 free(dir_ent);
3236 }
3237
3238
3239 static inline void add_excluded(struct dir_info *dir)
3240 {
3241 dir->excluded ++;
3242 }
3243
3244
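/*
 * Top level of the directory scanning phase: the source directories are
 * read into memory (dir_scan1), pseudo files and most actions are applied
 * (dir_scan2), then the move, prune and empty actions (dir_scan3-5), the
 * directories are sorted and inode numbers allocated (dir_scan6), and
 * finally the tree is handed to the reader thread and written out
 * (dir_scan7).
 */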
3245 void dir_scan(squashfs_inode *inode, char *pathname,
3246 struct dir_ent *(_readdir)(struct dir_info *), int progress)
3247 {
3248 struct stat buf;
3249 struct dir_ent *dir_ent;
3250 /* ANDROID CHANGES START*/
3251 #ifdef ANDROID
3252 uint64_t caps = 0;
3253 #endif
3254 /* ANDROID CHANGES END */
3255
3256 root_dir = dir_scan1(pathname, "", paths, _readdir, 1);
3257 if(root_dir == NULL)
3258 return;
3259
3260 /* Create root directory dir_ent and associated inode, and connect
3261 * it to the root directory dir_info structure */
3262 dir_ent = create_dir_entry("", NULL, pathname,
3263 scan1_opendir("", "", 0));
3264
3265 if(pathname[0] == '\0') {
3266 /*
3267 * dummy top level directory, if multiple sources specified on
3268 * command line
3269 */
3270 memset(&buf, 0, sizeof(buf));
3271 buf.st_mode = S_IRWXU | S_IRWXG | S_IRWXO | S_IFDIR;
3272 buf.st_uid = getuid();
3273 buf.st_gid = getgid();
3274 buf.st_mtime = time(NULL);
3275 buf.st_dev = 0;
3276 buf.st_ino = 0;
3277 dir_ent->inode = lookup_inode2(&buf, PSEUDO_FILE_OTHER, 0);
3278 } else {
3279 if(lstat(pathname, &buf) == -1)
3280 /* source directory has disappeared? */
3281 BAD_ERROR("Cannot stat source directory %s because %s\n",
3282 pathname, strerror(errno));
3283 /* ANDROID CHANGES START*/
3284 #ifdef ANDROID
3285 buf.st_mode = S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH; // root mode
3286 buf.st_uid = 0;
3287 buf.st_gid = 0;
3288 buf.st_mtime = time(NULL);
3289 buf.st_dev = 0;
3290 buf.st_ino = 0;
3291 #endif
3292 /* ANDROID CHANGES END */
3293 dir_ent->inode = lookup_inode(&buf);
3294 }
3295
3296 /* ANDROID CHANGES START*/
3297 #ifdef ANDROID
3298 dir_ent->capabilities = caps;
3299 #endif
3300 /* ANDROID CHANGES END */
3301
3302 dir_ent->dir = root_dir;
3303 root_dir->dir_ent = dir_ent;
3304
3305 /*
3306 * Process most actions and any pseudo files
3307 */
3308 if(actions() || get_pseudo())
3309 dir_scan2(root_dir, get_pseudo());
3310
3311 /*
3312 * Process move actions
3313 */
3314 if(move_actions()) {
3315 dir_scan3(root_dir);
3316 do_move_actions();
3317 }
3318
3319 /*
3320 * Process prune actions
3321 */
3322 if(prune_actions())
3323 dir_scan4(root_dir);
3324
3325 /*
3326 * Process empty actions
3327 */
3328 if(empty_actions())
3329 dir_scan5(root_dir);
3330
3331 /*
3332 * Sort directories and compute the inode numbers
3333 */
3334 dir_scan6(root_dir);
3335
3336 alloc_inode_no(dir_ent->inode, root_inode_number);
3337
3338 eval_actions(root_dir, dir_ent);
3339
3340 if(sorted)
3341 generate_file_priorities(root_dir, 0,
3342 &root_dir->dir_ent->inode->buf);
3343
3344 if(appending) {
3345 sigset_t sigmask;
3346
3347 restore_thread = init_restore_thread();
3348 sigemptyset(&sigmask);
3349 sigaddset(&sigmask, SIGINT);
3350 sigaddset(&sigmask, SIGTERM);
3351 sigaddset(&sigmask, SIGUSR1);
3352 if(pthread_sigmask(SIG_BLOCK, &sigmask, NULL) == -1)
3353 BAD_ERROR("Failed to set signal mask\n");
3354 write_destination(fd, SQUASHFS_START, 4, "\0\0\0\0");
3355 }
3356
3357 queue_put(to_reader, root_dir);
3358
3359 set_progressbar_state(progress);
3360
3361 if(sorted)
3362 sort_files_and_write(root_dir);
3363
3364 dir_scan7(inode, root_dir);
3365 dir_ent->inode->inode = *inode;
3366 dir_ent->inode->type = SQUASHFS_DIR_TYPE;
3367 }
3368
3369
3370 /*
3371 * dir_scan1 routines...
3372 * These scan the source directories into memory for processing.
3373 * Exclude actions are processed here (in contrast to the other actions)
3374 * because they affect what is scanned.
3375 */
3376 struct dir_info *scan1_opendir(char *pathname, char *subpath, int depth)
3377 {
3378 struct dir_info *dir;
3379
3380 dir = malloc(sizeof(struct dir_info));
3381 if(dir == NULL)
3382 MEM_ERROR();
3383
3384 if(pathname[0] != '\0') {
3385 dir->linuxdir = opendir(pathname);
3386 if(dir->linuxdir == NULL) {
3387 free(dir);
3388 return NULL;
3389 }
3390 }
3391
3392 dir->pathname = strdup(pathname);
3393 dir->subpath = strdup(subpath);
3394 dir->count = 0;
3395 dir->directory_count = 0;
3396 dir->dir_is_ldir = TRUE;
3397 dir->list = NULL;
3398 dir->depth = depth;
3399 dir->excluded = 0;
3400
3401 return dir;
3402 }
3403
3404
3405 struct dir_ent *scan1_encomp_readdir(struct dir_info *dir)
3406 {
3407 static int index = 0;
3408
3409 if(dir->count < old_root_entries) {
3410 int i;
3411
3412 for(i = 0; i < old_root_entries; i++) {
3413 if(old_root_entry[i].inode.type == SQUASHFS_DIR_TYPE)
3414 dir->directory_count ++;
3415 add_dir_entry2(old_root_entry[i].name, NULL, NULL, NULL,
3416 &old_root_entry[i].inode, dir);
3417 }
3418 }
3419
3420 while(index < source) {
3421 char *basename = NULL;
3422 char *dir_name = getbase(source_path[index]);
3423 int pass = 1, res;
3424
3425 if(dir_name == NULL) {
3426 ERROR_START("Bad source directory %s",
3427 source_path[index]);
3428 ERROR_EXIT(" - skipping ...\n");
3429 index ++;
3430 continue;
3431 }
3432 dir_name = strdup(dir_name);
3433 for(;;) {
3434 struct dir_ent *dir_ent = dir->list;
3435
3436 for(; dir_ent && strcmp(dir_ent->name, dir_name) != 0;
3437 dir_ent = dir_ent->next);
3438 if(dir_ent == NULL)
3439 break;
3440 ERROR("Source directory entry %s already used! - trying"
3441 " ", dir_name);
3442 if(pass == 1)
3443 basename = dir_name;
3444 else
3445 free(dir_name);
3446 res = asprintf(&dir_name, "%s_%d", basename, pass++);
3447 if(res == -1)
3448 BAD_ERROR("asprintf failed in "
3449 "scan1_encomp_readdir\n");
3450 ERROR("%s\n", dir_name);
3451 }
3452 return create_dir_entry(dir_name, basename,
3453 strdup(source_path[index ++]), dir);
3454 }
3455 return NULL;
3456 }
3457
3458
3459 struct dir_ent *scan1_single_readdir(struct dir_info *dir)
3460 {
3461 struct dirent *d_name;
3462 int i;
3463
3464 if(dir->count < old_root_entries) {
3465 for(i = 0; i < old_root_entries; i++) {
3466 if(old_root_entry[i].inode.type == SQUASHFS_DIR_TYPE)
3467 dir->directory_count ++;
3468 add_dir_entry2(old_root_entry[i].name, NULL, NULL, NULL,
3469 &old_root_entry[i].inode, dir);
3470 }
3471 }
3472
3473 if((d_name = readdir(dir->linuxdir)) != NULL) {
3474 char *basename = NULL;
3475 char *dir_name = strdup(d_name->d_name);
3476 int pass = 1, res;
3477
3478 for(;;) {
3479 struct dir_ent *dir_ent = dir->list;
3480
3481 for(; dir_ent && strcmp(dir_ent->name, dir_name) != 0;
3482 dir_ent = dir_ent->next);
3483 if(dir_ent == NULL)
3484 break;
3485 ERROR("Source directory entry %s already used! - trying"
3486 " ", dir_name);
3487 if (pass == 1)
3488 basename = dir_name;
3489 else
3490 free(dir_name);
3491 res = asprintf(&dir_name, "%s_%d", d_name->d_name, pass++);
3492 if(res == -1)
3493 BAD_ERROR("asprintf failed in "
3494 "scan1_single_readdir\n");
3495 ERROR("%s\n", dir_name);
3496 }
3497 return create_dir_entry(dir_name, basename, NULL, dir);
3498 }
3499
3500 return NULL;
3501 }
3502
3503
3504 struct dir_ent *scan1_readdir(struct dir_info *dir)
3505 {
3506 struct dirent *d_name = readdir(dir->linuxdir);
3507
3508 return d_name ?
3509 create_dir_entry(strdup(d_name->d_name), NULL, NULL, dir) :
3510 NULL;
3511 }
3512
3513
3514 void scan1_freedir(struct dir_info *dir)
3515 {
3516 if(dir->pathname[0] != '\0')
3517 closedir(dir->linuxdir);
3518 }
3519
3520
3521 struct dir_info *dir_scan1(char *filename, char *subpath,
3522 struct pathnames *paths,
3523 struct dir_ent *(_readdir)(struct dir_info *), int depth)
3524 {
3525 struct dir_info *dir = scan1_opendir(filename, subpath, depth);
3526 struct dir_ent *dir_ent;
3527
3528 if(dir == NULL) {
3529 ERROR_START("Could not open %s", filename);
3530 ERROR_EXIT(", skipping...\n");
3531 return NULL;
3532 }
3533
3534 while((dir_ent = _readdir(dir))) {
3535 struct dir_info *sub_dir;
3536 struct stat buf;
3537 struct pathnames *new = NULL;
3538 char *filename = pathname(dir_ent);
3539 char *subpath = NULL;
3540 char *dir_name = dir_ent->name;
3541
3542 if(strcmp(dir_name, ".") == 0 || strcmp(dir_name, "..") == 0) {
3543 free_dir_entry(dir_ent);
3544 continue;
3545 }
3546
3547 if(lstat(filename, &buf) == -1) {
3548 ERROR_START("Cannot stat dir/file %s because %s",
3549 filename, strerror(errno));
3550 ERROR_EXIT(", ignoring\n");
3551 free_dir_entry(dir_ent);
3552 continue;
3553 }
3554
3555 if((buf.st_mode & S_IFMT) != S_IFREG &&
3556 (buf.st_mode & S_IFMT) != S_IFDIR &&
3557 (buf.st_mode & S_IFMT) != S_IFLNK &&
3558 (buf.st_mode & S_IFMT) != S_IFCHR &&
3559 (buf.st_mode & S_IFMT) != S_IFBLK &&
3560 (buf.st_mode & S_IFMT) != S_IFIFO &&
3561 (buf.st_mode & S_IFMT) != S_IFSOCK) {
3562 ERROR_START("File %s has unrecognised filetype %d",
3563 filename, buf.st_mode & S_IFMT);
3564 ERROR_EXIT(", ignoring\n");
3565 free_dir_entry(dir_ent);
3566 continue;
3567 }
3568
3569 if((old_exclude && old_excluded(filename, &buf)) ||
3570 (!old_exclude && excluded(dir_name, paths, &new))) {
3571 add_excluded(dir);
3572 free_dir_entry(dir_ent);
3573 continue;
3574 }
3575
3576 if(exclude_actions()) {
3577 subpath = subpathname(dir_ent);
3578
3579 if(eval_exclude_actions(dir_name, filename, subpath,
3580 &buf, depth, dir_ent)) {
3581 add_excluded(dir);
3582 free_dir_entry(dir_ent);
3583 continue;
3584 }
3585 }
3586
3587 switch(buf.st_mode & S_IFMT) {
3588 case S_IFDIR:
3589 if(subpath == NULL)
3590 subpath = subpathname(dir_ent);
3591
3592 sub_dir = dir_scan1(filename, subpath, new,
3593 scan1_readdir, depth + 1);
3594 if(sub_dir) {
3595 dir->directory_count ++;
3596 add_dir_entry(dir_ent, sub_dir,
3597 lookup_inode(&buf));
3598 } else
3599 free_dir_entry(dir_ent);
3600 break;
3601 case S_IFLNK: {
3602 int byte;
3603 static char buff[65536]; /* overflow safe */
3604
3605 byte = readlink(filename, buff, 65536);
3606 if(byte == -1) {
3607 ERROR_START("Failed to read symlink %s",
3608 filename);
3609 ERROR_EXIT(", ignoring\n");
3610 } else if(byte == 65536) {
3611 ERROR_START("Symlink %s is greater than 65536 "
3612 "bytes!", filename);
3613 ERROR_EXIT(", ignoring\n");
3614 } else {
3615 /* readlink doesn't 0 terminate the returned
3616 * path */
3617 buff[byte] = '\0';
3618 add_dir_entry(dir_ent, NULL, lookup_inode3(&buf,
3619 0, 0, buff, byte + 1));
3620 }
3621 break;
3622 }
3623 default:
3624 add_dir_entry(dir_ent, NULL, lookup_inode(&buf));
3625 }
3626
3627 free(new);
3628 }
3629
3630 scan1_freedir(dir);
3631
3632 return dir;
3633 }
3634
3635
3636 /*
3637 * dir_scan2 routines...
3638 * This processes most actions and any pseudo files
3639 */
3640 struct dir_ent *scan2_readdir(struct dir_info *dir, struct dir_ent *dir_ent)
3641 {
3642 if (dir_ent == NULL)
3643 dir_ent = dir->list;
3644 else
3645 dir_ent = dir_ent->next;
3646
3647 for(; dir_ent && dir_ent->inode->root_entry; dir_ent = dir_ent->next);
3648
3649 return dir_ent;
3650 }
3651
3652
3653 struct dir_ent *scan2_lookup(struct dir_info *dir, char *name)
3654 {
3655 struct dir_ent *dir_ent = dir->list;
3656
3657 for(; dir_ent && strcmp(dir_ent->name, name) != 0;
3658 dir_ent = dir_ent->next);
3659
3660 return dir_ent;
3661 }
3662
3663
3664 void dir_scan2(struct dir_info *dir, struct pseudo *pseudo)
3665 {
3666 struct dir_ent *dir_ent = NULL;
3667 struct pseudo_entry *pseudo_ent;
3668 struct stat buf;
3669 static int pseudo_ino = 1;
3670
3671 while((dir_ent = scan2_readdir(dir, dir_ent)) != NULL) {
3672 struct inode_info *inode_info = dir_ent->inode;
3673 struct stat *buf = &inode_info->buf;
3674 char *name = dir_ent->name;
3675
3676 eval_actions(root_dir, dir_ent);
3677
3678 if((buf->st_mode & S_IFMT) == S_IFDIR)
3679 dir_scan2(dir_ent->dir, pseudo_subdir(name, pseudo));
3680 }
3681
3682 while((pseudo_ent = pseudo_readdir(pseudo)) != NULL) {
3683 dir_ent = scan2_lookup(dir, pseudo_ent->name);
3684 if(pseudo_ent->dev->type == 'm') {
3685 struct stat *buf;
3686 if(dir_ent == NULL) {
3687 ERROR_START("Pseudo modify file \"%s\" does "
3688 "not exist in source filesystem.",
3689 pseudo_ent->pathname);
3690 ERROR_EXIT(" Ignoring.\n");
3691 continue;
3692 }
3693 if(dir_ent->inode->root_entry) {
3694 ERROR_START("Pseudo modify file \"%s\" is a "
3695 "pre-existing file in the filesystem "
3696 "being appended to. It cannot be "\
3697 "modified.", pseudo_ent->pathname);
3698 ERROR_EXIT(" Ignoring.\n");
3699 continue;
3700 }
3701 buf = &dir_ent->inode->buf;
3702 buf->st_mode = (buf->st_mode & S_IFMT) |
3703 pseudo_ent->dev->mode;
3704 buf->st_uid = pseudo_ent->dev->uid;
3705 buf->st_gid = pseudo_ent->dev->gid;
3706 continue;
3707 }
3708
3709 if(dir_ent) {
3710 if(dir_ent->inode->root_entry) {
3711 ERROR_START("Pseudo file \"%s\" is a "
3712 "pre-existing file in the filesystem "
3713 "being appended to.",
3714 pseudo_ent->pathname);
3715 ERROR_EXIT(" Ignoring.\n");
3716 } else {
3717 ERROR_START("Pseudo file \"%s\" exists in "
3718 "source filesystem \"%s\".",
3719 pseudo_ent->pathname,
3720 pathname(dir_ent));
3721 ERROR_EXIT("\nIgnoring, exclude it (-e/-ef) to "
3722 "override.\n");
3723 }
3724 continue;
3725 }
3726
3727 memset(&buf, 0, sizeof(buf));
3728 buf.st_mode = pseudo_ent->dev->mode;
3729 buf.st_uid = pseudo_ent->dev->uid;
3730 buf.st_gid = pseudo_ent->dev->gid;
3731 buf.st_rdev = makedev(pseudo_ent->dev->major,
3732 pseudo_ent->dev->minor);
3733 buf.st_mtime = time(NULL);
3734 buf.st_ino = pseudo_ino ++;
3735
3736 if(pseudo_ent->dev->type == 'd') {
3737 struct dir_ent *dir_ent =
3738 create_dir_entry(pseudo_ent->name, NULL,
3739 pseudo_ent->pathname, dir);
3740 char *subpath = strdup(subpathname(dir_ent));
3741 struct dir_info *sub_dir = scan1_opendir("", subpath,
3742 dir->depth + 1);
3743 if(sub_dir == NULL) {
3744 ERROR_START("Could not create pseudo directory "
3745 "\"%s\"", pseudo_ent->pathname);
3746 ERROR_EXIT(", skipping...\n");
3747 free(subpath);
3748 pseudo_ino --;
3749 continue;
3750 }
3751 dir_scan2(sub_dir, pseudo_ent->pseudo);
3752 dir->directory_count ++;
3753 add_dir_entry(dir_ent, sub_dir,
3754 lookup_inode2(&buf, PSEUDO_FILE_OTHER, 0));
3755 } else if(pseudo_ent->dev->type == 'f') {
3756 add_dir_entry2(pseudo_ent->name, NULL,
3757 pseudo_ent->pathname, NULL,
3758 lookup_inode2(&buf, PSEUDO_FILE_PROCESS,
3759 pseudo_ent->dev->pseudo_id), dir);
3760 } else {
3761 add_dir_entry2(pseudo_ent->name, NULL,
3762 pseudo_ent->pathname, NULL,
3763 lookup_inode2(&buf, PSEUDO_FILE_OTHER, 0), dir);
3764 }
3765 }
3766 }
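/*
 * For illustration only: pseudo file definitions (as given with -p / -pf)
 * are roughly of the form "name type mode uid gid [extra]", and the type
 * letters map onto the branches handled above, e.g.:
 *
 *     dev d 755 0 0                    directory ('d' branch)
 *     dev/console c 600 0 0 5 1        character device (default branch)
 *     init m 755 0 0                   modify an existing file ('m' branch)
 *     version f 444 0 0 cat VERSION    file filled from a command ('f' branch)
 *
 * The example names and commands here are illustrative, not part of any
 * particular build.
 */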
3767
3768
3769 /*
3770 * dir_scan3 routines...
3771 * This processes the move action
3772 */
3773 void dir_scan3(struct dir_info *dir)
3774 {
3775 struct dir_ent *dir_ent = NULL;
3776
3777 while((dir_ent = scan2_readdir(dir, dir_ent)) != NULL) {
3778
3779 eval_move_actions(root_dir, dir_ent);
3780
3781 if((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR)
3782 dir_scan3(dir_ent->dir);
3783 }
3784 }
3785
3786
3787 /*
3788 * dir_scan4 routines...
3789 * This processes the prune action. This action is designed to do fine
3790 * grained tuning of the in-core directory structure after the exclude,
3791 * move and pseudo actions have been performed. This allows complex
3792 * tests to be performed which are impossible at exclude time (i.e.
3793 * tests which rely on the in-core directory structure)
3794 */
3795 void free_dir(struct dir_info *dir)
3796 {
3797 struct dir_ent *dir_ent = dir->list;
3798
3799 while(dir_ent) {
3800 struct dir_ent *tmp = dir_ent;
3801
3802 if((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR)
3803 free_dir(dir_ent->dir);
3804
3805 dir_ent = dir_ent->next;
3806 free_dir_entry(tmp);
3807 }
3808
3809 free(dir->pathname);
3810 free(dir->subpath);
3811 free(dir);
3812 }
3813
3814
3815 void dir_scan4(struct dir_info *dir)
3816 {
3817 struct dir_ent *dir_ent = dir->list, *prev = NULL;
3818
3819 while(dir_ent) {
3820 if(dir_ent->inode->root_entry) {
3821 prev = dir_ent;
3822 dir_ent = dir_ent->next;
3823 continue;
3824 }
3825
3826 if((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR)
3827 dir_scan4(dir_ent->dir);
3828
3829 if(eval_prune_actions(root_dir, dir_ent)) {
3830 struct dir_ent *tmp = dir_ent;
3831
3832 if((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR) {
3833 free_dir(dir_ent->dir);
3834 dir->directory_count --;
3835 }
3836
3837 dir->count --;
3838
3839 /* remove dir_ent from list */
3840 dir_ent = dir_ent->next;
3841 if(prev)
3842 prev->next = dir_ent;
3843 else
3844 dir->list = dir_ent;
3845
3846 /* free it */
3847 free_dir_entry(tmp);
3848
3849 add_excluded(dir);
3850 continue;
3851 }
3852
3853 prev = dir_ent;
3854 dir_ent = dir_ent->next;
3855 }
3856 }
3857
3858
3859 /*
3860 * dir_scan5 routines...
3861 * This processes the empty action. This action has to be processed after
3862 * all other actions because the previous exclude and move actions and the
3863 * pseudo actions affect whether a directory is empty
3864 */
3865 void dir_scan5(struct dir_info *dir)
3866 {
3867 struct dir_ent *dir_ent = dir->list, *prev = NULL;
3868
3869 while(dir_ent) {
3870 if(dir_ent->inode->root_entry) {
3871 prev = dir_ent;
3872 dir_ent = dir_ent->next;
3873 continue;
3874 }
3875
3876 if((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR) {
3877 dir_scan5(dir_ent->dir);
3878
3879 if(eval_empty_actions(root_dir, dir_ent)) {
3880 struct dir_ent *tmp = dir_ent;
3881
3882 /*
3883 * delete sub-directory, this is by definition
3884 * empty
3885 */
3886 free(dir_ent->dir->pathname);
3887 free(dir_ent->dir->subpath);
3888 free(dir_ent->dir);
3889
3890 /* remove dir_ent from list */
3891 dir_ent = dir_ent->next;
3892 if(prev)
3893 prev->next = dir_ent;
3894 else
3895 dir->list = dir_ent;
3896
3897 /* free it */
3898 free_dir_entry(tmp);
3899
3900 /* update counts */
3901 dir->directory_count --;
3902 dir->count --;
3903 add_excluded(dir);
3904 continue;
3905 }
3906 }
3907
3908 prev = dir_ent;
3909 dir_ent = dir_ent->next;
3910 }
3911 }
3912
3913
3914 /*
3915 * dir_scan6 routines...
3916 * This sorts every directory and computes the inode numbers
3917 */
3918
3919 /*
3920 * Bottom up linked list merge sort.
3921 *
3922 * Qsort and other O(n log n) algorithms work well with arrays but not
3923 * linked lists. Merge sort, another O(n log n) sort algorithm, on the other
3924 * hand is not ideal for arrays (as it needs an additional n storage locations
3925 * because sorting is not done in place), but it is ideal for linked lists
3926 * because it doesn't require any extra storage.
3927 */
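/*
 * Worked example (illustrative): sorting the list "d" -> "b" -> "c" -> "a".
 *
 *   stride 1: merge ("d")("b") -> "b","d" and ("c")("a") -> "a","c",
 *             giving the list "b" -> "d" -> "a" -> "c"
 *   stride 2: merge ("b","d")("a","c") -> "a" -> "b" -> "c" -> "d";
 *             stride then doubles to 4, which is not < count, so we stop.
 */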
3928 void sort_directory(struct dir_info *dir)
3929 {
3930 struct dir_ent *cur, *l1, *l2, *next;
3931 int len1, len2, stride = 1;
3932
3933 if(dir->list == NULL || dir->count < 2)
3934 return;
3935
3936 /*
3937 * We can consider our linked-list to be made up of stride length
3938 * sublists. Each iteration around this loop merges adjacent
3939 * stride length sublists into larger 2*stride sublists. We stop
3940 * when stride becomes equal to the entire list.
3941 *
3942 * Initially stride = 1 (by definition a sublist of 1 is sorted), and
3943 * these 1 element sublists are merged into 2 element sublists, which
3944 * are then merged into 4 element sublists and so on.
3945 */
3946 do {
3947 l2 = dir->list; /* head of current linked list */
3948 cur = NULL; /* empty output list */
3949
3950 /*
3951 * Iterate through the linked list, merging adjacent sublists.
3952 * On each iteration l2 points to the next sublist pair to be
3953 * merged (if there's only one sublist left this is simply added
3954 * to the output list)
3955 */
3956 while(l2) {
3957 l1 = l2;
3958 for(len1 = 0; l2 && len1 < stride; len1 ++, l2 = l2->next);
3959 len2 = stride;
3960
3961 /*
3962 * l1 points to first sublist.
3963 * l2 points to second sublist.
3964 * Merge them onto the output list
3965 */
3966 while(len1 && l2 && len2) {
3967 if(strcmp(l1->name, l2->name) <= 0) {
3968 next = l1;
3969 l1 = l1->next;
3970 len1 --;
3971 } else {
3972 next = l2;
3973 l2 = l2->next;
3974 len2 --;
3975 }
3976
3977 if(cur) {
3978 cur->next = next;
3979 cur = next;
3980 } else
3981 dir->list = cur = next;
3982 }
3983 /*
3984 * One sublist is now empty, copy the other one onto the
3985 * output list
3986 */
3987 for(; len1; len1 --, l1 = l1->next) {
3988 if(cur) {
3989 cur->next = l1;
3990 cur = l1;
3991 } else
3992 dir->list = cur = l1;
3993 }
3994 for(; l2 && len2; len2 --, l2 = l2->next) {
3995 if(cur) {
3996 cur->next = l2;
3997 cur = l2;
3998 } else
3999 dir->list = cur = l2;
4000 }
4001 }
4002 cur->next = NULL;
4003 stride = stride << 1;
4004 } while(stride < dir->count);
4005 }
4006
4007
4008 void dir_scan6(struct dir_info *dir)
4009 {
4010 struct dir_ent *dir_ent;
4011 unsigned int byte_count = 0;
4012
4013 sort_directory(dir);
4014
4015 for(dir_ent = dir->list; dir_ent; dir_ent = dir_ent->next) {
4016 byte_count += strlen(dir_ent->name) +
4017 sizeof(struct squashfs_dir_entry);
4018
4019 if(dir_ent->inode->root_entry)
4020 continue;
4021
4022 alloc_inode_no(dir_ent->inode, 0);
4023
4024 if((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR)
4025 dir_scan6(dir_ent->dir);
4026 }
4027
4028 if(dir->count < 257 && byte_count < SQUASHFS_METADATA_SIZE)
4029 dir->dir_is_ldir = FALSE;
4030 }
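/*
 * Note on the test above (for illustration): a directory only qualifies for
 * the short directory inode format if it has no more than 256 entries and
 * its entries (each name plus a struct squashfs_dir_entry header) fit in a
 * single metadata block (SQUASHFS_METADATA_SIZE bytes).  A directory with,
 * say, 300 entries, or fewer entries with very long names, keeps
 * dir_is_ldir set and is written using the extended ("ldir") format.
 */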
4031
4032
4033 /*
4034 * dir_scan7 routines...
4035 * This generates the filesystem metadata and writes it out to the destination
4036 */
4037 void scan7_init_dir(struct directory *dir)
4038 {
4039 dir->buff = malloc(SQUASHFS_METADATA_SIZE);
4040 if(dir->buff == NULL)
4041 MEM_ERROR();
4042
4043 dir->size = SQUASHFS_METADATA_SIZE;
4044 dir->p = dir->index_count_p = dir->buff;
4045 dir->entry_count = 256;
4046 dir->entry_count_p = NULL;
4047 dir->index = NULL;
4048 dir->i_count = dir->i_size = 0;
4049 }
4050
4051
4052 struct dir_ent *scan7_readdir(struct directory *dir, struct dir_info *dir_info,
4053 struct dir_ent *dir_ent)
4054 {
4055 if (dir_ent == NULL)
4056 dir_ent = dir_info->list;
4057 else
4058 dir_ent = dir_ent->next;
4059
4060 for(; dir_ent && dir_ent->inode->root_entry; dir_ent = dir_ent->next)
4061 add_dir(dir_ent->inode->inode, dir_ent->inode->inode_number,
4062 dir_ent->name, dir_ent->inode->type, dir);
4063
4064 return dir_ent;
4065 }
4066
4067
4068 void scan7_freedir(struct directory *dir)
4069 {
4070 if(dir->index)
4071 free(dir->index);
4072 free(dir->buff);
4073 }
4074
4075
4076 void dir_scan7(squashfs_inode *inode, struct dir_info *dir_info)
4077 {
4078 int squashfs_type;
4079 int duplicate_file;
4080 struct directory dir;
4081 struct dir_ent *dir_ent = NULL;
4082
4083 scan7_init_dir(&dir);
4084
4085 while((dir_ent = scan7_readdir(&dir, dir_info, dir_ent)) != NULL) {
4086 struct stat *buf = &dir_ent->inode->buf;
4087
4088 update_info(dir_ent);
4089
4090 if(dir_ent->inode->inode == SQUASHFS_INVALID_BLK) {
4091 switch(buf->st_mode & S_IFMT) {
4092 case S_IFREG:
4093 squashfs_type = SQUASHFS_FILE_TYPE;
4094 write_file(inode, dir_ent,
4095 &duplicate_file);
4096 INFO("file %s, uncompressed size %lld "
4097 "bytes %s\n",
4098 subpathname(dir_ent),
4099 (long long) buf->st_size,
4100 duplicate_file ? "DUPLICATE" :
4101 "");
4102 break;
4103
4104 case S_IFDIR:
4105 squashfs_type = SQUASHFS_DIR_TYPE;
4106 dir_scan7(inode, dir_ent->dir);
4107 break;
4108
4109 case S_IFLNK:
4110 squashfs_type = SQUASHFS_SYMLINK_TYPE;
4111 create_inode(inode, NULL, dir_ent,
4112 squashfs_type, 0, 0, 0, NULL,
4113 NULL, NULL, 0);
4114 INFO("symbolic link %s inode 0x%llx\n",
4115 subpathname(dir_ent), *inode);
4116 sym_count ++;
4117 break;
4118
4119 case S_IFCHR:
4120 squashfs_type = SQUASHFS_CHRDEV_TYPE;
4121 create_inode(inode, NULL, dir_ent,
4122 squashfs_type, 0, 0, 0, NULL,
4123 NULL, NULL, 0);
4124 INFO("character device %s inode 0x%llx"
4125 "\n", subpathname(dir_ent),
4126 *inode);
4127 dev_count ++;
4128 break;
4129
4130 case S_IFBLK:
4131 squashfs_type = SQUASHFS_BLKDEV_TYPE;
4132 create_inode(inode, NULL, dir_ent,
4133 squashfs_type, 0, 0, 0, NULL,
4134 NULL, NULL, 0);
4135 INFO("block device %s inode 0x%llx\n",
4136 subpathname(dir_ent), *inode);
4137 dev_count ++;
4138 break;
4139
4140 case S_IFIFO:
4141 squashfs_type = SQUASHFS_FIFO_TYPE;
4142 create_inode(inode, NULL, dir_ent,
4143 squashfs_type, 0, 0, 0, NULL,
4144 NULL, NULL, 0);
4145 INFO("fifo %s inode 0x%llx\n",
4146 subpathname(dir_ent), *inode);
4147 fifo_count ++;
4148 break;
4149
4150 case S_IFSOCK:
4151 squashfs_type = SQUASHFS_SOCKET_TYPE;
4152 create_inode(inode, NULL, dir_ent,
4153 squashfs_type, 0, 0, 0, NULL,
4154 NULL, NULL, 0);
4155 INFO("unix domain socket %s inode "
4156 "0x%llx\n",
4157 subpathname(dir_ent), *inode);
4158 sock_count ++;
4159 break;
4160
4161 default:
4162 BAD_ERROR("%s unrecognised file type, "
4163 "mode is %x\n",
4164 subpathname(dir_ent),
4165 buf->st_mode);
4166 }
4167 dir_ent->inode->inode = *inode;
4168 dir_ent->inode->type = squashfs_type;
4169 } else {
4170 *inode = dir_ent->inode->inode;
4171 squashfs_type = dir_ent->inode->type;
4172 switch(squashfs_type) {
4173 case SQUASHFS_FILE_TYPE:
4174 if(!sorted)
4175 INFO("file %s, uncompressed "
4176 "size %lld bytes LINK"
4177 "\n",
4178 subpathname(dir_ent),
4179 (long long)
4180 buf->st_size);
4181 break;
4182 case SQUASHFS_SYMLINK_TYPE:
4183 INFO("symbolic link %s inode 0x%llx "
4184 "LINK\n", subpathname(dir_ent),
4185 *inode);
4186 break;
4187 case SQUASHFS_CHRDEV_TYPE:
4188 INFO("character device %s inode 0x%llx "
4189 "LINK\n", subpathname(dir_ent),
4190 *inode);
4191 break;
4192 case SQUASHFS_BLKDEV_TYPE:
4193 INFO("block device %s inode 0x%llx "
4194 "LINK\n", subpathname(dir_ent),
4195 *inode);
4196 break;
4197 case SQUASHFS_FIFO_TYPE:
4198 INFO("fifo %s inode 0x%llx LINK\n",
4199 subpathname(dir_ent), *inode);
4200 break;
4201 case SQUASHFS_SOCKET_TYPE:
4202 INFO("unix domain socket %s inode "
4203 "0x%llx LINK\n",
4204 subpathname(dir_ent), *inode);
4205 break;
4206 }
4207 }
4208
4209 add_dir(*inode, get_inode_no(dir_ent->inode), dir_ent->name,
4210 squashfs_type, &dir);
4211 }
4212
4213 write_dir(inode, dir_info, &dir);
4214 INFO("directory %s inode 0x%llx\n", subpathname(dir_info->dir_ent),
4215 *inode);
4216
4217 scan7_freedir(&dir);
4218 }
4219
4220
4221 unsigned int slog(unsigned int block)
4222 {
4223 int i;
4224
4225 for(i = 12; i <= 20; i++)
4226 if(block == (1 << i))
4227 return i;
4228 return 0;
4229 }
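/*
 * Example usage (illustrative): slog() returns the base-2 logarithm of a
 * valid block size, or 0 for anything that is not a power of two between
 * 4K and 1M, e.g.
 *
 *     slog(4096)   == 12
 *     slog(131072) == 17
 *     slog(131000) == 0    (treated as an invalid block size by the caller)
 */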
4230
4231
4232 int old_excluded(char *filename, struct stat *buf)
4233 {
4234 int i;
4235
4236 for(i = 0; i < exclude; i++)
4237 if((exclude_paths[i].st_dev == buf->st_dev) &&
4238 (exclude_paths[i].st_ino == buf->st_ino))
4239 return TRUE;
4240 return FALSE;
4241 }
4242
4243
4244 #define ADD_ENTRY(buf) \
4245 if(exclude % EXCLUDE_SIZE == 0) { \
4246 exclude_paths = realloc(exclude_paths, (exclude + EXCLUDE_SIZE) \
4247 * sizeof(struct exclude_info)); \
4248 if(exclude_paths == NULL) \
4249 MEM_ERROR(); \
4250 } \
4251 exclude_paths[exclude].st_dev = buf.st_dev; \
4252 exclude_paths[exclude++].st_ino = buf.st_ino;
4253 int old_add_exclude(char *path)
4254 {
4255 int i;
4256 char *filename;
4257 struct stat buf;
4258
4259 if(path[0] == '/' || strncmp(path, "./", 2) == 0 ||
4260 strncmp(path, "../", 3) == 0) {
4261 if(lstat(path, &buf) == -1) {
4262 ERROR_START("Cannot stat exclude dir/file %s because "
4263 "%s", path, strerror(errno));
4264 ERROR_EXIT(", ignoring\n");
4265 return TRUE;
4266 }
4267 ADD_ENTRY(buf);
4268 return TRUE;
4269 }
4270
4271 for(i = 0; i < source; i++) {
4272 int res = asprintf(&filename, "%s/%s", source_path[i], path);
4273 if(res == -1)
4274 BAD_ERROR("asprintf failed in old_add_exclude\n");
4275 if(lstat(filename, &buf) == -1) {
4276 if(!(errno == ENOENT || errno == ENOTDIR)) {
4277 ERROR_START("Cannot stat exclude dir/file %s "
4278 "because %s", filename, strerror(errno));
4279 ERROR_EXIT(", ignoring\n");
4280 }
4281 free(filename);
4282 continue;
4283 }
4284 free(filename);
4285 ADD_ENTRY(buf);
4286 }
4287 return TRUE;
4288 }
4289
4290
4291 void add_old_root_entry(char *name, squashfs_inode inode, int inode_number,
4292 int type)
4293 {
4294 old_root_entry = realloc(old_root_entry,
4295 sizeof(struct old_root_entry_info) * (old_root_entries + 1));
4296 if(old_root_entry == NULL)
4297 MEM_ERROR();
4298
4299 old_root_entry[old_root_entries].name = strdup(name);
4300 old_root_entry[old_root_entries].inode.inode = inode;
4301 old_root_entry[old_root_entries].inode.inode_number = inode_number;
4302 old_root_entry[old_root_entries].inode.type = type;
4303 old_root_entry[old_root_entries++].inode.root_entry = TRUE;
4304 }
4305
4306
4307 void initialise_threads(int readq, int fragq, int bwriteq, int fwriteq,
4308 int freelst, char *destination_file)
4309 {
4310 int i;
4311 sigset_t sigmask, old_mask;
4312 int total_mem = readq;
4313 int reader_size;
4314 int fragment_size;
4315 int fwriter_size;
4316 /*
4317 * bwriter_size is global because it is needed in
4318 * write_file_blocks_dup()
4319 */
4320
4321 /*
4322 * Never allow the total size of the queues to be larger than
4323 * physical memory
4324 *
4325 * When adding together the possibly user supplied values, make
4326 * sure they've not been deliberately contrived to overflow an int
4327 */
4328 if(add_overflow(total_mem, fragq))
4329 BAD_ERROR("Queue sizes ridiculously large\n");
4330 total_mem += fragq;
4331 if(add_overflow(total_mem, bwriteq))
4332 BAD_ERROR("Queue sizes ridiculously large\n");
4333 total_mem += bwriteq;
4334 if(add_overflow(total_mem, fwriteq))
4335 BAD_ERROR("Queue sizes ridiculously large\n");
4336 total_mem += fwriteq;
4337
4338 check_usable_phys_mem(total_mem);
4339
4340 /*
4341 * convert from queue size in Mbytes to queue size in
4342 * blocks.
4343 *
4344 * This isn't going to overflow an int unless there exist
4345 * systems with more than 8 Petabytes of RAM!
4346 */
4347 reader_size = readq << (20 - block_log);
4348 fragment_size = fragq << (20 - block_log);
4349 bwriter_size = bwriteq << (20 - block_log);
4350 fwriter_size = fwriteq << (20 - block_log);
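/*
 * Worked example (illustrative): with the default 128 Kbyte block size
 * (block_log = 17) and readq = 64 Mbytes, reader_size becomes
 * 64 << (20 - 17) = 512, i.e. 512 blocks of 128K = 64 Mbytes of cached
 * blocks, as expected.
 */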
4351
4352 /*
4353 * set up signal handlers for the main thread; these clean up by
4354 * deleting the destination file. If appending, the
4355 * handlers for SIGTERM and SIGINT will be replaced with handlers
4356 * allowing the user to press ^C twice to restore the existing
4357 * filesystem.
4358 *
4359 * SIGUSR1 is an internal signal, which is used by the sub-threads
4360 * to tell the main thread to terminate, deleting the destination file,
4361 * or if necessary restoring the filesystem on appending
4362 */
4363 signal(SIGTERM, sighandler);
4364 signal(SIGINT, sighandler);
4365 signal(SIGUSR1, sighandler);
4366
4367 /* block SIGQUIT and SIGHUP, these are handled by the info thread */
4368 sigemptyset(&sigmask);
4369 sigaddset(&sigmask, SIGQUIT);
4370 sigaddset(&sigmask, SIGHUP);
4371 sigaddset(&sigmask, SIGALRM);
4372 if(pthread_sigmask(SIG_BLOCK, &sigmask, NULL) != 0)
4373 BAD_ERROR("Failed to set signal mask in initialise_threads\n");
4374
4375 /*
4376 * temporarily block these signals, so the created sub-threads
4377 * will ignore them, ensuring the main thread handles them
4378 */
4379 sigemptyset(&sigmask);
4380 sigaddset(&sigmask, SIGINT);
4381 sigaddset(&sigmask, SIGTERM);
4382 sigaddset(&sigmask, SIGUSR1);
4383 if(pthread_sigmask(SIG_BLOCK, &sigmask, &old_mask) != 0)
4384 BAD_ERROR("Failed to set signal mask in initialise_threads\n");
4385
4386 if(processors == -1) {
4387 #ifndef linux
4388 int mib[2];
4389 size_t len = sizeof(processors);
4390
4391 mib[0] = CTL_HW;
4392 #ifdef HW_AVAILCPU
4393 mib[1] = HW_AVAILCPU;
4394 #else
4395 mib[1] = HW_NCPU;
4396 #endif
4397
4398 if(sysctl(mib, 2, &processors, &len, NULL, 0) == -1) {
4399 ERROR_START("Failed to get number of available "
4400 "processors.");
4401 ERROR_EXIT(" Defaulting to 1\n");
4402 processors = 1;
4403 }
4404 #else
4405 processors = sysconf(_SC_NPROCESSORS_ONLN);
4406 #endif
4407 }
4408
4409 if(multiply_overflow(processors, 3) ||
4410 multiply_overflow(processors * 3, sizeof(pthread_t)))
4411 BAD_ERROR("Processors too large\n");
4412
4413 deflator_thread = malloc(processors * 3 * sizeof(pthread_t));
4414 if(deflator_thread == NULL)
4415 MEM_ERROR();
4416
4417 frag_deflator_thread = &deflator_thread[processors];
4418 frag_thread = &frag_deflator_thread[processors];
4419
4420 to_reader = queue_init(1);
4421 to_deflate = queue_init(reader_size);
4422 to_process_frag = queue_init(reader_size);
4423 to_writer = queue_init(bwriter_size + fwriter_size);
4424 from_writer = queue_init(1);
4425 to_frag = queue_init(fragment_size);
4426 locked_fragment = queue_init(fragment_size);
4427 to_main = seq_queue_init();
4428 reader_buffer = cache_init(block_size, reader_size, 0, 0);
4429 bwriter_buffer = cache_init(block_size, bwriter_size, 1, freelst);
4430 fwriter_buffer = cache_init(block_size, fwriter_size, 1, freelst);
4431 fragment_buffer = cache_init(block_size, fragment_size, 1, 0);
4432 reserve_cache = cache_init(block_size, processors + 1, 1, 0);
4433 pthread_create(&reader_thread, NULL, reader, NULL);
4434 pthread_create(&writer_thread, NULL, writer, NULL);
4435 init_progress_bar();
4436 init_info();
4437
4438 for(i = 0; i < processors; i++) {
4439 if(pthread_create(&deflator_thread[i], NULL, deflator, NULL))
4440 BAD_ERROR("Failed to create thread\n");
4441 if(pthread_create(&frag_deflator_thread[i], NULL, frag_deflator,
4442 NULL) != 0)
4443 BAD_ERROR("Failed to create thread\n");
4444 if(pthread_create(&frag_thread[i], NULL, frag_thrd,
4445 (void *) destination_file) != 0)
4446 BAD_ERROR("Failed to create thread\n");
4447 }
4448
4449 main_thread = pthread_self();
4450
4451 printf("Parallel mksquashfs: Using %d processor%s\n", processors,
4452 processors == 1 ? "" : "s");
4453
4454 /* Restore the signal mask for the main thread */
4455 if(pthread_sigmask(SIG_SETMASK, &old_mask, NULL) != 0)
4456 BAD_ERROR("Failed to set signal mask in initialise_threads\n");
4457 }
4458
4459
4460 long long write_inode_lookup_table()
4461 {
4462 int i, inode_number, lookup_bytes = SQUASHFS_LOOKUP_BYTES(inode_count);
4463 void *it;
4464
4465 if(inode_count == sinode_count)
4466 goto skip_inode_hash_table;
4467
4468 it = realloc(inode_lookup_table, lookup_bytes);
4469 if(it == NULL)
4470 MEM_ERROR();
4471 inode_lookup_table = it;
4472
4473 for(i = 0; i < INODE_HASH_SIZE; i ++) {
4474 struct inode_info *inode;
4475
4476 for(inode = inode_info[i]; inode; inode = inode->next) {
4477
4478 inode_number = get_inode_no(inode);
4479
4480 /* The empty action will produce orphaned inode
4481 * entries in the inode_info[] table. These
4482 * entries, because they are orphaned, will not be
4483 * allocated an inode number in dir_scan6(), so
4484 * skip any entries with the default dummy inode
4485 * number of 0 */
4486 if(inode_number == 0)
4487 continue;
4488
4489 SQUASHFS_SWAP_LONG_LONGS(&inode->inode,
4490 &inode_lookup_table[inode_number - 1], 1);
4491
4492 }
4493 }
4494
4495 skip_inode_hash_table:
4496 return generic_write_table(lookup_bytes, inode_lookup_table, 0, NULL,
4497 noI);
4498 }
4499
4500
4501 char *get_component(char *target, char **targname)
4502 {
4503 char *start;
4504
4505 while(*target == '/')
4506 target ++;
4507
4508 start = target;
4509 while(*target != '/' && *target != '\0')
4510 target ++;
4511
4512 *targname = strndup(start, target - start);
4513
4514 while(*target == '/')
4515 target ++;
4516
4517 return target;
4518 }
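/*
 * Example (illustrative): get_component("usr//share/man", &name) sets name
 * to "usr" and returns a pointer to "share/man"; calling it again on the
 * returned string yields "share", then "man", and finally an empty string
 * once the terminating '\0' is reached.  Leading and repeated '/'
 * separators are skipped.
 */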
4519
4520
4521 void free_path(struct pathname *paths)
4522 {
4523 int i;
4524
4525 for(i = 0; i < paths->names; i++) {
4526 if(paths->name[i].paths)
4527 free_path(paths->name[i].paths);
4528 free(paths->name[i].name);
4529 if(paths->name[i].preg) {
4530 regfree(paths->name[i].preg);
4531 free(paths->name[i].preg);
4532 }
4533 }
4534
4535 free(paths);
4536 }
4537
4538
4539 struct pathname *add_path(struct pathname *paths, char *target, char *alltarget)
4540 {
4541 char *targname;
4542 int i, error;
4543
4544 target = get_component(target, &targname);
4545
4546 if(paths == NULL) {
4547 paths = malloc(sizeof(struct pathname));
4548 if(paths == NULL)
4549 MEM_ERROR();
4550
4551 paths->names = 0;
4552 paths->name = NULL;
4553 }
4554
4555 for(i = 0; i < paths->names; i++)
4556 if(strcmp(paths->name[i].name, targname) == 0)
4557 break;
4558
4559 if(i == paths->names) {
4560 /* allocate new name entry */
4561 paths->names ++;
4562 paths->name = realloc(paths->name, (i + 1) *
4563 sizeof(struct path_entry));
4564 if(paths->name == NULL)
4565 MEM_ERROR();
4566 paths->name[i].name = targname;
4567 paths->name[i].paths = NULL;
4568 if(use_regex) {
4569 paths->name[i].preg = malloc(sizeof(regex_t));
4570 if(paths->name[i].preg == NULL)
4571 MEM_ERROR();
4572 error = regcomp(paths->name[i].preg, targname,
4573 REG_EXTENDED|REG_NOSUB);
4574 if(error) {
4575 char str[1024]; /* overflow safe */
4576
4577 regerror(error, paths->name[i].preg, str, 1024);
4578 BAD_ERROR("invalid regex %s in export %s, "
4579 "because %s\n", targname, alltarget,
4580 str);
4581 }
4582 } else
4583 paths->name[i].preg = NULL;
4584
4585 if(target[0] == '\0')
4586 /* at leaf pathname component */
4587 paths->name[i].paths = NULL;
4588 else
4589 /* recurse adding child components */
4590 paths->name[i].paths = add_path(NULL, target,
4591 alltarget);
4592 } else {
4593 /* existing matching entry */
4594 free(targname);
4595
4596 if(paths->name[i].paths == NULL) {
4597 /* No sub-directory which means this is the leaf
4598 * component of a pre-existing exclude which subsumes
4599 * the exclude currently being added, in which case stop
4600 * adding components */
4601 } else if(target[0] == '\0') {
4602 /* at leaf pathname component and child components exist
4603 * from more specific excludes, delete as they're
4604 * subsumed by this exclude */
4605 free_path(paths->name[i].paths);
4606 paths->name[i].paths = NULL;
4607 } else
4608 /* recurse adding child components */
4609 add_path(paths->name[i].paths, target, alltarget);
4610 }
4611
4612 return paths;
4613 }
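/*
 * Example (illustrative): add_path(NULL, "usr/share/*.gz", "usr/share/*.gz")
 * builds a three level tree "usr" -> "share" -> "*.gz", with the leaf's
 * paths pointer NULL.  Subsequently adding "usr/share" to the same tree
 * frees the "*.gz" child, because the shorter exclude subsumes the more
 * specific one (handled by the else branch above).
 */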
4614
4615
4616 void add_exclude(char *target)
4617 {
4618
4619 if(target[0] == '/' || strncmp(target, "./", 2) == 0 ||
4620 strncmp(target, "../", 3) == 0)
4621 BAD_ERROR("/, ./ and ../ prefixed excludes not supported with "
4622 "-wildcards or -regex options\n");
4623 else if(strncmp(target, "... ", 4) == 0)
4624 stickypath = add_path(stickypath, target + 4, target + 4);
4625 else
4626 path = add_path(path, target, target);
4627 }
4628
4629
4630 void display_path(int depth, struct pathname *paths)
4631 {
4632 int i, n;
4633
4634 if(paths == NULL)
4635 return;
4636
4637 for(i = 0; i < paths->names; i++) {
4638 for(n = 0; n < depth; n++)
4639 printf("\t");
4640 printf("%d: %s\n", depth, paths->name[i].name);
4641 display_path(depth + 1, paths->name[i].paths);
4642 }
4643 }
4644
4645
4646 void display_path2(struct pathname *paths, char *string)
4647 {
4648 int i;
4649 char *path;
4650
4651 if(paths == NULL) {
4652 printf("%s\n", string);
4653 return;
4654 }
4655
4656 for(i = 0; i < paths->names; i++) {
4657 int res = asprintf(&path, "%s/%s", string, paths->name[i].name);
4658 if(res == -1)
4659 BAD_ERROR("asprintf failed in display_path2\n");
4660 display_path2(paths->name[i].paths, path);
4661 free(path);
4662 }
4663 }
4664
4665
4666 struct pathnames *add_subdir(struct pathnames *paths, struct pathname *path)
4667 {
4668 int count = paths == NULL ? 0 : paths->count;
4669
4670 if(count % PATHS_ALLOC_SIZE == 0) {
4671 paths = realloc(paths, sizeof(struct pathnames) +
4672 (count + PATHS_ALLOC_SIZE) * sizeof(struct pathname *));
4673 if(paths == NULL)
4674 MEM_ERROR();
4675 }
4676
4677 paths->path[count] = path;
4678 paths->count = count + 1;
4679 return paths;
4680 }
4681
4682
4683 int excluded_match(char *name, struct pathname *path, struct pathnames **new)
4684 {
4685 int i;
4686
4687 for(i = 0; i < path->names; i++) {
4688 int match = use_regex ?
4689 regexec(path->name[i].preg, name, (size_t) 0,
4690 NULL, 0) == 0 :
4691 fnmatch(path->name[i].name, name,
4692 FNM_PATHNAME|FNM_PERIOD|FNM_EXTMATCH) == 0;
4693
4694 if(match) {
4695 if(path->name[i].paths == NULL || new == NULL)
4696 /* match on a leaf component, any subdirectories
4697 * in the filesystem should be excluded */
4698 return TRUE;
4699 else
4700 /* match on a non-leaf component, add any
4701 * subdirectories to the new set of
4702 * subdirectories to scan for this name */
4703 *new = add_subdir(*new, path->name[i].paths);
4704 }
4705 }
4706
4707 return FALSE;
4708 }
4709
4710
4711 int excluded(char *name, struct pathnames *paths, struct pathnames **new)
4712 {
4713 int n;
4714
4715 if(stickypath && excluded_match(name, stickypath, NULL))
4716 return TRUE;
4717
4718 for(n = 0; paths && n < paths->count; n++) {
4719 int res = excluded_match(name, paths->path[n], new);
4720 if(res) {
4721 free(*new);
4722 *new = NULL;
4723 return TRUE;
4724 }
4725 }
4726
4727 /*
4728 * Either:
4729 * - no matching names found, return empty new search set, or
4730 * - one or more matches with sub-directories found (no leaf matches),
4731 * in which case return new search set.
4732 *
4733 * In either case return FALSE as we don't want to exclude this entry
4734 */
4735 return FALSE;
4736 }
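/*
 * Example (illustrative): with -wildcards and the exclude "usr/share/doc",
 * scanning the top-level name "usr" matches a non-leaf component, so the
 * "share/doc" subtree is returned in *new and passed down when the usr
 * directory is scanned; when "doc" is eventually reached it matches a leaf
 * component and excluded() returns TRUE, excluding that directory and
 * everything below it.  Sticky excludes ("... name") are matched against
 * every directory level via stickypath.
 */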
4737
4738
4739 void process_exclude_file(char *argv)
4740 {
4741 FILE *fd;
4742 char buffer[MAX_LINE + 1]; /* overflow safe */
4743 char *filename;
4744
4745 fd = fopen(argv, "r");
4746 if(fd == NULL)
4747 BAD_ERROR("Failed to open exclude file \"%s\" because %s\n",
4748 argv, strerror(errno));
4749
4750 while(fgets(filename = buffer, MAX_LINE + 1, fd) != NULL) {
4751 int len = strlen(filename);
4752
4753 if(len == MAX_LINE && filename[len - 1] != '\n')
4754 /* line too large */
4755 BAD_ERROR("Line too long when reading "
4756 "exclude file \"%s\", larger than %d "
4757 "bytes\n", argv, MAX_LINE);
4758
4759 /*
4760 * Remove '\n' terminator if it exists (the last line
4761 * in the file may not be '\n' terminated)
4762 */
4763 if(len && filename[len - 1] == '\n')
4764 filename[len - 1] = '\0';
4765
4766 /* Skip any leading whitespace */
4767 while(isspace(*filename))
4768 filename ++;
4769
4770 /* if comment line, skip */
4771 if(*filename == '#')
4772 continue;
4773
4774 /*
4775 * check for initial backslash, to accommodate
4776 * filenames with leading space or leading # character
4777 */
4778 if(*filename == '\\')
4779 filename ++;
4780
4781 /* if line is now empty after skipping characters, skip it */
4782 if(*filename == '\0')
4783 continue;
4784
4785 if(old_exclude)
4786 old_add_exclude(filename);
4787 else
4788 add_exclude(filename);
4789 }
4790
4791 if(ferror(fd))
4792 BAD_ERROR("Reading exclude file \"%s\" failed because %s\n",
4793 argv, strerror(errno));
4794
4795 fclose(fd);
4796 }
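/*
 * Example exclude file (illustrative), showing the conventions handled
 * above: one pathname per line, lines starting with '#' are comments, and
 * a leading backslash protects names beginning with '#' or whitespace
 * (wildcards in the names need the -wildcards or -regex option):
 *
 *     # build artefacts
 *     tmp/cache
 *     var/log/*.log
 *     \#autosave#
 */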
4797
4798 /* ANDROID CHANGES START*/
4799 #ifdef ANDROID
4800 /*
4801 * Return TRUE (don't compress) if the (regular) file is in the
4802 * whitelist. Otherwise return the global noD value.
4803 *
4804 * Note: These functions are lifted 100% from the existing exclude
4805 * file code. For maintainability, I've kept this code separate from
4806 * the exclude code instead of having common code for both paths.
4807 */
4808 static int
4809 whitelisted(struct stat *buf)
4810 {
4811 int i;
4812
4813 /*
4814 * only regular files in the whitelist
4815 */
4816 if (!S_ISREG(buf->st_mode))
4817 return noD;
4818 for (i = 0; i < whitelist; i++) {
4819 if ((whitelist_paths[i].st_dev == buf->st_dev) &&
4820 (whitelist_paths[i].st_ino == buf->st_ino)) {
4821 /* Don't compress */
4822 whitelisted_count++;
4823 return TRUE;
4824 }
4825 }
4826 return noD;
4827 }
4828
4829 static void
4830 add_whitelist_entry(char *filename, struct stat *buf)
4831 {
4832 if (!S_ISREG(buf->st_mode)) {
4833 BAD_ERROR("Cannot whitelist %s only regular files can be whitelisted",
4834 filename);
4835 }
4836 if (whitelist % WHITELIST_SIZE == 0) {
4837 whitelist_paths = realloc(whitelist_paths,
4838 (whitelist + WHITELIST_SIZE)
4839 * sizeof(struct whitelist_info));
4840 if (whitelist_paths == NULL)
4841 MEM_ERROR();
4842 }
4843 whitelist_paths[whitelist].st_dev = buf->st_dev;
4844 whitelist_paths[whitelist++].st_ino = buf->st_ino;
4845 }
4846
4847 static int
4848 add_whitelist(char *path)
4849 {
4850 int i;
4851 char *filename;
4852 struct stat buf;
4853
4854 /* Absolute or (filesystem) relative path */
4855 if (path[0] == '/' || strncmp(path, "./", 2) == 0 ||
4856 strncmp(path, "../", 3) == 0) {
4857 if(lstat(path, &buf) == -1) {
4858 BAD_ERROR("Cannot stat whitelist dir/file %s because "
4859 "%s", path, strerror(errno));
4860 }
4861 add_whitelist_entry(path, &buf);
4862 return TRUE;
4863 }
4864
4865 /* pathname relative to mksquashfs source dirs */
4866 for(i = 0; i < source; i++) {
4867 int res = asprintf(&filename, "%s/%s", source_path[i], path);
4868 if(res == -1)
4869 BAD_ERROR("asprintf failed in add_whitelist\n");
4870 if(lstat(filename, &buf) == -1) {
4871 if(!(errno == ENOENT || errno == ENOTDIR)) {
4872 BAD_ERROR("Cannot stat whitelist dir/file %s "
4873 "because %s", filename, strerror(errno));
4874 }
4875 free(filename);
4876 continue;
4877 }
4878 add_whitelist_entry(filename, &buf);
4879 free(filename);
4880 }
4881 return TRUE;
4882 }
4883
4884 static void
4885 process_whitelist_file(char *argv)
4886 {
4887 FILE *fd;
4888 char buffer[MAX_LINE + 1]; /* overflow safe */
4889 char *filename;
4890
4891 fd = fopen(argv, "r");
4892 if(fd == NULL)
4893 BAD_ERROR("Failed to open whitelist file \"%s\" because %s\n",
4894 argv, strerror(errno));
4895
4896 while(fgets(filename = buffer, MAX_LINE + 1, fd) != NULL) {
4897 int len = strlen(filename);
4898
4899 if(len == MAX_LINE && filename[len - 1] != '\n')
4900 /* line too large */
4901 BAD_ERROR("Line too long when reading "
4902 "whitelist file \"%s\", larger than %d "
4903 "bytes\n", argv, MAX_LINE);
4904
4905 /*
4906 * Remove '\n' terminator if it exists (the last line
4907 * in the file may not be '\n' terminated)
4908 */
4909 if(len && filename[len - 1] == '\n')
4910 filename[len - 1] = '\0';
4911
4912 /* Skip any leading whitespace */
4913 while(isspace(*filename))
4914 filename ++;
4915
4916 /* if comment line, skip */
4917 if(*filename == '#')
4918 continue;
4919
4920 /*
4921 * check for initial backslash, to accommodate
4922 * filenames with leading space or leading # character
4923 */
4924 if(*filename == '\\')
4925 filename ++;
4926
4927 /* if line is now empty after skipping characters, skip it */
4928 if(*filename == '\0')
4929 continue;
4930
4931 add_whitelist(filename);
4932 }
4933
4934 if(ferror(fd))
4935 BAD_ERROR("Reading whitelist file \"%s\" failed because %s\n",
4936 argv, strerror(errno));
4937
4938 fclose(fd);
4939 }
4940 #endif
4941 /* ANDROID CHANGES END */
4942
4943 #define RECOVER_ID "Squashfs recovery file v1.0\n"
4944 #define RECOVER_ID_SIZE 28
4945
4946 void write_recovery_data(struct squashfs_super_block *sBlk)
4947 {
4948 int res, recoverfd, bytes = sBlk->bytes_used - sBlk->inode_table_start;
4949 pid_t pid = getpid();
4950 char *metadata;
4951 char header[] = RECOVER_ID;
4952
4953 if(recover == FALSE) {
4954 printf("No recovery data option specified.\n");
4955 printf("Skipping saving recovery file.\n\n");
4956 return;
4957 }
4958
4959 metadata = malloc(bytes);
4960 if(metadata == NULL)
4961 MEM_ERROR();
4962
4963 res = read_fs_bytes(fd, sBlk->inode_table_start, bytes, metadata);
4964 if(res == 0) {
4965 ERROR("Failed to read append filesystem metadata\n");
4966 BAD_ERROR("Filesystem corrupted?\n");
4967 }
4968
4969 res = asprintf(&recovery_file, "squashfs_recovery_%s_%d",
4970 getbase(destination_file), pid);
4971 if(res == -1)
4972 MEM_ERROR();
4973
4974 recoverfd = open(recovery_file, O_CREAT | O_TRUNC | O_RDWR, S_IRWXU);
4975 if(recoverfd == -1)
4976 BAD_ERROR("Failed to create recovery file, because %s. "
4977 "Aborting\n", strerror(errno));
4978
4979 if(write_bytes(recoverfd, header, RECOVER_ID_SIZE) == -1)
4980 BAD_ERROR("Failed to write recovery file, because %s\n",
4981 strerror(errno));
4982
4983 if(write_bytes(recoverfd, sBlk, sizeof(struct squashfs_super_block)) == -1)
4984 BAD_ERROR("Failed to write recovery file, because %s\n",
4985 strerror(errno));
4986
4987 if(write_bytes(recoverfd, metadata, bytes) == -1)
4988 BAD_ERROR("Failed to write recovery file, because %s\n",
4989 strerror(errno));
4990
4991 close(recoverfd);
4992 free(metadata);
4993
4994 printf("Recovery file \"%s\" written\n", recovery_file);
4995 printf("If Mksquashfs aborts abnormally (i.e. power failure), run\n");
4996 printf("mksquashfs dummy %s -recover %s\n", destination_file,
4997 recovery_file);
4998 printf("to restore filesystem\n\n");
4999 }
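/*
 * For reference, the recovery file written above has the layout that
 * read_recovery_data() below expects:
 *
 *     RECOVER_ID header       (RECOVER_ID_SIZE bytes)
 *     squashfs_super_block    (the superblock of the filesystem being
 *                              appended to)
 *     metadata                (everything from inode_table_start up to
 *                              bytes_used)
 */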
5000
5001
5002 void read_recovery_data(char *recovery_file, char *destination_file)
5003 {
5004 int fd, recoverfd, bytes;
5005 struct squashfs_super_block orig_sBlk, sBlk;
5006 char *metadata;
5007 int res;
5008 struct stat buf;
5009 char header[] = RECOVER_ID;
5010 char header2[RECOVER_ID_SIZE];
5011
5012 recoverfd = open(recovery_file, O_RDONLY);
5013 if(recoverfd == -1)
5014 BAD_ERROR("Failed to open recovery file because %s\n",
5015 strerror(errno));
5016
5017 if(stat(destination_file, &buf) == -1)
5018 BAD_ERROR("Failed to stat destination file, because %s\n",
5019 strerror(errno));
5020
5021 fd = open(destination_file, O_RDWR);
5022 if(fd == -1)
5023 BAD_ERROR("Failed to open destination file because %s\n",
5024 strerror(errno));
5025
5026 res = read_bytes(recoverfd, header2, RECOVER_ID_SIZE);
5027 if(res == -1)
5028 BAD_ERROR("Failed to read recovery file, because %s\n",
5029 strerror(errno));
5030 if(res < RECOVER_ID_SIZE)
5031 BAD_ERROR("Recovery file appears to be truncated\n");
5032 if(strncmp(header, header2, RECOVER_ID_SIZE) != 0)
5033 BAD_ERROR("Not a recovery file\n");
5034
5035 res = read_bytes(recoverfd, &sBlk, sizeof(struct squashfs_super_block));
5036 if(res == -1)
5037 BAD_ERROR("Failed to read recovery file, because %s\n",
5038 strerror(errno));
5039 if(res < sizeof(struct squashfs_super_block))
5040 BAD_ERROR("Recovery file appears to be truncated\n");
5041
5042 res = read_fs_bytes(fd, 0, sizeof(struct squashfs_super_block), &orig_sBlk);
5043 if(res == 0) {
5044 ERROR("Failed to read superblock from output filesystem\n");
5045 BAD_ERROR("Output filesystem is empty!\n");
5046 }
5047
5048 if(memcmp(((char *) &sBlk) + 4, ((char *) &orig_sBlk) + 4,
5049 sizeof(struct squashfs_super_block) - 4) != 0)
5050 BAD_ERROR("Recovery file and destination file do not seem to "
5051 "match\n");
5052
5053 bytes = sBlk.bytes_used - sBlk.inode_table_start;
5054
5055 metadata = malloc(bytes);
5056 if(metadata == NULL)
5057 MEM_ERROR();
5058
5059 res = read_bytes(recoverfd, metadata, bytes);
5060 if(res == -1)
5061 BAD_ERROR("Failed to read recovery file, because %s\n",
5062 strerror(errno));
5063 if(res < bytes)
5064 BAD_ERROR("Recovery file appears to be truncated\n");
5065
5066 write_destination(fd, 0, sizeof(struct squashfs_super_block), &sBlk);
5067
5068 write_destination(fd, sBlk.inode_table_start, bytes, metadata);
5069
5070 close(recoverfd);
5071 close(fd);
5072
5073 printf("Successfully wrote recovery file \"%s\". Exiting\n",
5074 recovery_file);
5075
5076 exit(0);
5077 }
5078
5079
5080 void write_filesystem_tables(struct squashfs_super_block *sBlk, int nopad)
5081 {
5082 int i;
5083
5084 sBlk->fragments = fragments;
5085 sBlk->no_ids = id_count;
5086 sBlk->inode_table_start = write_inodes();
5087 sBlk->directory_table_start = write_directories();
5088 sBlk->fragment_table_start = write_fragment_table();
5089 sBlk->lookup_table_start = exportable ? write_inode_lookup_table() :
5090 SQUASHFS_INVALID_BLK;
5091 sBlk->id_table_start = write_id_table();
5092 sBlk->xattr_id_table_start = write_xattrs();
5093
5094 TRACE("sBlk->inode_table_start 0x%llx\n", sBlk->inode_table_start);
5095 TRACE("sBlk->directory_table_start 0x%llx\n",
5096 sBlk->directory_table_start);
5097 TRACE("sBlk->fragment_table_start 0x%llx\n", sBlk->fragment_table_start);
5098 if(exportable)
5099 TRACE("sBlk->lookup_table_start 0x%llx\n",
5100 sBlk->lookup_table_start);
5101
5102 sBlk->bytes_used = bytes;
5103
5104 sBlk->compression = comp->id;
5105
5106 SQUASHFS_INSWAP_SUPER_BLOCK(sBlk);
5107 write_destination(fd, SQUASHFS_START, sizeof(*sBlk), sBlk);
5108
5109 if(!nopad && (i = bytes & (4096 - 1))) {
5110 char temp[4096] = {0};
5111 write_destination(fd, bytes, 4096 - i, temp);
5112 }
5113
5114 close(fd);
5115
5116 if(recovery_file)
5117 unlink(recovery_file);
5118
5119 total_bytes += total_inode_bytes + total_directory_bytes +
5120 sizeof(struct squashfs_super_block) + total_xattr_bytes;
5121
5122 printf("\n%sSquashfs %d.%d filesystem, %s compressed, data block size"
5123 " %d\n", exportable ? "Exportable " : "", SQUASHFS_MAJOR,
5124 SQUASHFS_MINOR, comp->name, block_size);
5125 printf("\t%s data, %s metadata, %s fragments, %s xattrs\n",
5126 noD ? "uncompressed" : "compressed", noI ? "uncompressed" :
5127 "compressed", no_fragments ? "no" : noF ? "uncompressed" :
5128 "compressed", no_xattrs ? "no" : noX ? "uncompressed" :
5129 "compressed");
5130 printf("\tduplicates are %sremoved\n", duplicate_checking ? "" :
5131 "not ");
5132 printf("Filesystem size %.2f Kbytes (%.2f Mbytes)\n", bytes / 1024.0,
5133 bytes / (1024.0 * 1024.0));
5134 printf("\t%.2f%% of uncompressed filesystem size (%.2f Kbytes)\n",
5135 ((float) bytes / total_bytes) * 100.0, total_bytes / 1024.0);
5136 printf("Inode table size %d bytes (%.2f Kbytes)\n",
5137 inode_bytes, inode_bytes / 1024.0);
5138 printf("\t%.2f%% of uncompressed inode table size (%d bytes)\n",
5139 ((float) inode_bytes / total_inode_bytes) * 100.0,
5140 total_inode_bytes);
5141 printf("Directory table size %d bytes (%.2f Kbytes)\n",
5142 directory_bytes, directory_bytes / 1024.0);
5143 printf("\t%.2f%% of uncompressed directory table size (%d bytes)\n",
5144 ((float) directory_bytes / total_directory_bytes) * 100.0,
5145 total_directory_bytes);
5146 if(total_xattr_bytes) {
5147 printf("Xattr table size %d bytes (%.2f Kbytes)\n",
5148 xattr_bytes, xattr_bytes / 1024.0);
5149 printf("\t%.2f%% of uncompressed xattr table size (%d bytes)\n",
5150 ((float) xattr_bytes / total_xattr_bytes) * 100.0,
5151 total_xattr_bytes);
5152 }
5153 if(duplicate_checking)
5154 printf("Number of duplicate files found %d\n", file_count -
5155 dup_files);
5156 else
5157 printf("No duplicate files removed\n");
5158 printf("Number of inodes %d\n", inode_count);
5159 printf("Number of files %d\n", file_count);
5160 if(!no_fragments)
5161 printf("Number of fragments %d\n", fragments);
5162 printf("Number of symbolic links %d\n", sym_count);
5163 printf("Number of device nodes %d\n", dev_count);
5164 printf("Number of fifo nodes %d\n", fifo_count);
5165 printf("Number of socket nodes %d\n", sock_count);
5166 printf("Number of directories %d\n", dir_count);
5167 printf("Number of ids (unique uids + gids) %d\n", id_count);
5168 printf("Number of uids %d\n", uid_count);
5169
5170 for(i = 0; i < id_count; i++) {
5171 if(id_table[i]->flags & ISA_UID) {
5172 struct passwd *user = getpwuid(id_table[i]->id);
5173 printf("\t%s (%d)\n", user == NULL ? "unknown" :
5174 user->pw_name, id_table[i]->id);
5175 }
5176 }
5177
5178 printf("Number of gids %d\n", guid_count);
5179
5180 for(i = 0; i < id_count; i++) {
5181 if(id_table[i]->flags & ISA_GID) {
5182 struct group *group = getgrgid(id_table[i]->id);
5183 printf("\t%s (%d)\n", group == NULL ? "unknown" :
5184 group->gr_name, id_table[i]->id);
5185 }
5186 }
5187
5188 printf("Number of whitelisted (uncompressed) files %d\n",
5189 whitelisted_count);
5190 }
5191
5192
5193 int parse_numberll(char *start, long long *res, int size)
5194 {
5195 char *end;
5196 long long number;
5197
5198 errno = 0; /* To distinguish success/failure after call */
5199
5200 number = strtoll(start, &end, 10);
5201
5202 /*
5203 * check for strtoll underflow or overflow in conversion, and other
5204 * errors.
5205 */
5206 if((errno == ERANGE && (number == LLONG_MIN || number == LLONG_MAX)) ||
5207 (errno != 0 && number == 0))
5208 return 0;
5209
5210 /* reject negative numbers as invalid */
5211 if(number < 0)
5212 return 0;
5213
5214 if(size) {
5215 /*
5216 * Check for multiplier and trailing junk.
5217 * But first check that a number exists before the
5218 * multiplier
5219 */
5220 if(end == start)
5221 return 0;
5222
5223 switch(end[0]) {
5224 case 'g':
5225 case 'G':
5226 if(multiply_overflowll(number, 1073741824))
5227 return 0;
5228 number *= 1073741824;
5229
5230 if(end[1] != '\0')
5231 /* trailing junk after multiplier, but
5232 * allow it to be "bytes" */
5233 if(strcmp(end + 1, "bytes"))
5234 return 0;
5235
5236 break;
5237 case 'm':
5238 case 'M':
5239 if(multiply_overflowll(number, 1048576))
5240 return 0;
5241 number *= 1048576;
5242
5243 if(end[1] != '\0')
5244 /* trailing junk after multiplier, but
5245 * allow it to be "bytes" */
5246 if(strcmp(end + 1, "bytes"))
5247 return 0;
5248
5249 break;
5250 case 'k':
5251 case 'K':
5252 if(multiply_overflowll(number, 1024))
5253 return 0;
5254 number *= 1024;
5255
5256 if(end[1] != '\0')
5257 /* trailing junk after multiplier, but
5258 * allow it to be "bytes" */
5259 if(strcmp(end + 1, "bytes"))
5260 return 0;
5261
5262 break;
5263 case '\0':
5264 break;
5265 default:
5266 /* trailing junk after number */
5267 return 0;
5268 }
5269 } else if(end[0] != '\0')
5270 /* trailing junk after number */
5271 return 0;
5272
5273 *res = number;
5274 return 1;
5275 }
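/*
 * Examples (illustrative) of what parse_numberll() accepts when size is
 * set (as it is for -mem and -b):
 *
 *     "8192"          -> 8192
 *     "64K"           -> 65536
 *     "2Mbytes"       -> 2097152
 *     "1G"            -> 1073741824
 *     "-1", "10x", "" -> rejected (returns 0)
 */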
5276
5277
5278 int parse_number(char *start, int *res, int size)
5279 {
5280 long long number;
5281
5282 if(!parse_numberll(start, &number, size))
5283 return 0;
5284
5285 /* check if long result will overflow signed int */
5286 if(number > INT_MAX)
5287 return 0;
5288
5289 *res = (int) number;
5290 return 1;
5291 }
5292
5293
5294 int parse_num(char *arg, int *res)
5295 {
5296 return parse_number(arg, res, 0);
5297 }
5298
5299
5300 int get_physical_memory()
5301 {
5302 int phys_mem;
5303 #ifndef linux
5304 #ifdef HW_MEMSIZE
5305 #define SYSCTL_PHYSMEM HW_MEMSIZE
5306 #elif defined(HW_PHYSMEM64)
5307 #define SYSCTL_PHYSMEM HW_PHYSMEM64
5308 #else
5309 #define SYSCTL_PHYSMEM HW_PHYSMEM
5310 #endif
5311
5312 int mib[2];
5313 uint64_t sysctl_physmem = 0;
5314 size_t sysctl_len = sizeof(sysctl_physmem);
5315
5316 mib[0] = CTL_HW;
5317 mib[1] = SYSCTL_PHYSMEM;
5318
5319 if(sysctl(mib, 2, &sysctl_physmem, &sysctl_len, NULL, 0) == 0) {
5320 /* some systems use 32-bit values, work with what we're given */
5321 if (sysctl_len == 4)
5322 sysctl_physmem = *(uint32_t*)&sysctl_physmem;
5323 phys_mem = sysctl_physmem >> 20;
5324 } else {
5325 ERROR_START("Failed to get amount of available "
5326 "memory.");
5327 ERROR_EXIT(" Defaulting to least viable amount\n");
5328 phys_mem = SQUASHFS_LOWMEM;
5329 }
5330 #undef SYSCTL_PHYSMEM
5331 #else
5332 /* Long longs are used here because with PAE, a 32-bit
5333 machine can have more than 4GB of physical memory */
5334
5335 long long num_pages = sysconf(_SC_PHYS_PAGES);
5336 long long page_size = sysconf(_SC_PAGESIZE);
5337 if(num_pages == -1 || page_size == -1)
5338 return 0;
5339 phys_mem = num_pages * page_size >> 20;
5340
5341 #endif
5342
5343 if(phys_mem < SQUASHFS_LOWMEM)
5344 BAD_ERROR("Mksquashfs requires more physical memory than is "
5345 "available!\n");
5346
5347 return phys_mem;
5348 }
5349
5350
5351 void check_usable_phys_mem(int total_mem)
5352 {
5353 /*
5354 * We want to allow users to use as much of their physical
5355 * memory as they wish. However, for practical reasons there are
5356 * limits which need to be imposed, to protect users from themselves
5357 * and to prevent people from using Mksquashfs as a DOS attack by using
5358 * all physical memory. Mksquashfs uses memory to cache data from disk
5359 * to optimise performance. It is pointless to ask it to use more
5360 * than 75% of physical memory, as this causes thrashing and it is thus
5361 * self-defeating.
5362 */
5363 int mem = get_physical_memory();
5364
5365 mem = (mem >> 1) + (mem >> 2); /* 75% */
5366
5367 if(total_mem > mem && mem) {
5368 ERROR("Total memory requested is more than 75%% of physical "
5369 "memory.\n");
5370 ERROR("Mksquashfs uses memory to cache data from disk to "
5371 "optimise performance.\n");
5372 ERROR("It is pointless to ask it to use more than this amount "
5373 "of memory, as this\n");
5374 ERROR("causes thrashing and it is thus self-defeating.\n");
5375 BAD_ERROR("Requested memory size too large\n");
5376 }
5377
5378 if(sizeof(void *) == 4 && total_mem > 2048) {
5379 /*
5380 * If we're running on a kernel with PAE or on a 64-bit kernel,
5381 * then the 75% physical memory limit can still easily exceed
5382 * the addressable memory by this process.
5383 *
5384 * Due to the typical kernel/user-space split (1GB/3GB, or
5385 * 2GB/2GB), we have to conservatively assume the 32-bit
5386 * processes can only address 2-3GB. So refuse if the user
5387 * tries to allocate more than 2GB.
5388 */
5389 ERROR("Total memory requested may exceed maximum "
5390 "addressable memory by this process\n");
5391 BAD_ERROR("Requested memory size too large\n");
5392 }
5393 }
5394
5395
5396 int get_default_phys_mem()
5397 {
5398 /*
5399 * get_physical_memory() relies on /proc being mounted.
5400 * If it fails, issue a warning, and use
5401 * SQUASHFS_LOWMEM / SQUASHFS_TAKE as default,
5402 * and allow a larger value to be set with -mem.
5403 */
5404 int mem = get_physical_memory();
5405
5406 if(mem == 0) {
5407 mem = SQUASHFS_LOWMEM / SQUASHFS_TAKE;
5408
5409 ERROR("Warning: Cannot get size of physical memory, probably "
5410 "because /proc is missing.\n");
5411 ERROR("Warning: Defaulting to minimal use of %d Mbytes, use "
5412 "-mem to set a better value,\n", mem);
5413 ERROR("Warning: or fix /proc.\n");
5414 } else
5415 mem /= SQUASHFS_TAKE;
5416
5417 if(sizeof(void *) == 4 && mem > 640) {
5418 /*
5419 * If we're running on a kernel with PAE or on a 64-bit kernel,
5420 * the default memory usage can exceed the addressable
5421 * memory by this process.
5422 * Due to the typical kernel/user-space split (1GB/3GB, or
5423 * 2GB/2GB), we have to conservatively assume the 32-bit
5424 * processes can only address 2-3GB. So limit the default
5425 * usage to 640M, which gives room for other data.
5426 */
5427 mem = 640;
5428 }
5429
5430 return mem;
5431 }
5432
5433
5434 void calculate_queue_sizes(int mem, int *readq, int *fragq, int *bwriteq,
5435 int *fwriteq)
5436 {
5437 *readq = mem / SQUASHFS_READQ_MEM;
5438 *bwriteq = mem / SQUASHFS_BWRITEQ_MEM;
5439 *fwriteq = mem / SQUASHFS_FWRITEQ_MEM;
5440 *fragq = mem - *readq - *bwriteq - *fwriteq;
5441 }
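/*
 * Illustrative arithmetic only (the real divisors are the
 * SQUASHFS_READQ_MEM, SQUASHFS_BWRITEQ_MEM and SQUASHFS_FWRITEQ_MEM values
 * used above): if mem were 512 Mbytes and the divisors 4, 6 and 12, the
 * read, block-write and fragment-write queues would get 128, 85 and 42
 * Mbytes, with the remaining 257 Mbytes going to the fragment queue; the
 * four values always sum to mem.
 */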
5442
5443
5444 #define VERSION() \
5445 printf("mksquashfs version 4.3-git (2014/09/12)\n");\
5446 printf("copyright (C) 2014 Phillip Lougher "\
5447 "<phillip@squashfs.org.uk>\n\n"); \
5448 printf("This program is free software; you can redistribute it and/or"\
5449 "\n");\
5450 printf("modify it under the terms of the GNU General Public License"\
5451 "\n");\
5452 printf("as published by the Free Software Foundation; either version "\
5453 "2,\n");\
5454 printf("or (at your option) any later version.\n\n");\
5455 printf("This program is distributed in the hope that it will be "\
5456 "useful,\n");\
5457 printf("but WITHOUT ANY WARRANTY; without even the implied warranty "\
5458 "of\n");\
5459 printf("MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the"\
5460 "\n");\
5461 printf("GNU General Public License for more details.\n");
5462 int main(int argc, char *argv[])
5463 {
5464 struct stat buf, source_buf;
5465 int res, i;
5466 char *b, *root_name = NULL;
5467 int keep_as_directory = FALSE;
5468 squashfs_inode inode;
5469 int readq;
5470 int fragq;
5471 int bwriteq;
5472 int fwriteq;
5473 int total_mem = get_default_phys_mem();
5474 int progress = TRUE;
5475 int force_progress = FALSE;
5476 struct file_buffer **fragment = NULL;
5477 /* ANDROID CHANGES START*/
5478 #ifdef ANDROID
5479 const char *fs_config_file = NULL;
5480 #endif
5481 /* ANDROID CHANGES END */
5482
5483 if(argc > 1 && strcmp(argv[1], "-version") == 0) {
5484 VERSION();
5485 exit(0);
5486 }
5487
5488 block_log = slog(block_size);
5489 calculate_queue_sizes(total_mem, &readq, &fragq, &bwriteq, &fwriteq);
5490
5491 for(i = 1; i < argc && argv[i][0] != '-'; i++);
5492 if(i < 3)
5493 goto printOptions;
5494 source_path = argv + 1;
5495 source = i - 2;
5496
5497 /*
5498 * Scan the command line for the -comp xxx option; this is to ensure
5499 * any -X compressor specific options are passed to the
5500 * correct compressor
5501 */
5502 for(; i < argc; i++) {
5503 struct compressor *prev_comp = comp;
5504
5505 if(strcmp(argv[i], "-comp") == 0) {
5506 if(++i == argc) {
5507 ERROR("%s: -comp missing compression type\n",
5508 argv[0]);
5509 exit(1);
5510 }
5511 comp = lookup_compressor(argv[i]);
5512 if(!comp->supported) {
5513 ERROR("%s: Compressor \"%s\" is not supported!"
5514 "\n", argv[0], argv[i]);
5515 ERROR("%s: Compressors available:\n", argv[0]);
5516 display_compressors("", COMP_DEFAULT);
5517 exit(1);
5518 }
5519 if(prev_comp != NULL && prev_comp != comp) {
5520 ERROR("%s: -comp multiple conflicting -comp"
5521 " options specified on command line"
5522 ", previously %s, now %s\n", argv[0],
5523 prev_comp->name, comp->name);
5524 exit(1);
5525 }
5526 compressor_opt_parsed = 1;
5527
5528 } else if(strcmp(argv[i], "-e") == 0)
5529 break;
5530 else if(strcmp(argv[i], "-root-becomes") == 0 ||
5531 strcmp(argv[i], "-ef") == 0 ||
5532 strcmp(argv[i], "-pf") == 0 ||
5533 strcmp(argv[i], "-vaf") == 0 ||
5534 strcmp(argv[i], "-comp") == 0)
5535 i++;
5536 }
5537
5538 /*
5539 * if no -comp option specified lookup default compressor. Note the
5540 * Makefile ensures the default compressor has been built, and so we
5541 * don't need to check for failure here
5542 */
5543 if(comp == NULL)
5544 comp = lookup_compressor(COMP_DEFAULT);
5545
5546 for(i = source + 2; i < argc; i++) {
5547 if(strcmp(argv[i], "-action") == 0 ||
5548 strcmp(argv[i], "-a") ==0) {
5549 if(++i == argc) {
5550 ERROR("%s: %s missing action\n",
5551 argv[0], argv[i - 1]);
5552 exit(1);
5553 }
5554 res = parse_action(argv[i], ACTION_LOG_NONE);
5555 if(res == 0)
5556 exit(1);
5557
5558 } else if(strcmp(argv[i], "-verbose-action") == 0 ||
5559 strcmp(argv[i], "-va") ==0) {
5560 if(++i == argc) {
5561 ERROR("%s: %s missing action\n",
5562 argv[0], argv[i - 1]);
5563 exit(1);
5564 }
5565 res = parse_action(argv[i], ACTION_LOG_VERBOSE);
5566 if(res == 0)
5567 exit(1);
5568
5569 } else if(strcmp(argv[i], "-true-action") == 0 ||
5570 strcmp(argv[i], "-ta") ==0) {
5571 if(++i == argc) {
5572 ERROR("%s: %s missing action\n",
5573 argv[0], argv[i - 1]);
5574 exit(1);
5575 }
5576 res = parse_action(argv[i], ACTION_LOG_TRUE);
5577 if(res == 0)
5578 exit(1);
5579
5580 } else if(strcmp(argv[i], "-false-action") == 0 ||
5581 strcmp(argv[i], "-fa") ==0) {
5582 if(++i == argc) {
5583 ERROR("%s: %s missing action\n",
5584 argv[0], argv[i - 1]);
5585 exit(1);
5586 }
5587 res = parse_action(argv[i], ACTION_LOG_FALSE);
5588 if(res == 0)
5589 exit(1);
5590
5591 } else if(strcmp(argv[i], "-action-file") == 0 ||
5592 strcmp(argv[i], "-af") ==0) {
5593 if(++i == argc) {
5594 ERROR("%s: %s missing filename\n", argv[0],
5595 argv[i - 1]);
5596 exit(1);
5597 }
5598 if(read_action_file(argv[i], ACTION_LOG_NONE) == FALSE)
5599 exit(1);
5600
5601 } else if(strcmp(argv[i], "-verbose-action-file") == 0 ||
5602 strcmp(argv[i], "-vaf") ==0) {
5603 if(++i == argc) {
5604 ERROR("%s: %s missing filename\n", argv[0],
5605 argv[i - 1]);
5606 exit(1);
5607 }
5608 if(read_action_file(argv[i], ACTION_LOG_VERBOSE) == FALSE)
5609 exit(1);
5610
5611 } else if(strcmp(argv[i], "-true-action-file") == 0 ||
5612 strcmp(argv[i], "-taf") ==0) {
5613 if(++i == argc) {
5614 ERROR("%s: %s missing filename\n", argv[0],
5615 argv[i - 1]);
5616 exit(1);
5617 }
5618 if(read_action_file(argv[i], ACTION_LOG_TRUE) == FALSE)
5619 exit(1);
5620
5621 } else if(strcmp(argv[i], "-false-action-file") == 0 ||
5622 strcmp(argv[i], "-faf") ==0) {
5623 if(++i == argc) {
5624 ERROR("%s: %s missing filename\n", argv[0],
5625 argv[i - 1]);
5626 exit(1);
5627 }
5628 if(read_action_file(argv[i], ACTION_LOG_FALSE) == FALSE)
5629 exit(1);
5630
5631 } else if(strcmp(argv[i], "-comp") == 0)
5632 /* parsed previously */
5633 i++;
5634
5635 else if(strncmp(argv[i], "-X", 2) == 0) {
5636 int args;
5637
5638 if(strcmp(argv[i] + 2, "help") == 0)
5639 goto print_compressor_options;
5640
5641 args = compressor_options(comp, argv + i, argc - i);
5642 if(args < 0) {
5643 if(args == -1) {
5644 ERROR("%s: Unrecognised compressor"
5645 " option %s\n", argv[0],
5646 argv[i]);
5647 if(!compressor_opt_parsed)
5648 ERROR("%s: Did you forget to"
5649 " specify -comp?\n",
5650 argv[0]);
5651 print_compressor_options:
5652 ERROR("%s: selected compressor \"%s\""
5653 ". Options supported: %s\n",
5654 argv[0], comp->name,
5655 comp->usage ? "" : "none");
5656 if(comp->usage)
5657 comp->usage();
5658 }
5659 exit(1);
5660 }
5661 i += args;
5662
5663 } else if(strcmp(argv[i], "-pf") == 0) {
5664 if(++i == argc) {
5665 ERROR("%s: -pf missing filename\n", argv[0]);
5666 exit(1);
5667 }
5668 if(read_pseudo_file(argv[i]) == FALSE)
5669 exit(1);
5670 } else if(strcmp(argv[i], "-p") == 0) {
5671 if(++i == argc) {
5672 ERROR("%s: -p missing pseudo file definition\n",
5673 argv[0]);
5674 exit(1);
5675 }
5676 if(read_pseudo_def(argv[i]) == FALSE)
5677 exit(1);
5678 } else if(strcmp(argv[i], "-recover") == 0) {
5679 if(++i == argc) {
5680 ERROR("%s: -recover missing recovery file\n",
5681 argv[0]);
5682 exit(1);
5683 }
5684 read_recovery_data(argv[i], argv[source + 1]);
5685 } else if(strcmp(argv[i], "-no-recovery") == 0)
5686 recover = FALSE;
5687 else if(strcmp(argv[i], "-wildcards") == 0) {
5688 old_exclude = FALSE;
5689 use_regex = FALSE;
5690 } else if(strcmp(argv[i], "-regex") == 0) {
5691 old_exclude = FALSE;
5692 use_regex = TRUE;
5693 } else if(strcmp(argv[i], "-no-sparse") == 0)
5694 sparse_files = FALSE;
5695 else if(strcmp(argv[i], "-no-progress") == 0)
5696 progress = FALSE;
5697 else if(strcmp(argv[i], "-progress") == 0)
5698 force_progress = TRUE;
5699 else if(strcmp(argv[i], "-no-exports") == 0)
5700 exportable = FALSE;
5701 else if(strcmp(argv[i], "-processors") == 0) {
5702 if((++i == argc) || !parse_num(argv[i], &processors)) {
5703 ERROR("%s: -processors missing or invalid "
5704 "processor number\n", argv[0]);
5705 exit(1);
5706 }
5707 if(processors < 1) {
5708 ERROR("%s: -processors should be 1 or larger\n",
5709 argv[0]);
5710 exit(1);
5711 }
5712 } else if(strcmp(argv[i], "-read-queue") == 0) {
5713 if((++i == argc) || !parse_num(argv[i], &readq)) {
5714 ERROR("%s: -read-queue missing or invalid "
5715 "queue size\n", argv[0]);
5716 exit(1);
5717 }
5718 if(readq < 1) {
5719 ERROR("%s: -read-queue should be 1 megabyte or "
5720 "larger\n", argv[0]);
5721 exit(1);
5722 }
5723 } else if(strcmp(argv[i], "-write-queue") == 0) {
5724 if((++i == argc) || !parse_num(argv[i], &bwriteq)) {
5725 ERROR("%s: -write-queue missing or invalid "
5726 "queue size\n", argv[0]);
5727 exit(1);
5728 }
5729 if(bwriteq < 2) {
5730 ERROR("%s: -write-queue should be 2 megabytes "
5731 "or larger\n", argv[0]);
5732 exit(1);
5733 }
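			/*
			 * split the requested write queue size between the
			 * block writer and fragment writer queues
			 */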
5734 fwriteq = bwriteq >> 1;
5735 bwriteq -= fwriteq;
5736 } else if(strcmp(argv[i], "-fragment-queue") == 0) {
5737 if((++i == argc) || !parse_num(argv[i], &fragq)) {
5738 ERROR("%s: -fragment-queue missing or invalid "
5739 "queue size\n", argv[0]);
5740 exit(1);
5741 }
5742 if(fragq < 1) {
5743 ERROR("%s: -fragment-queue should be 1 "
5744 "megabyte or larger\n", argv[0]);
5745 exit(1);
5746 }
5747 } else if(strcmp(argv[i], "-mem") == 0) {
5748 long long number;
5749
5750 if((++i == argc) ||
5751 !parse_numberll(argv[i], &number, 1)) {
5752 ERROR("%s: -mem missing or invalid mem size\n",
5753 argv[0]);
5754 exit(1);
5755 }
5756
5757 /*
5758 * convert from bytes to Mbytes, ensuring the value
5759 * does not overflow a signed int
5760 */
5761 if(number >= (1LL << 51)) {
5762 ERROR("%s: -mem invalid mem size\n", argv[0]);
5763 exit(1);
5764 }
5765
5766 total_mem = number / 1048576;
5767 if(total_mem < (SQUASHFS_LOWMEM / SQUASHFS_TAKE)) {
5768 ERROR("%s: -mem should be %d Mbytes or "
5769 "larger\n", argv[0],
5770 SQUASHFS_LOWMEM / SQUASHFS_TAKE);
5771 exit(1);
5772 }
5773 calculate_queue_sizes(total_mem, &readq, &fragq,
5774 &bwriteq, &fwriteq);
5775 } else if(strcmp(argv[i], "-b") == 0) {
5776 if(++i == argc) {
5777 ERROR("%s: -b missing block size\n", argv[0]);
5778 exit(1);
5779 }
5780 if(!parse_number(argv[i], &block_size, 1)) {
5781 ERROR("%s: -b invalid block size\n", argv[0]);
5782 exit(1);
5783 }
5784 if((block_log = slog(block_size)) == 0) {
5785 ERROR("%s: -b block size not power of two or "
5786 "not between 4096 and 1Mbyte\n",
5787 argv[0]);
5788 exit(1);
5789 }
5790 } else if(strcmp(argv[i], "-ef") == 0) {
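			/*
			 * only check the filename is present here; the exclude
			 * file is processed later, after the destination file
			 * has possibly been created
			 */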
5791 if(++i == argc) {
5792 ERROR("%s: -ef missing filename\n", argv[0]);
5793 exit(1);
5794 }
5795 } else if(strcmp(argv[i], "-no-duplicates") == 0)
5796 duplicate_checking = FALSE;
5797
5798 else if(strcmp(argv[i], "-no-fragments") == 0)
5799 no_fragments = TRUE;
5800
5801 else if(strcmp(argv[i], "-always-use-fragments") == 0)
5802 always_use_fragments = TRUE;
5803
5804 else if(strcmp(argv[i], "-sort") == 0) {
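			/*
			 * only check the filename is present here; sort files
			 * are read later, after the exclude files have been
			 * processed
			 */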
5805 if(++i == argc) {
5806 ERROR("%s: -sort missing filename\n", argv[0]);
5807 exit(1);
5808 }
5809 } else if(strcmp(argv[i], "-all-root") == 0 ||
5810 strcmp(argv[i], "-root-owned") == 0)
5811 global_uid = global_gid = 0;
5812
5813 else if(strcmp(argv[i], "-force-uid") == 0) {
5814 if(++i == argc) {
5815 ERROR("%s: -force-uid missing uid or user\n",
5816 argv[0]);
5817 exit(1);
5818 }
5819 			if((global_uid = strtoll(argv[i], &b, 10)), *b == '\0') {
5820 if(global_uid < 0 || global_uid >
5821 (((long long) 1 << 32) - 1)) {
5822 ERROR("%s: -force-uid uid out of range"
5823 "\n", argv[0]);
5824 exit(1);
5825 }
5826 } else {
5827 struct passwd *uid = getpwnam(argv[i]);
5828 if(uid)
5829 global_uid = uid->pw_uid;
5830 else {
5831 ERROR("%s: -force-uid invalid uid or "
5832 "unknown user\n", argv[0]);
5833 exit(1);
5834 }
5835 }
5836 } else if(strcmp(argv[i], "-force-gid") == 0) {
5837 if(++i == argc) {
5838 ERROR("%s: -force-gid missing gid or group\n",
5839 argv[0]);
5840 exit(1);
5841 }
5842 			if((global_gid = strtoll(argv[i], &b, 10)), *b == '\0') {
5843 if(global_gid < 0 || global_gid >
5844 (((long long) 1 << 32) - 1)) {
5845 ERROR("%s: -force-gid gid out of range"
5846 "\n", argv[0]);
5847 exit(1);
5848 }
5849 } else {
5850 struct group *gid = getgrnam(argv[i]);
5851 if(gid)
5852 global_gid = gid->gr_gid;
5853 else {
5854 ERROR("%s: -force-gid invalid gid or "
5855 "unknown group\n", argv[0]);
5856 exit(1);
5857 }
5858 }
5859 } else if(strcmp(argv[i], "-noI") == 0 ||
5860 strcmp(argv[i], "-noInodeCompression") == 0)
5861 noI = TRUE;
5862
5863 else if(strcmp(argv[i], "-noD") == 0 ||
5864 strcmp(argv[i], "-noDataCompression") == 0)
5865 noD = TRUE;
5866
5867 else if(strcmp(argv[i], "-noF") == 0 ||
5868 strcmp(argv[i], "-noFragmentCompression") == 0)
5869 noF = TRUE;
5870
5871 else if(strcmp(argv[i], "-noX") == 0 ||
5872 strcmp(argv[i], "-noXattrCompression") == 0)
5873 noX = TRUE;
5874
5875 else if(strcmp(argv[i], "-no-xattrs") == 0)
5876 no_xattrs = TRUE;
5877
5878 else if(strcmp(argv[i], "-xattrs") == 0)
5879 no_xattrs = FALSE;
5880
5881 /* ANDROID CHANGES START*/
5882 #ifdef ANDROID
5883 else if(strcmp(argv[i], "-context-file") == 0) {
5884 if(++i == argc) {
5885 ERROR("%s: -context-file: missing file name\n",
5886 argv[0]);
5887 exit(1);
5888 }
5889 context_file = argv[i];
5890 }
5891 else if(strcmp(argv[i], "-fs-config-file") == 0) {
5892 if(++i == argc) {
5893 ERROR("%s: -fs-config-file: missing file name\n",
5894 argv[0]);
5895 exit(1);
5896 }
5897 fs_config_file = argv[i];
5898 } else if(strcmp(argv[i], "-whitelist") == 0) {
5899 if(++i == argc) {
5900 ERROR("%s: -whitelist missing filename\n", argv[0]);
5901 exit(1);
5902 }
5903 whitelist_filename = argv[i];
5904 }
5905 else if(strcmp(argv[i], "-t") == 0) {
5906 if(++i == argc) {
5907 ERROR("%s: -t missing compression threshold percentage\n", argv[0]);
5908 exit(1);
5909 }
5910 if(!parse_number(argv[i], &compress_thresh_per, 1)) {
5911 ERROR("%s: -t invalid compression threshold percentage\n", argv[0]);
5912 exit(1);
5913 }
5914 if(compress_thresh_per > 100 || compress_thresh_per < 0) {
5915 ERROR("%s: -t compression threshold percentage not between 0 and 100\n",
5916 argv[0]);
5917 exit(1);
5918 }
5919 }
5920 #endif
5921 /* ANDROID CHANGES END */
5922 else if(strcmp(argv[i], "-nopad") == 0)
5923 nopad = TRUE;
5924
5925 else if(strcmp(argv[i], "-info") == 0)
5926 silent = FALSE;
5927
5928 else if(strcmp(argv[i], "-e") == 0)
5929 break;
5930
5931 else if(strcmp(argv[i], "-noappend") == 0)
5932 delete = TRUE;
5933
5934 else if(strcmp(argv[i], "-keep-as-directory") == 0)
5935 keep_as_directory = TRUE;
5936 /* ANDROID CHANGES START*/
5937 #ifdef ANDROID
5938 else if(strcmp(argv[i], "-android-fs-config") == 0)
5939 android_config = TRUE;
5940 else if(strcmp(argv[i], "-mount-point") == 0) {
5941 if(++i == argc) {
5942 ERROR("%s: -mount-point: missing mount point name\n",
5943 argv[0]);
5944 exit(1);
5945 }
5946 mount_point = argv[i];
5947 }
5948 else if(strcmp(argv[i], "-product-out") == 0) {
5949 if(++i == argc) {
5950 ERROR("%s: -product-out: missing path name\n",
5951 argv[0]);
5952 exit(1);
5953 }
5954 target_out_path = argv[i];
5955 }
5956 else if(strcmp(argv[i], "-disable-4k-align") == 0)
5957 align_4k_blocks = FALSE;
5958 else if(strcmp(argv[i], "-block-map") == 0) {
5959 if(++i == argc) {
5960 ERROR("%s: -block-map: missing path name\n",
5961 argv[0]);
5962 exit(1);
5963 }
5964 block_map_file = fopen(argv[i], "w");
5965 if (block_map_file == NULL) {
5966 ERROR("%s: -block-map: failed to open %s\n",
5967 argv[0], argv[i]);
5968 exit(1);
5969 }
5970 if (!align_4k_blocks) {
5971 				ERROR("WARNING: Using a block map without 4k alignment "
5972 					"is not ideal, as block map offsets are multiples of 4k; "
5973 					"consider not passing -disable-4k-align\n");
5974 }
5975 }
5976 #endif
5977 /* ANDROID CHANGES END */
5978
5979 else if(strcmp(argv[i], "-exit-on-error") == 0)
5980 exit_on_error = TRUE;
5981
5982 else if(strcmp(argv[i], "-root-becomes") == 0) {
5983 if(++i == argc) {
5984 ERROR("%s: -root-becomes: missing name\n",
5985 argv[0]);
5986 exit(1);
5987 }
5988 root_name = argv[i];
5989 } else if(strcmp(argv[i], "-version") == 0) {
5990 VERSION();
5991 } else {
5992 ERROR("%s: invalid option\n\n", argv[0]);
5993 printOptions:
5994 ERROR("SYNTAX:%s source1 source2 ... dest [options] "
5995 "[-e list of exclude\ndirs/files]\n", argv[0]);
5996 ERROR("\nFilesystem build options:\n");
5997 ERROR("-comp <comp>\t\tselect <comp> compression\n");
5998 ERROR("\t\t\tCompressors available:\n");
5999 display_compressors("\t\t\t", COMP_DEFAULT);
6000 ERROR("-b <block_size>\t\tset data block to "
6001 "<block_size>. Default 128 Kbytes\n");
6002 ERROR("\t\t\tOptionally a suffix of K or M can be"
6003 " given to specify\n\t\t\tKbytes or Mbytes"
6004 " respectively\n");
6005 ERROR("-no-exports\t\tdon't make the filesystem "
6006 "exportable via NFS\n");
6007 ERROR("-no-sparse\t\tdon't detect sparse files\n");
6008 ERROR("-no-xattrs\t\tdon't store extended attributes"
6009 NOXOPT_STR "\n");
6010 ERROR("-xattrs\t\t\tstore extended attributes" XOPT_STR
6011 "\n");
6012 /* ANDROID CHANGES START*/
6013 #ifdef ANDROID
6014 			ERROR("-context-file <file>\tApply SELinux security "
6015 "xattrs from context-file instead\n\t\t\t"
6016 "of reading xattrs from file system\n");
6017 ERROR("-fs-config-file <file>\tAndroid specific "
6018 "filesystem config file\n");
6019 			ERROR("-t <compress_thresh>\tset minimum "
6020 				"acceptable compression ratio of a block to\n\t\t\t"
6021 				"<compress_thresh> per cent, otherwise don't compress. "
6022 				"Default 0%%\n");
6023 			ERROR("-whitelist <file>\tAndroid specific whitelist, "
6024 				"one entry per line (no wildcards)\n");
6025 #endif
6026 /* ANDROID CHANGES END */
6027 ERROR("-noI\t\t\tdo not compress inode table\n");
6028 ERROR("-noD\t\t\tdo not compress data blocks\n");
6029 ERROR("-noF\t\t\tdo not compress fragment blocks\n");
6030 ERROR("-noX\t\t\tdo not compress extended "
6031 "attributes\n");
6032 ERROR("-no-fragments\t\tdo not use fragments\n");
6033 ERROR("-always-use-fragments\tuse fragment blocks for "
6034 "files larger than block size\n");
6035 ERROR("-no-duplicates\t\tdo not perform duplicate "
6036 "checking\n");
6037 ERROR("-all-root\t\tmake all files owned by root\n");
6038 ERROR("-force-uid uid\t\tset all file uids to uid\n");
6039 ERROR("-force-gid gid\t\tset all file gids to gid\n");
6040 ERROR("-nopad\t\t\tdo not pad filesystem to a multiple "
6041 "of 4K\n");
6042 ERROR("-keep-as-directory\tif one source directory is "
6043 "specified, create a root\n");
6044 ERROR("\t\t\tdirectory containing that directory, "
6045 "rather than the\n");
6046 ERROR("\t\t\tcontents of the directory\n");
6047 /* ANDROID CHANGES START*/
6048 #ifdef ANDROID
6049 ERROR("-android-fs-config\tuse android fs config "
6050 "for mode, uid, and gids of inodes\n");
6051 			ERROR("-mount-point <name>\tNeeds to be provided when "
6052 				"-android-fs-config or -context-file\n\t\t\tare "
6053 				"used and the source directory is not the mount point\n");
6054 ERROR("-product-out <path>\tPRODUCT_OUT directory to "
6055 "read device specific FS rules files from\n");
6056 			ERROR("-disable-4k-align\tDon't 4k align data blocks. By default blocks are 4k aligned\n");
6057 ERROR("-block-map <path>\tGenerate a block map for non-fragment files\n");
6058 #endif
6059 /* ANDROID CHANGES END */
6060 ERROR("\nFilesystem filter options:\n");
6061 ERROR("-p <pseudo-definition>\tAdd pseudo file "
6062 "definition\n");
6063 ERROR("-pf <pseudo-file>\tAdd list of pseudo file "
6064 "definitions\n");
6065 ERROR("-sort <sort_file>\tsort files according to "
6066 "priorities in <sort_file>. One\n");
6067 ERROR("\t\t\tfile or dir with priority per line. "
6068 "Priority -32768 to\n");
6069 ERROR("\t\t\t32767, default priority 0\n");
6070 ERROR("-ef <exclude_file>\tlist of exclude dirs/files."
6071 " One per line\n");
6072 ERROR("-wildcards\t\tAllow extended shell wildcards "
6073 "(globbing) to be used in\n\t\t\texclude "
6074 "dirs/files\n");
6075 ERROR("-regex\t\t\tAllow POSIX regular expressions to "
6076 "be used in exclude\n\t\t\tdirs/files\n");
6077 ERROR("\nFilesystem append options:\n");
6078 ERROR("-noappend\t\tdo not append to existing "
6079 "filesystem\n");
6080 ERROR("-root-becomes <name>\twhen appending source "
6081 "files/directories, make the\n");
6082 ERROR("\t\t\toriginal root become a subdirectory in "
6083 "the new root\n");
6084 ERROR("\t\t\tcalled <name>, rather than adding the new "
6085 "source items\n");
6086 ERROR("\t\t\tto the original root\n");
6087 ERROR("\nMksquashfs runtime options:\n");
6088 ERROR("-version\t\tprint version, licence and "
6089 "copyright message\n");
6090 ERROR("-exit-on-error\t\ttreat normally ignored errors "
6091 "as fatal\n");
6092 ERROR("-recover <name>\t\trecover filesystem data "
6093 "using recovery file <name>\n");
6094 ERROR("-no-recovery\t\tdon't generate a recovery "
6095 "file\n");
6096 ERROR("-info\t\t\tprint files written to filesystem\n");
6097 ERROR("-no-progress\t\tdon't display the progress "
6098 "bar\n");
6099 ERROR("-progress\t\tdisplay progress bar when using "
6100 "the -info option\n");
6101 ERROR("-processors <number>\tUse <number> processors."
6102 " By default will use number of\n");
6103 ERROR("\t\t\tprocessors available\n");
6104 ERROR("-mem <size>\t\tUse <size> physical memory. "
6105 "Currently set to %dM\n", total_mem);
6106 ERROR("\t\t\tOptionally a suffix of K, M or G can be"
6107 " given to specify\n\t\t\tKbytes, Mbytes or"
6108 " Gbytes respectively\n");
6109 ERROR("\nMiscellaneous options:\n");
6110 ERROR("-root-owned\t\talternative name for -all-root"
6111 "\n");
6112 ERROR("-noInodeCompression\talternative name for -noI"
6113 "\n");
6114 ERROR("-noDataCompression\talternative name for -noD"
6115 "\n");
6116 ERROR("-noFragmentCompression\talternative name for "
6117 "-noF\n");
6118 ERROR("-noXattrCompression\talternative name for "
6119 "-noX\n");
6120 ERROR("\n-Xhelp\t\t\tprint compressor options for"
6121 " selected compressor\n");
6122 ERROR("\nCompressors available and compressor specific "
6123 "options:\n");
6124 display_compressor_usage(COMP_DEFAULT);
6125 exit(1);
6126 }
6127 }
6128
6129 /* ANDROID CHANGES START*/
6130 #ifdef ANDROID
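	/*
	 * Android: choose where ownership and permission information comes
	 * from - a canned fs_config file if one was supplied, otherwise the
	 * built-in fs_config rules when a mount point has been given.  Also
	 * load the whitelist file if one was specified
	 */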
6131 if (fs_config_file) {
6132 if (load_canned_fs_config(fs_config_file) < 0) {
6133 fprintf(stderr, "failed to load %s\n", fs_config_file);
6134 exit(1);
6135 }
6136 fs_config_func = canned_fs_config;
6137 } else if (mount_point) {
6138 fs_config_func = fs_config;
6139 }
6140 if (whitelist_filename)
6141 process_whitelist_file(whitelist_filename);
6142 #endif
6143 /* ANDROID CHANGES END */
6144
6145 /*
6146 * Some compressors may need the options to be checked for validity
6147 * once all the options have been processed
6148 */
6149 res = compressor_options_post(comp, block_size);
6150 if(res)
6151 EXIT_MKSQUASHFS();
6152
6153 /*
6154 * If the -info option has been selected then disable the
6155 * progress bar unless it has been explicitly enabled with
6156 * the -progress option
6157 */
6158 if(!silent)
6159 progress = force_progress;
6160
6161 #ifdef SQUASHFS_TRACE
6162 /*
6163 * Disable progress bar if full debug tracing is enabled.
6164 * The progress bar in this case just gets in the way of the
6165 * debug trace output
6166 */
6167 progress = FALSE;
6168 #endif
6169
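	/*
	 * check each source path exists.  The attributes of the last source
	 * are kept in source_buf and used later to decide how a single
	 * directory source is scanned
	 */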
6170 for(i = 0; i < source; i++)
6171 if(lstat(source_path[i], &source_buf) == -1) {
6172 			fprintf(stderr, "Cannot stat source \"%s\" "
6173 				"because %s\n", source_path[i],
6174 				strerror(errno));
6175 EXIT_MKSQUASHFS();
6176 }
6177
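	/*
	 * open the destination.  If it does not exist, create it as a regular
	 * file (implying -noappend); otherwise it must be an existing regular
	 * file or a block device
	 */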
6178 destination_file = argv[source + 1];
6179 if(stat(argv[source + 1], &buf) == -1) {
6180 if(errno == ENOENT) { /* Does not exist */
6181 fd = open(argv[source + 1], O_CREAT | O_TRUNC | O_RDWR,
6182 S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
6183 if(fd == -1) {
6184 perror("Could not create destination file");
6185 exit(1);
6186 }
6187 delete = TRUE;
6188 } else {
6189 perror("Could not stat destination file");
6190 exit(1);
6191 }
6192
6193 } else {
6194 if(S_ISBLK(buf.st_mode)) {
6195 if((fd = open(argv[source + 1], O_RDWR)) == -1) {
6196 perror("Could not open block device as "
6197 "destination");
6198 exit(1);
6199 }
6200 block_device = 1;
6201
6202 } else if(S_ISREG(buf.st_mode)) {
6203 fd = open(argv[source + 1], (delete ? O_TRUNC : 0) |
6204 O_RDWR);
6205 if(fd == -1) {
6206 perror("Could not open regular file for "
6207 "writing as destination");
6208 exit(1);
6209 }
6210 }
6211 else {
6212 			ERROR("Destination is not a block device or regular file\n");
6213 exit(1);
6214 }
6215
6216 }
6217
6218 /*
6219 	 * process the exclude files - must be done after the destination file
6220 	 * has possibly been created
6221 */
6222 for(i = source + 2; i < argc; i++)
6223 if(strcmp(argv[i], "-ef") == 0)
6224 /*
6225 * Note presence of filename arg has already
6226 * been checked
6227 */
6228 process_exclude_file(argv[++i]);
6229 else if(strcmp(argv[i], "-e") == 0)
6230 break;
6231 else if(strcmp(argv[i], "-root-becomes") == 0 ||
6232 strcmp(argv[i], "-sort") == 0 ||
6233 strcmp(argv[i], "-pf") == 0 ||
6234 strcmp(argv[i], "-af") == 0 ||
6235 strcmp(argv[i], "-vaf") == 0 ||
6236 strcmp(argv[i], "-comp") == 0)
6237 i++;
6238
6239 if(i != argc) {
6240 if(++i == argc) {
6241 ERROR("%s: -e missing arguments\n", argv[0]);
6242 EXIT_MKSQUASHFS();
6243 }
6244 while(i < argc)
6245 if(old_exclude)
6246 old_add_exclude(argv[i++]);
6247 else
6248 add_exclude(argv[i++]);
6249 }
6250
6251 	/* process the sort files - must be done after the exclude files */
6252 for(i = source + 2; i < argc; i++)
6253 if(strcmp(argv[i], "-sort") == 0) {
6254 int res = read_sort_file(argv[++i], source,
6255 source_path);
6256 if(res == FALSE)
6257 BAD_ERROR("Failed to read sort file\n");
6258 sorted ++;
6259 } else if(strcmp(argv[i], "-e") == 0)
6260 break;
6261 else if(strcmp(argv[i], "-root-becomes") == 0 ||
6262 strcmp(argv[i], "-ef") == 0 ||
6263 strcmp(argv[i], "-pf") == 0 ||
6264 strcmp(argv[i], "-af") == 0 ||
6265 strcmp(argv[i], "-vaf") == 0 ||
6266 strcmp(argv[i], "-comp") == 0)
6267 i++;
6268
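	/*
	 * appending: read the superblock of the existing filesystem and
	 * inherit its block size and filesystem flags, which override any
	 * conflicting options given on the command line
	 */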
6269 if(!delete) {
6270 comp = read_super(fd, &sBlk, argv[source + 1]);
6271 if(comp == NULL) {
6272 ERROR("Failed to read existing filesystem - will not "
6273 "overwrite - ABORTING!\n");
6274 ERROR("To force Mksquashfs to write to this block "
6275 "device or file use -noappend\n");
6276 EXIT_MKSQUASHFS();
6277 }
6278
6279 block_log = slog(block_size = sBlk.block_size);
6280 noI = SQUASHFS_UNCOMPRESSED_INODES(sBlk.flags);
6281 noD = SQUASHFS_UNCOMPRESSED_DATA(sBlk.flags);
6282 noF = SQUASHFS_UNCOMPRESSED_FRAGMENTS(sBlk.flags);
6283 noX = SQUASHFS_UNCOMPRESSED_XATTRS(sBlk.flags);
6284 no_fragments = SQUASHFS_NO_FRAGMENTS(sBlk.flags);
6285 always_use_fragments = SQUASHFS_ALWAYS_FRAGMENTS(sBlk.flags);
6286 duplicate_checking = SQUASHFS_DUPLICATES(sBlk.flags);
6287 exportable = SQUASHFS_EXPORTABLE(sBlk.flags);
6288 no_xattrs = SQUASHFS_NO_XATTRS(sBlk.flags);
6289 comp_opts = SQUASHFS_COMP_OPTS(sBlk.flags);
6290 }
6291
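	/*
	 * set up the queues and worker threads, and initialise a compressor
	 * stream for compressing metadata sized blocks
	 */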
6292 initialise_threads(readq, fragq, bwriteq, fwriteq, delete,
6293 destination_file);
6294
6295 res = compressor_init(comp, &stream, SQUASHFS_METADATA_SIZE, 0);
6296 if(res)
6297 BAD_ERROR("compressor_init failed\n");
6298
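	/*
	 * creating a new filesystem: write any compressor specific options
	 * directly after the superblock.  Otherwise (appending) read the
	 * existing filesystem metadata and save enough state to restore it
	 * if the append fails or is aborted
	 */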
6299 if(delete) {
6300 int size;
6301 void *comp_data = compressor_dump_options(comp, block_size,
6302 &size);
6303
6304 printf("Creating %d.%d filesystem on %s, block size %d.\n",
6305 SQUASHFS_MAJOR, SQUASHFS_MINOR, argv[source + 1], block_size);
6306
6307 /*
6308 * store any compressor specific options after the superblock,
6309 * and set the COMP_OPT flag to show that the filesystem has
6310 		 * compressor specific options
6311 */
6312 if(comp_data) {
6313 unsigned short c_byte = size | SQUASHFS_COMPRESSED_BIT;
6314
6315 SQUASHFS_INSWAP_SHORTS(&c_byte, 1);
6316 write_destination(fd, sizeof(struct squashfs_super_block),
6317 sizeof(c_byte), &c_byte);
6318 write_destination(fd, sizeof(struct squashfs_super_block) +
6319 sizeof(c_byte), size, comp_data);
6320 bytes = sizeof(struct squashfs_super_block) + sizeof(c_byte)
6321 + size;
6322 comp_opts = TRUE;
6323 } else
6324 bytes = sizeof(struct squashfs_super_block);
6325 } else {
6326 unsigned int last_directory_block, inode_dir_offset,
6327 inode_dir_file_size, root_inode_size,
6328 inode_dir_start_block, uncompressed_data,
6329 compressed_data, inode_dir_inode_number,
6330 inode_dir_parent_inode;
6331 unsigned int root_inode_start =
6332 SQUASHFS_INODE_BLK(sBlk.root_inode),
6333 root_inode_offset =
6334 SQUASHFS_INODE_OFFSET(sBlk.root_inode);
6335
6336 if((bytes = read_filesystem(root_name, fd, &sBlk, &inode_table,
6337 &data_cache, &directory_table,
6338 &directory_data_cache, &last_directory_block,
6339 &inode_dir_offset, &inode_dir_file_size,
6340 &root_inode_size, &inode_dir_start_block,
6341 &file_count, &sym_count, &dev_count, &dir_count,
6342 &fifo_count, &sock_count, &total_bytes,
6343 &total_inode_bytes, &total_directory_bytes,
6344 &inode_dir_inode_number,
6345 &inode_dir_parent_inode, add_old_root_entry,
6346 &fragment_table, &inode_lookup_table)) == 0) {
6347 ERROR("Failed to read existing filesystem - will not "
6348 "overwrite - ABORTING!\n");
6349 ERROR("To force Mksquashfs to write to this block "
6350 "device or file use -noappend\n");
6351 EXIT_MKSQUASHFS();
6352 }
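		/*
		 * round the existing fragment table up to a multiple of
		 * FRAG_SIZE entries so that new fragments can be appended
		 */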
6353 if((append_fragments = fragments = sBlk.fragments)) {
6354 fragment_table = realloc((char *) fragment_table,
6355 ((fragments + FRAG_SIZE - 1) & ~(FRAG_SIZE - 1))
6356 * sizeof(struct squashfs_fragment_entry));
6357 if(fragment_table == NULL)
6358 BAD_ERROR("Out of memory in save filesystem state\n");
6359 }
6360
6361 printf("Appending to existing %d.%d filesystem on %s, block "
6362 "size %d\n", SQUASHFS_MAJOR, SQUASHFS_MINOR, argv[source + 1],
6363 block_size);
6364 		printf("All -b, -noI, -noD, -noF, -noX, -no-duplicates, "
6365 			"-no-fragments, -always-use-fragments,\n-no-exports and "
6366 			"-comp options ignored\n");
6367 printf("\nIf appending is not wanted, please re-run with "
6368 "-noappend specified!\n\n");
6369
6370 compressed_data = (inode_dir_offset + inode_dir_file_size) &
6371 ~(SQUASHFS_METADATA_SIZE - 1);
6372 uncompressed_data = (inode_dir_offset + inode_dir_file_size) &
6373 (SQUASHFS_METADATA_SIZE - 1);
6374
6375 /* save original filesystem state for restoring ... */
6376 sfragments = fragments;
6377 sbytes = bytes;
6378 sinode_count = sBlk.inodes;
6379 scache_bytes = root_inode_offset + root_inode_size;
6380 sdirectory_cache_bytes = uncompressed_data;
6381 sdata_cache = malloc(scache_bytes);
6382 if(sdata_cache == NULL)
6383 BAD_ERROR("Out of memory in save filesystem state\n");
6384 sdirectory_data_cache = malloc(sdirectory_cache_bytes);
6385 if(sdirectory_data_cache == NULL)
6386 BAD_ERROR("Out of memory in save filesystem state\n");
6387 memcpy(sdata_cache, data_cache, scache_bytes);
6388 memcpy(sdirectory_data_cache, directory_data_cache +
6389 compressed_data, sdirectory_cache_bytes);
6390 sinode_bytes = root_inode_start;
6391 stotal_bytes = total_bytes;
6392 stotal_inode_bytes = total_inode_bytes;
6393 stotal_directory_bytes = total_directory_bytes +
6394 compressed_data;
6395 sfile_count = file_count;
6396 ssym_count = sym_count;
6397 sdev_count = dev_count;
6398 sdir_count = dir_count + 1;
6399 sfifo_count = fifo_count;
6400 ssock_count = sock_count;
6401 sdup_files = dup_files;
6402 sid_count = id_count;
6403 write_recovery_data(&sBlk);
6404 save_xattrs();
6405 appending = TRUE;
6406
6407 /*
6408 * set the filesystem state up to be able to append to the
6409 * original filesystem. The filesystem state differs depending
6410 * on whether we're appending to the original root directory, or
6411 * if the original root directory becomes a sub-directory
6412 * (root-becomes specified on command line, here root_name !=
6413 * NULL)
6414 */
6415 inode_bytes = inode_size = root_inode_start;
6416 directory_size = last_directory_block;
6417 cache_size = root_inode_offset + root_inode_size;
6418 directory_cache_size = inode_dir_offset + inode_dir_file_size;
6419 if(root_name) {
6420 sdirectory_bytes = last_directory_block;
6421 sdirectory_compressed_bytes = 0;
6422 root_inode_number = inode_dir_parent_inode;
6423 inode_no = sBlk.inodes + 2;
6424 directory_bytes = last_directory_block;
6425 directory_cache_bytes = uncompressed_data;
6426 memmove(directory_data_cache, directory_data_cache +
6427 compressed_data, uncompressed_data);
6428 cache_bytes = root_inode_offset + root_inode_size;
6429 add_old_root_entry(root_name, sBlk.root_inode,
6430 inode_dir_inode_number, SQUASHFS_DIR_TYPE);
6431 total_directory_bytes += compressed_data;
6432 dir_count ++;
6433 } else {
6434 sdirectory_compressed_bytes = last_directory_block -
6435 inode_dir_start_block;
6436 sdirectory_compressed =
6437 malloc(sdirectory_compressed_bytes);
6438 if(sdirectory_compressed == NULL)
6439 BAD_ERROR("Out of memory in save filesystem "
6440 "state\n");
6441 memcpy(sdirectory_compressed, directory_table +
6442 inode_dir_start_block,
6443 sdirectory_compressed_bytes);
6444 sdirectory_bytes = inode_dir_start_block;
6445 root_inode_number = inode_dir_inode_number;
6446 inode_no = sBlk.inodes + 1;
6447 directory_bytes = inode_dir_start_block;
6448 directory_cache_bytes = inode_dir_offset;
6449 cache_bytes = root_inode_offset;
6450 }
6451
6452 inode_count = file_count + dir_count + sym_count + dev_count +
6453 fifo_count + sock_count;
6454 }
6455
6456 if(path)
6457 paths = add_subdir(paths, path);
6458
6459 dump_actions();
6460 dump_pseudos();
6461
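	/*
	 * scan the sources and build the filesystem; the root directory
	 * inode is returned in inode.  A single directory source is merged
	 * into the root unless -keep-as-directory was specified
	 */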
6462 if(delete && !keep_as_directory && source == 1 &&
6463 S_ISDIR(source_buf.st_mode))
6464 dir_scan(&inode, source_path[0], scan1_readdir, progress);
6465 else if(!keep_as_directory && source == 1 &&
6466 S_ISDIR(source_buf.st_mode))
6467 dir_scan(&inode, source_path[0], scan1_single_readdir, progress);
6468 else
6469 dir_scan(&inode, "", scan1_encomp_readdir, progress);
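	/* fill in the superblock fields that are now known */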
6470 sBlk.root_inode = inode;
6471 sBlk.inodes = inode_count;
6472 sBlk.s_magic = SQUASHFS_MAGIC;
6473 sBlk.s_major = SQUASHFS_MAJOR;
6474 sBlk.s_minor = SQUASHFS_MINOR;
6475 sBlk.block_size = block_size;
6476 sBlk.block_log = block_log;
6477 sBlk.flags = SQUASHFS_MKFLAGS(noI, noD, noF, noX, no_fragments,
6478 always_use_fragments, duplicate_checking, exportable,
6479 no_xattrs, comp_opts);
6480 sBlk.mkfs_time = time(NULL);
6481
6482 disable_info();
6483
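	/*
	 * flush any remaining partially filled fragments, then wait until
	 * all outstanding fragments have been compressed and queued for
	 * writing
	 */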
6484 while((fragment = get_frag_action(fragment)))
6485 write_fragment(*fragment);
6486 unlock_fragments();
6487 pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex);
6488 pthread_mutex_lock(&fragment_mutex);
6489 while(fragments_outstanding) {
6490 pthread_mutex_unlock(&fragment_mutex);
6491 sched_yield();
6492 pthread_mutex_lock(&fragment_mutex);
6493 }
6494 pthread_cleanup_pop(1);
6495
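	/*
	 * tell the writer thread there is nothing more to write, and check
	 * it did not report a write error
	 */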
6496 queue_put(to_writer, NULL);
6497 if(queue_get(from_writer) != 0)
6498 EXIT_MKSQUASHFS();
6499
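	/*
	 * write the metadata tables and the superblock, padding the
	 * filesystem to a multiple of 4K unless -nopad was specified
	 */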
6500 set_progressbar_state(FALSE);
6501 write_filesystem_tables(&sBlk, nopad);
6502
6503 /* ANDROID CHANGES START*/
6504 #ifdef ANDROID
6505 if (block_map_file)
6506 fclose(block_map_file);
6507 #endif
6508 /* ANDROID CHANGES END */
6509
6510 return 0;
6511 }
6512