// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package zip

import (
	"bytes"
	"compress/flate"
	"crypto/sha256"
	"encoding/binary"
	"errors"
	"fmt"
	"hash"
	"hash/crc32"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"syscall"
	"time"

	"android/soong/response"

	"github.com/google/blueprint/pathtools"

	"android/soong/jar"
	"android/soong/third_party/zip"
)

// Sha256HeaderID is a custom Header ID for the `extra` field in
// the file header to store the SHA checksum.
const Sha256HeaderID = 0x4967

// Sha256HeaderSignature is the signature to verify that the extra
// data block is used to store the SHA checksum.
const Sha256HeaderSignature = 0x9514

// Block size used during parallel compression of a single file.
const parallelBlockSize = 1 * 1024 * 1024 // 1MB

// Minimum file size to use parallel compression. It requires more
// flate.Writer allocations, since we can't change the dictionary
// during Reset
const minParallelFileSize = parallelBlockSize * 6

// Size of the ZIP compression window (32KB)
const windowSize = 32 * 1024
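
// As an illustration of the thresholds above (assuming the constants keep
// their current values): only files of at least 6MB are compressed in
// parallel, split into 1MB blocks, and each block after the first is primed
// with a 32KB dictionary taken from the bytes immediately preceding it.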

type nopCloser struct {
	io.Writer
}

func (nopCloser) Close() error {
	return nil
}

type byteReaderCloser struct {
	*bytes.Reader
	io.Closer
}

type pathMapping struct {
	dest, src string
	zipMethod uint16
}

type FileArg struct {
	PathPrefixInZip, SourcePrefixToStrip string
	SourceFiles                          []string
	JunkPaths                            bool
	GlobDir                              string
}

type FileArgsBuilder struct {
	state FileArg
	err   error
	fs    pathtools.FileSystem

	fileArgs []FileArg
}

func NewFileArgsBuilder() *FileArgsBuilder {
	return &FileArgsBuilder{
		fs: pathtools.OsFs,
	}
}

func (b *FileArgsBuilder) JunkPaths(v bool) *FileArgsBuilder {
	b.state.JunkPaths = v
	b.state.SourcePrefixToStrip = ""
	return b
}

func (b *FileArgsBuilder) SourcePrefixToStrip(prefixToStrip string) *FileArgsBuilder {
	b.state.JunkPaths = false
	b.state.SourcePrefixToStrip = prefixToStrip
	return b
}

func (b *FileArgsBuilder) PathPrefixInZip(rootPrefix string) *FileArgsBuilder {
	b.state.PathPrefixInZip = rootPrefix
	return b
}

func (b *FileArgsBuilder) File(name string) *FileArgsBuilder {
	if b.err != nil {
		return b
	}

	arg := b.state
	arg.SourceFiles = []string{name}
	b.fileArgs = append(b.fileArgs, arg)
	return b
}

func (b *FileArgsBuilder) Dir(name string) *FileArgsBuilder {
	if b.err != nil {
		return b
	}

	arg := b.state
	arg.GlobDir = name
	b.fileArgs = append(b.fileArgs, arg)
	return b
}

// List reads the file names from the given file and adds them to the source files list.
func (b *FileArgsBuilder) List(name string) *FileArgsBuilder {
	if b.err != nil {
		return b
	}

	f, err := b.fs.Open(name)
	if err != nil {
		b.err = err
		return b
	}
	defer f.Close()

	list, err := ioutil.ReadAll(f)
	if err != nil {
		b.err = err
		return b
	}

	arg := b.state
	arg.SourceFiles = strings.Fields(string(list))
	b.fileArgs = append(b.fileArgs, arg)
	return b
}

// RspFile reads the file names from the given .rsp file and adds them to the source files list.
func (b *FileArgsBuilder) RspFile(name string) *FileArgsBuilder {
	if b.err != nil {
		return b
	}

	f, err := b.fs.Open(name)
	if err != nil {
		b.err = err
		return b
	}
	defer f.Close()

	arg := b.state
	arg.SourceFiles, err = response.ReadRspFile(f)
	if err != nil {
		b.err = err
		return b
	}
	for i := range arg.SourceFiles {
		arg.SourceFiles[i] = pathtools.MatchEscape(arg.SourceFiles[i])
	}
	b.fileArgs = append(b.fileArgs, arg)
	return b
}

func (b *FileArgsBuilder) Error() error {
	if b == nil {
		return nil
	}
	return b.err
}

func (b *FileArgsBuilder) FileArgs() []FileArg {
	if b == nil {
		return nil
	}
	return b.fileArgs
}
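
// exampleFileArgs is an illustrative sketch (not used by the package) of how a
// caller typically assembles FileArgs with the builder; the paths here are
// hypothetical.
func exampleFileArgs() ([]FileArg, error) {
	b := NewFileArgsBuilder().
		PathPrefixInZip("lib").
		SourcePrefixToStrip("out").
		File("out/libfoo.so").
		Dir("out/assets")
	if err := b.Error(); err != nil {
		return nil, err
	}
	return b.FileArgs(), nil
}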

type IncorrectRelativeRootError struct {
	RelativeRoot string
	Path         string
}

func (x IncorrectRelativeRootError) Error() string {
	return fmt.Sprintf("path %q is outside relative root %q", x.Path, x.RelativeRoot)
}

type ConflictingFileError struct {
	Dest string
	Prev string
	Src  string
}

func (x ConflictingFileError) Error() string {
	return fmt.Sprintf("destination %q has two files %q and %q", x.Dest, x.Prev, x.Src)
}

type ZipWriter struct {
	time         time.Time
	createdFiles map[string]string
	createdDirs  map[string]string
	directories  bool

	errors   chan error
	writeOps chan chan *zipEntry

	cpuRateLimiter    *CPURateLimiter
	memoryRateLimiter *MemoryRateLimiter

	compressorPool sync.Pool
	compLevel      int

	followSymlinks     pathtools.ShouldFollowSymlinks
	ignoreMissingFiles bool

	stderr io.Writer
	fs     pathtools.FileSystem

	sha256Checksum bool
}

type zipEntry struct {
	fh *zip.FileHeader

	// List of delayed io.Reader
	futureReaders chan chan io.Reader

	// Only used for passing into the MemoryRateLimiter to ensure we
	// release as much memory as we request.
	allocatedSize int64
}

type ZipArgs struct {
	FileArgs                 []FileArg
	OutputFilePath           string
	EmulateJar               bool
	SrcJar                   bool
	AddDirectoryEntriesToZip bool
	CompressionLevel         int
	ManifestSourcePath       string
	NumParallelJobs          int
	NonDeflatedFiles         map[string]bool
	WriteIfChanged           bool
	StoreSymlinks            bool
	IgnoreMissingFiles       bool
	Sha256Checksum           bool

	Stderr     io.Writer
	Filesystem pathtools.FileSystem
}

func zipTo(args ZipArgs, w io.Writer) error {
	if args.EmulateJar {
		args.AddDirectoryEntriesToZip = true
	}

	// Have Glob follow symlinks if they are not being stored as symlinks in the zip file.
	followSymlinks := pathtools.ShouldFollowSymlinks(!args.StoreSymlinks)

	z := &ZipWriter{
		time:               jar.DefaultTime,
		createdDirs:        make(map[string]string),
		createdFiles:       make(map[string]string),
		directories:        args.AddDirectoryEntriesToZip,
		compLevel:          args.CompressionLevel,
		followSymlinks:     followSymlinks,
		ignoreMissingFiles: args.IgnoreMissingFiles,
		stderr:             args.Stderr,
		fs:                 args.Filesystem,
		sha256Checksum:     args.Sha256Checksum,
	}

	if z.fs == nil {
		z.fs = pathtools.OsFs
	}

	if z.stderr == nil {
		z.stderr = os.Stderr
	}

	pathMappings := []pathMapping{}

	noCompression := args.CompressionLevel == 0

	for _, fa := range args.FileArgs {
		var srcs []string
		for _, s := range fa.SourceFiles {
			s = strings.TrimSpace(s)
			if s == "" {
				continue
			}

			result, err := z.fs.Glob(s, nil, followSymlinks)
			if err != nil {
				return err
			}
			if len(result.Matches) == 0 {
				err := &os.PathError{
					Op:   "lstat",
					Path: s,
					Err:  os.ErrNotExist,
				}
				if args.IgnoreMissingFiles {
					fmt.Fprintln(z.stderr, "warning:", err)
				} else {
					return err
				}
			}
			srcs = append(srcs, result.Matches...)
		}
		if fa.GlobDir != "" {
			if exists, isDir, err := z.fs.Exists(fa.GlobDir); err != nil {
				return err
			} else if !exists && !args.IgnoreMissingFiles {
				err := &os.PathError{
					Op:   "lstat",
					Path: fa.GlobDir,
					Err:  os.ErrNotExist,
				}
				if args.IgnoreMissingFiles {
					fmt.Fprintln(z.stderr, "warning:", err)
				} else {
					return err
				}
			} else if !isDir && !args.IgnoreMissingFiles {
				err := &os.PathError{
					Op:   "lstat",
					Path: fa.GlobDir,
					Err:  syscall.ENOTDIR,
				}
				if args.IgnoreMissingFiles {
					fmt.Fprintln(z.stderr, "warning:", err)
				} else {
					return err
				}
			}
			result, err := z.fs.Glob(filepath.Join(fa.GlobDir, "**/*"), nil, followSymlinks)
			if err != nil {
				return err
			}
			srcs = append(srcs, result.Matches...)
		}
		for _, src := range srcs {
			err := fillPathPairs(fa, src, &pathMappings, args.NonDeflatedFiles, noCompression)
			if err != nil {
				return err
			}
		}
	}

	return z.write(w, pathMappings, args.ManifestSourcePath, args.EmulateJar, args.SrcJar, args.NumParallelJobs)
}

// Zip creates an output zip archive from the given sources.
func Zip(args ZipArgs) error {
	if args.OutputFilePath == "" {
		return fmt.Errorf("output file path must be nonempty")
	}

	buf := &bytes.Buffer{}
	var out io.Writer = buf

	var zipErr error

	if !args.WriteIfChanged {
		f, err := os.Create(args.OutputFilePath)
		if err != nil {
			return err
		}

		defer f.Close()
		defer func() {
			if zipErr != nil {
				os.Remove(args.OutputFilePath)
			}
		}()

		out = f
	}

	zipErr = zipTo(args, out)
	if zipErr != nil {
		return zipErr
	}

	if args.WriteIfChanged {
		err := pathtools.WriteFileIfChanged(args.OutputFilePath, buf.Bytes(), 0666)
		if err != nil {
			return err
		}
	}

	return nil
}
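
// exampleZip is an illustrative sketch (not one of the real callers) of how
// ZipArgs is typically filled in before calling Zip; the output path and
// tuning values here are hypothetical.
func exampleZip(fileArgs []FileArg) error {
	return Zip(ZipArgs{
		FileArgs:         fileArgs,
		OutputFilePath:   "out/example.zip",
		CompressionLevel: 5,
		NumParallelJobs:  4,
		WriteIfChanged:   true,
	})
}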

func fillPathPairs(fa FileArg, src string, pathMappings *[]pathMapping,
	nonDeflatedFiles map[string]bool, noCompression bool) error {

	var dest string

	if fa.JunkPaths {
		dest = filepath.Base(src)
	} else {
		var err error
		dest, err = filepath.Rel(fa.SourcePrefixToStrip, src)
		if err != nil {
			return err
		}
		if strings.HasPrefix(dest, "../") {
			return IncorrectRelativeRootError{
				Path:         src,
				RelativeRoot: fa.SourcePrefixToStrip,
			}
		}
	}
	dest = filepath.Join(fa.PathPrefixInZip, dest)

	zipMethod := zip.Deflate
	if _, found := nonDeflatedFiles[dest]; found || noCompression {
		zipMethod = zip.Store
	}
	*pathMappings = append(*pathMappings,
		pathMapping{dest: dest, src: src, zipMethod: zipMethod})

	return nil
}
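
// examplePathPairs is an illustrative sketch (with hypothetical paths) of how
// fillPathPairs maps a source path to its destination entry in the zip: with
// SourcePrefixToStrip "out" and PathPrefixInZip "assets", "out/icons/app.png"
// becomes "assets/icons/app.png".
func examplePathPairs() ([]pathMapping, error) {
	fa := FileArg{
		PathPrefixInZip:     "assets",
		SourcePrefixToStrip: "out",
	}
	var mappings []pathMapping
	err := fillPathPairs(fa, "out/icons/app.png", &mappings, nil, false)
	return mappings, err
}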

func jarSort(mappings []pathMapping) {
	sort.SliceStable(mappings, func(i int, j int) bool {
		return jar.EntryNamesLess(mappings[i].dest, mappings[j].dest)
	})
}

func (z *ZipWriter) write(f io.Writer, pathMappings []pathMapping, manifest string, emulateJar, srcJar bool,
	parallelJobs int) error {

	z.errors = make(chan error)
	defer close(z.errors)

	// This channel size can be essentially unlimited -- it's used as a fifo
	// queue to decouple the CPU and IO loads. Directories don't require any
	// compression time, but still cost some IO. Similarly, small files can be
	// very fast to compress. Some files that are more difficult to compress
	// won't take a correspondingly longer time writing out.
	//
	// The optimum size here depends on your CPU and IO characteristics, and
	// the layout of your zip file. 1000 was chosen mostly at random as
	// something that worked reasonably well for a test file.
	//
	// The RateLimit object will put the upper bounds on the number of
	// parallel compressions and outstanding buffers.
	z.writeOps = make(chan chan *zipEntry, 1000)
	z.cpuRateLimiter = NewCPURateLimiter(int64(parallelJobs))
	z.memoryRateLimiter = NewMemoryRateLimiter(0)
	defer func() {
		z.cpuRateLimiter.Stop()
		z.memoryRateLimiter.Stop()
	}()

	if manifest != "" && !emulateJar {
		return errors.New("must specify --jar when specifying a manifest via -m")
	}

	if emulateJar {
		// manifest may be empty, in which case addManifest will fill in a default
		pathMappings = append(pathMappings, pathMapping{jar.ManifestFile, manifest, zip.Deflate})

		jarSort(pathMappings)
	}

	go func() {
		var err error
		defer close(z.writeOps)

		for _, ele := range pathMappings {
			if emulateJar && ele.dest == jar.ManifestFile {
				err = z.addManifest(ele.dest, ele.src, ele.zipMethod)
			} else {
				err = z.addFile(ele.dest, ele.src, ele.zipMethod, emulateJar, srcJar)
			}
			if err != nil {
				z.errors <- err
				return
			}
		}
	}()

	zipw := zip.NewWriter(f)

	var currentWriteOpChan chan *zipEntry
	var currentWriter io.WriteCloser
	var currentReaders chan chan io.Reader
	var currentReader chan io.Reader
	var done bool

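	// Drain the pipeline one channel at a time, in priority order: finish
	// copying the current block (currentReader), then fetch the next block's
	// future reader (currentReaders), then open the next entry
	// (currentWriteOpChan), and only then accept a new write op. Unused cases
	// are disabled by leaving their channels nil.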
	for !done {
		var writeOpsChan chan chan *zipEntry
		var writeOpChan chan *zipEntry
		var readersChan chan chan io.Reader

		if currentReader != nil {
			// Only read and process errors
		} else if currentReaders != nil {
			readersChan = currentReaders
		} else if currentWriteOpChan != nil {
			writeOpChan = currentWriteOpChan
		} else {
			writeOpsChan = z.writeOps
		}

		select {
		case writeOp, ok := <-writeOpsChan:
			if !ok {
				done = true
			}

			currentWriteOpChan = writeOp

		case op := <-writeOpChan:
			currentWriteOpChan = nil

			var err error
			if op.fh.Method == zip.Deflate {
				currentWriter, err = zipw.CreateCompressedHeader(op.fh)
			} else {
				var zw io.Writer

				op.fh.CompressedSize64 = op.fh.UncompressedSize64

				zw, err = zipw.CreateHeaderAndroid(op.fh)
				currentWriter = nopCloser{zw}
			}
			if err != nil {
				return err
			}

			currentReaders = op.futureReaders
			if op.futureReaders == nil {
				currentWriter.Close()
				currentWriter = nil
			}
			z.memoryRateLimiter.Finish(op.allocatedSize)

		case futureReader, ok := <-readersChan:
			if !ok {
				// Done with reading
				currentWriter.Close()
				currentWriter = nil
				currentReaders = nil
			}

			currentReader = futureReader

		case reader := <-currentReader:
			_, err := io.Copy(currentWriter, reader)
			if err != nil {
				return err
			}

			currentReader = nil

		case err := <-z.errors:
			return err
		}
	}

	// One last chance to catch an error
	select {
	case err := <-z.errors:
		return err
	default:
		zipw.Close()
		return nil
	}
}

// addFile imports (possibly with compression) <src> into the zip at sub-path <dest>.
func (z *ZipWriter) addFile(dest, src string, method uint16, emulateJar, srcJar bool) error {
	var fileSize int64
	var executable bool

	var s os.FileInfo
	var err error
	if z.followSymlinks {
		s, err = z.fs.Stat(src)
	} else {
		s, err = z.fs.Lstat(src)
	}

	if err != nil {
		if os.IsNotExist(err) && z.ignoreMissingFiles {
			fmt.Fprintln(z.stderr, "warning:", err)
			return nil
		}
		return err
	}

	createParentDirs := func(dest, src string) error {
		if err := z.writeDirectory(filepath.Dir(dest), src, emulateJar); err != nil {
			return err
		}

		if prev, exists := z.createdDirs[dest]; exists {
			return fmt.Errorf("destination %q is both a directory %q and a file %q", dest, prev, src)
		}

		return nil
	}

	checkDuplicateFiles := func(dest, src string) (bool, error) {
		if prev, exists := z.createdFiles[dest]; exists {
			if prev != src {
				return true, ConflictingFileError{
					Dest: dest,
					Prev: prev,
					Src:  src,
				}
			}
			return true, nil
		}

		z.createdFiles[dest] = src
		return false, nil
	}

	if s.IsDir() {
		if z.directories {
			return z.writeDirectory(dest, src, emulateJar)
		}
		return nil
	} else if s.Mode()&os.ModeSymlink != 0 {
		err = createParentDirs(dest, src)
		if err != nil {
			return err
		}

		duplicate, err := checkDuplicateFiles(dest, src)
		if err != nil {
			return err
		}
		if duplicate {
			return nil
		}

		return z.writeSymlink(dest, src)
	} else if s.Mode().IsRegular() {
		r, err := z.fs.Open(src)
		if err != nil {
			return err
		}

		if srcJar && filepath.Ext(src) == ".java" {
			// rewrite the destination using the package path if it can be determined
			pkg, err := jar.JavaPackage(r, src)
			if err != nil {
				// ignore errors for now, leaving the file in its original location in the zip
			} else {
				dest = filepath.Join(filepath.Join(strings.Split(pkg, ".")...), filepath.Base(src))
			}

			_, err = r.Seek(0, io.SeekStart)
			if err != nil {
				return err
			}
		}

		fileSize = s.Size()
		executable = s.Mode()&0100 != 0

		header := &zip.FileHeader{
			Name:               dest,
			Method:             method,
			UncompressedSize64: uint64(fileSize),
		}

		mode := os.FileMode(0644)
		if executable {
			mode = 0755
		}
		header.SetMode(mode)

		err = createParentDirs(dest, src)
		if err != nil {
			return err
		}

		duplicate, err := checkDuplicateFiles(dest, src)
		if err != nil {
			return err
		}
		if duplicate {
			return nil
		}

		return z.writeFileContents(header, r)
	} else {
		return fmt.Errorf("%s is not a file, directory, or symlink", src)
	}
}

func (z *ZipWriter) addManifest(dest string, src string, _ uint16) error {
	if prev, exists := z.createdDirs[dest]; exists {
		return fmt.Errorf("destination %q is both a directory %q and a file %q", dest, prev, src)
	}
	if prev, exists := z.createdFiles[dest]; exists {
		if prev != src {
			return ConflictingFileError{
				Dest: dest,
				Prev: prev,
				Src:  src,
			}
		}
		return nil
	}

	if err := z.writeDirectory(filepath.Dir(dest), src, true); err != nil {
		return err
	}

	var contents []byte
	if src != "" {
		f, err := z.fs.Open(src)
		if err != nil {
			return err
		}

		contents, err = ioutil.ReadAll(f)
		f.Close()
		if err != nil {
			return err
		}
	}

	fh, buf, err := jar.ManifestFileContents(contents)
	if err != nil {
		return err
	}

	reader := &byteReaderCloser{bytes.NewReader(buf), ioutil.NopCloser(nil)}

	return z.writeFileContents(fh, reader)
}

func (z *ZipWriter) writeFileContents(header *zip.FileHeader, r pathtools.ReaderAtSeekerCloser) (err error) {

	header.SetModTime(z.time)

	compressChan := make(chan *zipEntry, 1)
	z.writeOps <- compressChan

	// Pre-fill a zipEntry; it will be sent on compressChan once
	// we're sure about the Method and CRC.
	ze := &zipEntry{
		fh: header,
	}

	ze.allocatedSize = int64(header.UncompressedSize64)
	z.cpuRateLimiter.Request()
	z.memoryRateLimiter.Request(ze.allocatedSize)

	fileSize := int64(header.UncompressedSize64)
	if fileSize == 0 {
		fileSize = int64(header.UncompressedSize)
	}

	if header.Method == zip.Deflate && fileSize >= minParallelFileSize {
		wg := new(sync.WaitGroup)

		// Allocate enough buffer to hold all readers. We'll limit
		// this based on actual buffer sizes in RateLimit.
		ze.futureReaders = make(chan chan io.Reader, (fileSize/parallelBlockSize)+1)

		// Calculate the CRC and SHA256 in the background, since reading
		// the entire file could take a while.
		//
		// We could split this up into chunks as well, but it's faster
		// than the compression. Due to the Go Zip API, we also need to
		// know the result before we can begin writing the compressed
		// data out to the zipfile.
		//
		// We calculate SHA256 only if `-sha256` is set.
		wg.Add(1)
		go z.checksumFileAsync(r, ze, compressChan, wg)

		for start := int64(0); start < fileSize; start += parallelBlockSize {
			sr := io.NewSectionReader(r, start, parallelBlockSize)
			resultChan := make(chan io.Reader, 1)
			ze.futureReaders <- resultChan

			z.cpuRateLimiter.Request()

			last := !(start+parallelBlockSize < fileSize)
			var dict []byte
			if start >= windowSize {
				dict, err = ioutil.ReadAll(io.NewSectionReader(r, start-windowSize, windowSize))
				if err != nil {
					return err
				}
			}

			wg.Add(1)
			go z.compressPartialFile(sr, dict, last, resultChan, wg)
		}

		close(ze.futureReaders)

		// Close the file handle after all readers are done
		go func(wg *sync.WaitGroup, closer io.Closer) {
			wg.Wait()
			closer.Close()
		}(wg, r)
	} else {
		go func() {
			z.compressWholeFile(ze, r, compressChan)
			r.Close()
		}()
	}

	return nil
}

func (z *ZipWriter) checksumFileAsync(r io.ReadSeeker, ze *zipEntry, resultChan chan *zipEntry, wg *sync.WaitGroup) {
	defer wg.Done()
	defer z.cpuRateLimiter.Finish()

	z.checksumFile(r, ze)

	resultChan <- ze
	close(resultChan)
}

func (z *ZipWriter) checksumFile(r io.ReadSeeker, ze *zipEntry) {
	crc := crc32.NewIEEE()
	writers := []io.Writer{crc}

	var shaHasher hash.Hash
	if z.sha256Checksum && !ze.fh.Mode().IsDir() {
		shaHasher = sha256.New()
		writers = append(writers, shaHasher)
	}

	w := io.MultiWriter(writers...)

	_, err := io.Copy(w, r)
	if err != nil {
		z.errors <- err
		return
	}

	ze.fh.CRC32 = crc.Sum32()
	if shaHasher != nil {
		z.appendSHAToExtra(ze, shaHasher.Sum(nil))
	}
}

func (z *ZipWriter) appendSHAToExtra(ze *zipEntry, checksum []byte) {
	// The SHA256 checksum block consists of:
	// - Header ID, equal to Sha256HeaderID (2 bytes)
	// - Data size (2 bytes)
	// - Data block:
	//   - Signature, equal to Sha256HeaderSignature (2 bytes)
	//   - Data, the SHA checksum value
	var buf []byte
	buf = binary.LittleEndian.AppendUint16(buf, Sha256HeaderID)
	buf = binary.LittleEndian.AppendUint16(buf, uint16(len(checksum)+2))
	buf = binary.LittleEndian.AppendUint16(buf, Sha256HeaderSignature)
	buf = append(buf, checksum...)
	ze.fh.Extra = append(ze.fh.Extra, buf...)
}
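
// extraSha256 is an illustrative sketch (not used by the writer) of how a
// reader could pull the checksum written by appendSHAToExtra back out of a
// FileHeader's Extra field, walking the standard (ID, size, data) blocks.
func extraSha256(extra []byte) ([]byte, bool) {
	for len(extra) >= 4 {
		id := binary.LittleEndian.Uint16(extra[0:2])
		size := int(binary.LittleEndian.Uint16(extra[2:4]))
		if len(extra) < 4+size {
			break
		}
		data := extra[4 : 4+size]
		if id == Sha256HeaderID && size >= 2 &&
			binary.LittleEndian.Uint16(data[0:2]) == Sha256HeaderSignature {
			return data[2:], true
		}
		extra = extra[4+size:]
	}
	return nil, false
}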

func (z *ZipWriter) compressPartialFile(r io.Reader, dict []byte, last bool, resultChan chan io.Reader, wg *sync.WaitGroup) {
	defer wg.Done()

	result, err := z.compressBlock(r, dict, last)
	if err != nil {
		z.errors <- err
		return
	}

	z.cpuRateLimiter.Finish()

	resultChan <- result
}

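// compressBlock deflates one chunk of input into an in-memory buffer. Chunks
// compressed this way can simply be concatenated in order: every chunk except
// the last is only Flush()ed, and the deflate stream is terminated exactly
// once, by the call made with last == true.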
func (z *ZipWriter) compressBlock(r io.Reader, dict []byte, last bool) (*bytes.Buffer, error) {
	buf := new(bytes.Buffer)
	var fw *flate.Writer
	var err error
	if len(dict) > 0 {
		// There's no way to Reset a Writer with a new dictionary, so
		// don't use the Pool
		fw, err = flate.NewWriterDict(buf, z.compLevel, dict)
	} else {
		var ok bool
		if fw, ok = z.compressorPool.Get().(*flate.Writer); ok {
			fw.Reset(buf)
		} else {
			fw, err = flate.NewWriter(buf, z.compLevel)
		}
		defer z.compressorPool.Put(fw)
	}
	if err != nil {
		return nil, err
	}

	_, err = io.Copy(fw, r)
	if err != nil {
		return nil, err
	}
	if last {
		fw.Close()
	} else {
		fw.Flush()
	}

	return buf, nil
}

func (z *ZipWriter) compressWholeFile(ze *zipEntry, r io.ReadSeeker, compressChan chan *zipEntry) {
	z.checksumFile(r, ze)

	_, err := r.Seek(0, 0)
	if err != nil {
		z.errors <- err
		return
	}

	readFile := func(reader io.ReadSeeker) ([]byte, error) {
		_, err := reader.Seek(0, 0)
		if err != nil {
			return nil, err
		}

		buf, err := ioutil.ReadAll(reader)
		if err != nil {
			return nil, err
		}

		return buf, nil
	}

	ze.futureReaders = make(chan chan io.Reader, 1)
	futureReader := make(chan io.Reader, 1)
	ze.futureReaders <- futureReader
	close(ze.futureReaders)

	if ze.fh.Method == zip.Deflate {
		compressed, err := z.compressBlock(r, nil, true)
		if err != nil {
			z.errors <- err
			return
		}
		if uint64(compressed.Len()) < ze.fh.UncompressedSize64 {
			futureReader <- compressed
		} else {
			buf, err := readFile(r)
			if err != nil {
				z.errors <- err
				return
			}
			ze.fh.Method = zip.Store
			futureReader <- bytes.NewReader(buf)
		}
	} else {
		buf, err := readFile(r)
		if err != nil {
			z.errors <- err
			return
		}
		ze.fh.Method = zip.Store
		futureReader <- bytes.NewReader(buf)
	}

	z.cpuRateLimiter.Finish()

	close(futureReader)

	compressChan <- ze
	close(compressChan)
}

// writeDirectory annotates that dir is a directory created for the src file or directory, and adds
// the directory entry to the zip file if directories are enabled.
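// For example (illustrative paths): writing the entry "a/b/c.txt" first records
// "a" and then "a/b" here, emitting "a/" and "a/b/" directory entries when
// directories are enabled.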
func (z *ZipWriter) writeDirectory(dir string, src string, emulateJar bool) error {
	// clean the input
	dir = filepath.Clean(dir)

	// discover any uncreated directories in the path
	var zipDirs []string
	for dir != "" && dir != "." {
		if _, exists := z.createdDirs[dir]; exists {
			break
		}

		if prev, exists := z.createdFiles[dir]; exists {
			return fmt.Errorf("destination %q is both a directory %q and a file %q", dir, src, prev)
		}

		z.createdDirs[dir] = src
		// parent directories precede their children
		zipDirs = append([]string{dir}, zipDirs...)

		dir = filepath.Dir(dir)
	}

	if z.directories {
		// make a directory entry for each uncreated directory
		for _, cleanDir := range zipDirs {
			var dirHeader *zip.FileHeader

			if emulateJar && cleanDir+"/" == jar.MetaDir {
				dirHeader = jar.MetaDirFileHeader()
			} else {
				dirHeader = &zip.FileHeader{
					Name: cleanDir + "/",
				}
				dirHeader.SetMode(0755 | os.ModeDir)
			}

			dirHeader.SetModTime(z.time)

			ze := make(chan *zipEntry, 1)
			ze <- &zipEntry{
				fh: dirHeader,
			}
			close(ze)
			z.writeOps <- ze
		}
	}

	return nil
}

func (z *ZipWriter) writeSymlink(rel, file string) error {
	fileHeader := &zip.FileHeader{
		Name: rel,
	}
	fileHeader.SetModTime(z.time)
	fileHeader.SetMode(0777 | os.ModeSymlink)

	dest, err := z.fs.Readlink(file)
	if err != nil {
		return err
	}

	fileHeader.UncompressedSize64 = uint64(len(dest))
	fileHeader.CRC32 = crc32.ChecksumIEEE([]byte(dest))

	ze := make(chan *zipEntry, 1)
	futureReaders := make(chan chan io.Reader, 1)
	futureReader := make(chan io.Reader, 1)
	futureReaders <- futureReader
	close(futureReaders)
	futureReader <- bytes.NewBufferString(dest)
	close(futureReader)

	ze <- &zipEntry{
		fh:            fileHeader,
		futureReaders: futureReaders,
	}
	close(ze)
	z.writeOps <- ze

	return nil
}