// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package zip

import (
	"bytes"
	"compress/flate"
	"crypto/sha256"
	"encoding/binary"
	"errors"
	"fmt"
	"hash"
	"hash/crc32"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"syscall"
	"time"

	"android/soong/response"

	"github.com/google/blueprint/pathtools"

	"android/soong/jar"
	"android/soong/third_party/zip"
)

// Sha256HeaderID is a custom Header ID for the `extra` field in
// the file header to store the SHA checksum.
const Sha256HeaderID = 0x4967

// Sha256HeaderSignature is the signature to verify that the extra
// data block is used to store the SHA checksum.
const Sha256HeaderSignature = 0x9514

// Block size used during parallel compression of a single file.
const parallelBlockSize = 1 * 1024 * 1024 // 1MB

// Minimum file size to use parallel compression. It requires more
// flate.Writer allocations, since we can't change the dictionary
// during Reset
const minParallelFileSize = parallelBlockSize * 6

// Size of the ZIP compression window (32KB)
const windowSize = 32 * 1024

type nopCloser struct {
	io.Writer
}

func (nopCloser) Close() error {
	return nil
}

type byteReaderCloser struct {
	*bytes.Reader
	io.Closer
}

type pathMapping struct {
	dest, src string
	zipMethod uint16
}

type FileArg struct {
	PathPrefixInZip, SourcePrefixToStrip string
	ExplicitPathInZip                    string
	SourceFiles                          []string
	JunkPaths                            bool
	GlobDir                              string
}

type FileArgsBuilder struct {
	state FileArg
	err   error
	fs    pathtools.FileSystem

	fileArgs []FileArg
}

func NewFileArgsBuilder() *FileArgsBuilder {
	return &FileArgsBuilder{
		fs: pathtools.OsFs,
	}
}

func (b *FileArgsBuilder) JunkPaths(v bool) *FileArgsBuilder {
	b.state.JunkPaths = v
	b.state.SourcePrefixToStrip = ""
	return b
}

func (b *FileArgsBuilder) SourcePrefixToStrip(prefixToStrip string) *FileArgsBuilder {
	b.state.JunkPaths = false
	b.state.SourcePrefixToStrip = prefixToStrip
	return b
}

func (b *FileArgsBuilder) PathPrefixInZip(rootPrefix string) *FileArgsBuilder {
	b.state.PathPrefixInZip = rootPrefix
	return b
}

func (b *FileArgsBuilder) File(name string) *FileArgsBuilder {
	if b.err != nil {
		return b
	}

	arg := b.state
	arg.SourceFiles = []string{name}
	b.fileArgs = append(b.fileArgs, arg)

	if b.state.ExplicitPathInZip != "" {
		b.state.ExplicitPathInZip = ""
	}
	return b
}

func (b *FileArgsBuilder) Dir(name string) *FileArgsBuilder {
	if b.err != nil {
		return b
	}

	arg := b.state
	arg.GlobDir = name
	b.fileArgs = append(b.fileArgs, arg)
	return b
}

// List reads the file names from the given file and adds them to the source files list.
func (b *FileArgsBuilder) List(name string) *FileArgsBuilder {
	if b.err != nil {
		return b
	}

	f, err := b.fs.Open(name)
	if err != nil {
		b.err = err
		return b
	}
	defer f.Close()

	list, err := ioutil.ReadAll(f)
	if err != nil {
		b.err = err
		return b
	}

	arg := b.state
	arg.SourceFiles = strings.Fields(string(list))
	b.fileArgs = append(b.fileArgs, arg)
	return b
}

// RspFile reads the file names from the given .rsp file and adds them to the source files list.
func (b *FileArgsBuilder) RspFile(name string) *FileArgsBuilder {
	if b.err != nil {
		return b
	}

	f, err := b.fs.Open(name)
	if err != nil {
		b.err = err
		return b
	}
	defer f.Close()

	arg := b.state
	arg.SourceFiles, err = response.ReadRspFile(f)
	if err != nil {
		b.err = err
		return b
	}
	for i := range arg.SourceFiles {
		arg.SourceFiles[i] = pathtools.MatchEscape(arg.SourceFiles[i])
	}
	b.fileArgs = append(b.fileArgs, arg)
	return b
}

// ExplicitPathInZip sets the path in the zip file for the next File call.
func (b *FileArgsBuilder) ExplicitPathInZip(s string) *FileArgsBuilder {
	b.state.ExplicitPathInZip = s
	return b
}

func (b *FileArgsBuilder) Error() error {
	if b == nil {
		return nil
	}
	return b.err
}

func (b *FileArgsBuilder) FileArgs() []FileArg {
	if b == nil {
		return nil
	}
	return b.fileArgs
}

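// exampleFileArgs is an illustrative sketch, not part of the original file:
// it shows how the builder above is typically chained. The paths are
// hypothetical. Settings made by PathPrefixInZip/SourcePrefixToStrip apply to
// every following File or Dir call.
func exampleFileArgs() ([]FileArg, error) {
	b := NewFileArgsBuilder().
		PathPrefixInZip("assets").
		SourcePrefixToStrip("out/files").
		File("out/files/readme.txt").
		Dir("out/files/res")
	if err := b.Error(); err != nil {
		return nil, err
	}
	return b.FileArgs(), nil
}
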
type IncorrectRelativeRootError struct {
	RelativeRoot string
	Path         string
}

func (x IncorrectRelativeRootError) Error() string {
	return fmt.Sprintf("path %q is outside relative root %q", x.Path, x.RelativeRoot)
}

type ConflictingFileError struct {
	Dest string
	Prev string
	Src  string
}

func (x ConflictingFileError) Error() string {
	return fmt.Sprintf("destination %q has two files %q and %q", x.Dest, x.Prev, x.Src)
}

type ZipWriter struct {
	time         time.Time
	createdFiles map[string]string
	createdDirs  map[string]string
	directories  bool

	errors   chan error
	writeOps chan chan *zipEntry

	cpuRateLimiter    *CPURateLimiter
	memoryRateLimiter *MemoryRateLimiter

	compressorPool sync.Pool
	compLevel      int

	followSymlinks     pathtools.ShouldFollowSymlinks
	ignoreMissingFiles bool

	stderr io.Writer
	fs     pathtools.FileSystem

	sha256Checksum bool
}

type zipEntry struct {
	fh *zip.FileHeader

	// List of delayed io.Reader
	futureReaders chan chan io.Reader

	// Only used for passing into the MemoryRateLimiter to ensure we
	// release as much memory as we request
	allocatedSize int64
}

type ZipArgs struct {
	FileArgs                 []FileArg
	OutputFilePath           string
	EmulateJar               bool
	SrcJar                   bool
	AddDirectoryEntriesToZip bool
	CompressionLevel         int
	ManifestSourcePath       string
	NumParallelJobs          int
	NonDeflatedFiles         map[string]bool
	WriteIfChanged           bool
	StoreSymlinks            bool
	IgnoreMissingFiles       bool
	Sha256Checksum           bool
	DoNotWrite               bool
	Quiet                    bool

	Stderr     io.Writer
	Filesystem pathtools.FileSystem
}

func zipTo(args ZipArgs, w io.Writer) error {
	if args.EmulateJar {
		args.AddDirectoryEntriesToZip = true
	}

	// Have Glob follow symlinks if they are not being stored as symlinks in the zip file.
	followSymlinks := pathtools.ShouldFollowSymlinks(!args.StoreSymlinks)

	z := &ZipWriter{
		time:               jar.DefaultTime,
		createdDirs:        make(map[string]string),
		createdFiles:       make(map[string]string),
		directories:        args.AddDirectoryEntriesToZip,
		compLevel:          args.CompressionLevel,
		followSymlinks:     followSymlinks,
		ignoreMissingFiles: args.IgnoreMissingFiles,
		stderr:             args.Stderr,
		fs:                 args.Filesystem,
		sha256Checksum:     args.Sha256Checksum,
	}

	if z.fs == nil {
		z.fs = pathtools.OsFs
	}

	if z.stderr == nil {
		z.stderr = os.Stderr
	}

	pathMappings := []pathMapping{}

	noCompression := args.CompressionLevel == 0

	for _, fa := range args.FileArgs {
		var srcs []string
		for _, s := range fa.SourceFiles {
			s = strings.TrimSpace(s)
			if s == "" {
				continue
			}

			result, err := z.fs.Glob(s, nil, followSymlinks)
			if err != nil {
				return err
			}
			if len(result.Matches) == 0 {
				err := &os.PathError{
					Op:   "lstat",
					Path: s,
					Err:  os.ErrNotExist,
				}
				if args.IgnoreMissingFiles {
					if !args.Quiet {
						fmt.Fprintln(z.stderr, "warning:", err)
					}
				} else {
					return err
				}
			}
			srcs = append(srcs, result.Matches...)
		}
		if fa.GlobDir != "" {
			if exists, isDir, err := z.fs.Exists(fa.GlobDir); err != nil {
				return err
			} else if !exists && !args.IgnoreMissingFiles {
				err := &os.PathError{
					Op:   "lstat",
					Path: fa.GlobDir,
					Err:  os.ErrNotExist,
				}
				if args.IgnoreMissingFiles {
					if !args.Quiet {
						fmt.Fprintln(z.stderr, "warning:", err)
					}
				} else {
					return err
				}
			} else if !isDir && !args.IgnoreMissingFiles {
				err := &os.PathError{
					Op:   "lstat",
					Path: fa.GlobDir,
					Err:  syscall.ENOTDIR,
				}
				if args.IgnoreMissingFiles {
					if !args.Quiet {
						fmt.Fprintln(z.stderr, "warning:", err)
					}
				} else {
					return err
				}
			}
			result, err := z.fs.Glob(filepath.Join(fa.GlobDir, "**/*"), nil, followSymlinks)
			if err != nil {
				return err
			}
			srcs = append(srcs, result.Matches...)
		}
		for _, src := range srcs {
			err := fillPathPairs(fa, src, &pathMappings, args.NonDeflatedFiles, noCompression)
			if err != nil {
				return err
			}
		}
	}

	return z.write(w, pathMappings, args.ManifestSourcePath, args.EmulateJar, args.SrcJar, args.NumParallelJobs)
}

// Zip creates an output zip archive from the given sources.
func Zip(args ZipArgs) error {
	if args.OutputFilePath == "" {
		return fmt.Errorf("output file path must be nonempty")
	}

	buf := &bytes.Buffer{}
	var out io.Writer = buf

	var zipErr error

	if args.DoNotWrite {
		out = io.Discard
	} else if !args.WriteIfChanged {
		f, err := os.Create(args.OutputFilePath)
		if err != nil {
			return err
		}

		defer f.Close()
		defer func() {
			if zipErr != nil {
				os.Remove(args.OutputFilePath)
			}
		}()

		out = f
	}

	zipErr = zipTo(args, out)
	if zipErr != nil {
		return zipErr
	}

	if args.WriteIfChanged && !args.DoNotWrite {
		err := pathtools.WriteFileIfChanged(args.OutputFilePath, buf.Bytes(), 0666)
		if err != nil {
			return err
		}
	}

	return nil
}

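// exampleZipUsage is an illustrative sketch, not part of the original file:
// it shows how Zip is typically driven from code. The output path, source
// file and parameter values are hypothetical.
func exampleZipUsage() error {
	b := NewFileArgsBuilder().
		SourcePrefixToStrip("out/files").
		File("out/files/a.txt")
	if err := b.Error(); err != nil {
		return err
	}
	return Zip(ZipArgs{
		FileArgs:         b.FileArgs(),
		OutputFilePath:   "out/example.zip",
		CompressionLevel: 5, // flate levels 0-9; 0 stores everything uncompressed
		NumParallelJobs:  4, // e.g. the number of available CPUs
	})
}
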
func fillPathPairs(fa FileArg, src string, pathMappings *[]pathMapping,
	nonDeflatedFiles map[string]bool, noCompression bool) error {

	var dest string

	if fa.ExplicitPathInZip != "" {
		dest = fa.ExplicitPathInZip
	} else if fa.JunkPaths {
		dest = filepath.Base(src)
	} else {
		var err error
		dest, err = filepath.Rel(fa.SourcePrefixToStrip, src)
		if err != nil {
			return err
		}
		if strings.HasPrefix(dest, "../") {
			return IncorrectRelativeRootError{
				Path:         src,
				RelativeRoot: fa.SourcePrefixToStrip,
			}
		}
	}
	dest = filepath.Join(fa.PathPrefixInZip, dest)

	zipMethod := zip.Deflate
	if _, found := nonDeflatedFiles[dest]; found || noCompression {
		zipMethod = zip.Store
	}
	*pathMappings = append(*pathMappings,
		pathMapping{dest: dest, src: src, zipMethod: zipMethod})

	return nil
}

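// Illustrative mappings for fillPathPairs above (hypothetical paths):
//
//	src "out/files/a/b.txt", SourcePrefixToStrip "out/files", PathPrefixInZip "assets"
//	  -> dest "assets/a/b.txt"
//	src "out/files/a/b.txt", JunkPaths true, PathPrefixInZip "assets"
//	  -> dest "assets/b.txt"
//	src "out/files/a/b.txt", ExplicitPathInZip "renamed.txt", no PathPrefixInZip
//	  -> dest "renamed.txt"
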
func (z *ZipWriter) moveJavaFileBasedOnPackage(mapping *pathMapping) error {
	src := mapping.src
	var s os.FileInfo
	var err error
	if z.followSymlinks {
		s, err = z.fs.Stat(src)
	} else {
		s, err = z.fs.Lstat(src)
	}
	if err != nil {
		if os.IsNotExist(err) && z.ignoreMissingFiles {
			return nil
		}
		return err
	}
	if !s.Mode().IsRegular() {
		return nil
	}
	r, err := z.fs.Open(src)
	if err != nil {
		return err
	}
	// rewrite the destination using the package path if it can be determined
	pkg, err := jar.JavaPackage(r, src)
	err2 := r.Close()
	if err2 != nil {
		return err2
	}
	if err != nil {
		// ignore errors for now, leaving the file in its original location in the zip
	} else {
		mapping.dest = filepath.Join(filepath.Join(strings.Split(pkg, ".")...), filepath.Base(src))
	}
	return nil
}

func jarSort(mappings []pathMapping) {
	sort.SliceStable(mappings, func(i int, j int) bool {
		return jar.EntryNamesLess(mappings[i].dest, mappings[j].dest)
	})
}

func (z *ZipWriter) write(f io.Writer, pathMappings []pathMapping, manifest string,
	emulateJar, srcJar bool,
	parallelJobs int) error {

	z.errors = make(chan error)
	defer close(z.errors)

	// This channel size can be essentially unlimited -- it's used as a FIFO
	// queue to decouple the CPU and IO loads. Directories don't require any
	// compression time, but still cost some IO. The same goes for small files
	// that can be very fast to compress. Conversely, files that are more
	// difficult to compress won't take a correspondingly longer time to
	// write out.
	//
	// The optimum size here depends on your CPU and IO characteristics, and
	// the layout of your zip file. 1000 was chosen mostly at random as
	// something that worked reasonably well for a test file.
	//
	// The RateLimit object will put the upper bounds on the number of
	// parallel compressions and outstanding buffers.
	z.writeOps = make(chan chan *zipEntry, 1000)
	z.cpuRateLimiter = NewCPURateLimiter(int64(parallelJobs))
	z.memoryRateLimiter = NewMemoryRateLimiter(0)
	defer func() {
		z.cpuRateLimiter.Stop()
		z.memoryRateLimiter.Stop()
	}()

	if manifest != "" && !emulateJar {
		return errors.New("must specify --jar when specifying a manifest via -m")
	}

	// Move java source files to the correct folder based on the package statement inside of them.
	// This is done before the entry sorting so that they're still in the right order.
	if srcJar {
		var javaMoveErrors []error
		var javaMoveErrorsLock sync.Mutex
		var wg sync.WaitGroup
		for i := range pathMappings {
			if filepath.Ext(pathMappings[i].src) == ".java" {
				wg.Add(1)
				// Pass i explicitly so each goroutine works on its own mapping.
				go func(i int) {
					err := z.moveJavaFileBasedOnPackage(&pathMappings[i])
					if err != nil {
						javaMoveErrorsLock.Lock()
						javaMoveErrors = append(javaMoveErrors, err)
						javaMoveErrorsLock.Unlock()
					}
					wg.Done()
				}(i)
			}
		}
		wg.Wait()
		if len(javaMoveErrors) > 0 {
			return errors.Join(javaMoveErrors...)
		}
	}

	if emulateJar {
		// manifest may be empty, in which case addManifest will fill in a default
		pathMappings = append(pathMappings, pathMapping{jar.ManifestFile, manifest, zip.Deflate})

		jarSort(pathMappings)
	} else {
		sort.SliceStable(pathMappings, func(i int, j int) bool {
			return pathMappings[i].dest < pathMappings[j].dest
		})
	}

	go func() {
		var err error
		defer close(z.writeOps)

		for _, ele := range pathMappings {
			if emulateJar && ele.dest == jar.ManifestFile {
				err = z.addManifest(ele.dest, ele.src, ele.zipMethod)
			} else {
				err = z.addFile(ele.dest, ele.src, ele.zipMethod, emulateJar)
			}
			if err != nil {
				z.errors <- err
				return
			}
		}
	}()

	zipw := zip.NewWriter(f)

	var currentWriteOpChan chan *zipEntry
	var currentWriter io.WriteCloser
	var currentReaders chan chan io.Reader
	var currentReader chan io.Reader
	var done bool

	for !done {
		var writeOpsChan chan chan *zipEntry
		var writeOpChan chan *zipEntry
		var readersChan chan chan io.Reader

		if currentReader != nil {
			// Only read and process errors
		} else if currentReaders != nil {
			readersChan = currentReaders
		} else if currentWriteOpChan != nil {
			writeOpChan = currentWriteOpChan
		} else {
			writeOpsChan = z.writeOps
		}

		select {
		case writeOp, ok := <-writeOpsChan:
			if !ok {
				done = true
			}

			currentWriteOpChan = writeOp

		case op := <-writeOpChan:
			currentWriteOpChan = nil

			var err error
			if op.fh.Method == zip.Deflate {
				currentWriter, err = zipw.CreateCompressedHeader(op.fh)
			} else {
				var zw io.Writer

				op.fh.CompressedSize64 = op.fh.UncompressedSize64

				zw, err = zipw.CreateHeaderAndroid(op.fh)
				currentWriter = nopCloser{zw}
			}
			if err != nil {
				return err
			}

			currentReaders = op.futureReaders
			if op.futureReaders == nil {
				currentWriter.Close()
				currentWriter = nil
			}
			z.memoryRateLimiter.Finish(op.allocatedSize)

		case futureReader, ok := <-readersChan:
			if !ok {
				// Done with reading
				currentWriter.Close()
				currentWriter = nil
				currentReaders = nil
			}

			currentReader = futureReader

		case reader := <-currentReader:
			_, err := io.Copy(currentWriter, reader)
			if err != nil {
				return err
			}

			currentReader = nil

		case err := <-z.errors:
			return err
		}
	}

	// One last chance to catch an error
	select {
	case err := <-z.errors:
		return err
	default:
		zipw.Close()
		return nil
	}
}

// imports (possibly with compression) <src> into the zip at sub-path <dest>
func (z *ZipWriter) addFile(dest, src string, method uint16, emulateJar bool) error {
	var fileSize int64
	var executable bool

	var s os.FileInfo
	var err error
	if z.followSymlinks {
		s, err = z.fs.Stat(src)
	} else {
		s, err = z.fs.Lstat(src)
	}

	if err != nil {
		if os.IsNotExist(err) && z.ignoreMissingFiles {
			fmt.Fprintln(z.stderr, "warning:", err)
			return nil
		}
		return err
	}

	createParentDirs := func(dest, src string) error {
		if err := z.writeDirectory(filepath.Dir(dest), src, emulateJar); err != nil {
			return err
		}

		if prev, exists := z.createdDirs[dest]; exists {
			return fmt.Errorf("destination %q is both a directory %q and a file %q", dest, prev, src)
		}

		return nil
	}

	checkDuplicateFiles := func(dest, src string) (bool, error) {
		if prev, exists := z.createdFiles[dest]; exists {
			if prev != src {
				return true, ConflictingFileError{
					Dest: dest,
					Prev: prev,
					Src:  src,
				}
			}
			return true, nil
		}

		z.createdFiles[dest] = src
		return false, nil
	}

	if s.IsDir() {
		if z.directories {
			return z.writeDirectory(dest, src, emulateJar)
		}
		return nil
	} else if s.Mode()&os.ModeSymlink != 0 {
		err = createParentDirs(dest, src)
		if err != nil {
			return err
		}

		duplicate, err := checkDuplicateFiles(dest, src)
		if err != nil {
			return err
		}
		if duplicate {
			return nil
		}

		return z.writeSymlink(dest, src)
	} else if s.Mode().IsRegular() {
		r, err := z.fs.Open(src)
		if err != nil {
			return err
		}

		fileSize = s.Size()
		executable = s.Mode()&0100 != 0

		header := &zip.FileHeader{
			Name:               dest,
			Method:             method,
			UncompressedSize64: uint64(fileSize),
		}

		mode := os.FileMode(0644)
		if executable {
			mode = 0755
		}
		header.SetMode(mode)

		err = createParentDirs(dest, src)
		if err != nil {
			return err
		}

		duplicate, err := checkDuplicateFiles(dest, src)
		if err != nil {
			return err
		}
		if duplicate {
			return nil
		}

		return z.writeFileContents(header, r)
	} else {
		return fmt.Errorf("%s is not a file, directory, or symlink", src)
	}
}

func (z *ZipWriter) addManifest(dest string, src string, _ uint16) error {
	if prev, exists := z.createdDirs[dest]; exists {
		return fmt.Errorf("destination %q is both a directory %q and a file %q", dest, prev, src)
	}
	if prev, exists := z.createdFiles[dest]; exists {
		if prev != src {
			return ConflictingFileError{
				Dest: dest,
				Prev: prev,
				Src:  src,
			}
		}
		return nil
	}

	if err := z.writeDirectory(filepath.Dir(dest), src, true); err != nil {
		return err
	}

	var contents []byte
	if src != "" {
		f, err := z.fs.Open(src)
		if err != nil {
			return err
		}

		contents, err = ioutil.ReadAll(f)
		f.Close()
		if err != nil {
			return err
		}
	}

	fh, buf, err := jar.ManifestFileContents(contents)
	if err != nil {
		return err
	}

	reader := &byteReaderCloser{bytes.NewReader(buf), ioutil.NopCloser(nil)}

	return z.writeFileContents(fh, reader)
}

func (z *ZipWriter) writeFileContents(header *zip.FileHeader, r pathtools.ReaderAtSeekerCloser) (err error) {

	header.SetModTime(z.time)

	compressChan := make(chan *zipEntry, 1)
	z.writeOps <- compressChan

	// Pre-fill a zipEntry; it will be sent on compressChan once
	// we're sure about the Method and CRC.
	ze := &zipEntry{
		fh: header,
	}

	ze.allocatedSize = int64(header.UncompressedSize64)
	z.cpuRateLimiter.Request()
	z.memoryRateLimiter.Request(ze.allocatedSize)

	fileSize := int64(header.UncompressedSize64)
	if fileSize == 0 {
		fileSize = int64(header.UncompressedSize)
	}

	if header.Method == zip.Deflate && fileSize >= minParallelFileSize {
		wg := new(sync.WaitGroup)

		// Allocate enough buffer to hold all readers. We'll limit
		// this based on actual buffer sizes in RateLimit.
		ze.futureReaders = make(chan chan io.Reader, (fileSize/parallelBlockSize)+1)

		// Calculate the CRC and SHA256 in the background, since reading
		// the entire file could take a while.
		//
		// We could split this up into chunks as well, but it's faster
		// than the compression. Due to the Go Zip API, we also need to
		// know the result before we can begin writing the compressed
		// data out to the zipfile.
		//
		// We calculate SHA256 only if `-sha256` is set.
		wg.Add(1)
		go z.checksumFileAsync(r, ze, compressChan, wg)

		for start := int64(0); start < fileSize; start += parallelBlockSize {
			sr := io.NewSectionReader(r, start, parallelBlockSize)
			resultChan := make(chan io.Reader, 1)
			ze.futureReaders <- resultChan

			z.cpuRateLimiter.Request()

			last := !(start+parallelBlockSize < fileSize)
			var dict []byte
			if start >= windowSize {
				dict, err = ioutil.ReadAll(io.NewSectionReader(r, start-windowSize, windowSize))
				if err != nil {
					return err
				}
			}

			wg.Add(1)
			go z.compressPartialFile(sr, dict, last, resultChan, wg)
		}

		close(ze.futureReaders)

		// Close the file handle after all readers are done
		go func(wg *sync.WaitGroup, closer io.Closer) {
			wg.Wait()
			closer.Close()
		}(wg, r)
	} else {
		go func() {
			z.compressWholeFile(ze, r, compressChan)
			r.Close()
		}()
	}

	return nil
}

func (z *ZipWriter) checksumFileAsync(r io.ReadSeeker, ze *zipEntry, resultChan chan *zipEntry, wg *sync.WaitGroup) {
	defer wg.Done()
	defer z.cpuRateLimiter.Finish()

	z.checksumFile(r, ze)

	resultChan <- ze
	close(resultChan)
}

func (z *ZipWriter) checksumFile(r io.ReadSeeker, ze *zipEntry) {
	crc := crc32.NewIEEE()
	writers := []io.Writer{crc}

	var shaHasher hash.Hash
	if z.sha256Checksum && !ze.fh.Mode().IsDir() {
		shaHasher = sha256.New()
		writers = append(writers, shaHasher)
	}

	w := io.MultiWriter(writers...)

	_, err := io.Copy(w, r)
	if err != nil {
		z.errors <- err
		return
	}

	ze.fh.CRC32 = crc.Sum32()
	if shaHasher != nil {
		z.appendSHAToExtra(ze, shaHasher.Sum(nil))
	}
}

func (z *ZipWriter) appendSHAToExtra(ze *zipEntry, checksum []byte) {
	// The SHA256 checksum block consists of:
	// - Header ID, equal to Sha256HeaderID (2 bytes)
	// - Data size (2 bytes)
	// - Data block:
	//   - Signature, equal to Sha256HeaderSignature (2 bytes)
	//   - Data, the SHA checksum value
	var buf []byte
	buf = binary.LittleEndian.AppendUint16(buf, Sha256HeaderID)
	buf = binary.LittleEndian.AppendUint16(buf, uint16(len(checksum)+2))
	buf = binary.LittleEndian.AppendUint16(buf, Sha256HeaderSignature)
	buf = append(buf, checksum...)
	ze.fh.Extra = append(ze.fh.Extra, buf...)
}

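// parseSHA256Extra is an illustrative sketch, not used by the writer: it shows
// how a consumer could recover the checksum that appendSHAToExtra stores, by
// scanning the extra field for Sha256HeaderID, checking Sha256HeaderSignature,
// and returning the remaining data bytes. The function name is hypothetical.
func parseSHA256Extra(extra []byte) ([]byte, bool) {
	for len(extra) >= 4 {
		headerID := binary.LittleEndian.Uint16(extra[0:2])
		dataSize := int(binary.LittleEndian.Uint16(extra[2:4]))
		if len(extra) < 4+dataSize {
			break
		}
		data := extra[4 : 4+dataSize]
		if headerID == Sha256HeaderID && dataSize >= 2 &&
			binary.LittleEndian.Uint16(data[0:2]) == Sha256HeaderSignature {
			// Skip the 2-byte signature; the rest is the SHA256 checksum.
			return data[2:], true
		}
		// Not our block; advance to the next extra-field entry.
		extra = extra[4+dataSize:]
	}
	return nil, false
}
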
func (z *ZipWriter) compressPartialFile(r io.Reader, dict []byte, last bool, resultChan chan io.Reader, wg *sync.WaitGroup) {
	defer wg.Done()

	result, err := z.compressBlock(r, dict, last)
	if err != nil {
		z.errors <- err
		return
	}

	z.cpuRateLimiter.Finish()

	resultChan <- result
}

func (z *ZipWriter) compressBlock(r io.Reader, dict []byte, last bool) (*bytes.Buffer, error) {
	buf := new(bytes.Buffer)
	var fw *flate.Writer
	var err error
	if len(dict) > 0 {
		// There's no way to Reset a Writer with a new dictionary, so
		// don't use the Pool
		fw, err = flate.NewWriterDict(buf, z.compLevel, dict)
	} else {
		var ok bool
		if fw, ok = z.compressorPool.Get().(*flate.Writer); ok {
			fw.Reset(buf)
		} else {
			fw, err = flate.NewWriter(buf, z.compLevel)
		}
		defer z.compressorPool.Put(fw)
	}
	if err != nil {
		return nil, err
	}

	_, err = io.Copy(fw, r)
	if err != nil {
		return nil, err
	}
	if last {
		fw.Close()
	} else {
		fw.Flush()
	}

	return buf, nil
}

func (z *ZipWriter) compressWholeFile(ze *zipEntry, r io.ReadSeeker, compressChan chan *zipEntry) {
	z.checksumFile(r, ze)

	_, err := r.Seek(0, 0)
	if err != nil {
		z.errors <- err
		return
	}

	readFile := func(reader io.ReadSeeker) ([]byte, error) {
		_, err := reader.Seek(0, 0)
		if err != nil {
			return nil, err
		}

		buf, err := ioutil.ReadAll(reader)
		if err != nil {
			return nil, err
		}

		return buf, nil
	}

	ze.futureReaders = make(chan chan io.Reader, 1)
	futureReader := make(chan io.Reader, 1)
	ze.futureReaders <- futureReader
	close(ze.futureReaders)

	if ze.fh.Method == zip.Deflate {
		compressed, err := z.compressBlock(r, nil, true)
		if err != nil {
			z.errors <- err
			return
		}
		if uint64(compressed.Len()) < ze.fh.UncompressedSize64 {
			futureReader <- compressed
		} else {
			buf, err := readFile(r)
			if err != nil {
				z.errors <- err
				return
			}
			ze.fh.Method = zip.Store
			futureReader <- bytes.NewReader(buf)
		}
	} else {
		buf, err := readFile(r)
		if err != nil {
			z.errors <- err
			return
		}
		ze.fh.Method = zip.Store
		futureReader <- bytes.NewReader(buf)
	}

	z.cpuRateLimiter.Finish()

	close(futureReader)

	compressChan <- ze
	close(compressChan)
}

// writeDirectory annotates that dir is a directory created for the src file or directory, and adds
// the directory entry to the zip file if directories are enabled.
func (z *ZipWriter) writeDirectory(dir string, src string, emulateJar bool) error {
	// clean the input
	dir = filepath.Clean(dir)

	// discover any uncreated directories in the path
	var zipDirs []string
	for dir != "" && dir != "." {
		if _, exists := z.createdDirs[dir]; exists {
			break
		}

		if prev, exists := z.createdFiles[dir]; exists {
			return fmt.Errorf("destination %q is both a directory %q and a file %q", dir, src, prev)
		}

		z.createdDirs[dir] = src
		// parent directories precede their children
		zipDirs = append([]string{dir}, zipDirs...)

		dir = filepath.Dir(dir)
	}

	if z.directories {
		// make a directory entry for each uncreated directory
		for _, cleanDir := range zipDirs {
			var dirHeader *zip.FileHeader

			if emulateJar && cleanDir+"/" == jar.MetaDir {
				dirHeader = jar.MetaDirFileHeader()
			} else {
				dirHeader = &zip.FileHeader{
					Name: cleanDir + "/",
				}
				dirHeader.SetMode(0755 | os.ModeDir)
			}

			dirHeader.SetModTime(z.time)

			ze := make(chan *zipEntry, 1)
			ze <- &zipEntry{
				fh: dirHeader,
			}
			close(ze)
			z.writeOps <- ze
		}
	}

	return nil
}

func (z *ZipWriter) writeSymlink(rel, file string) error {
	fileHeader := &zip.FileHeader{
		Name: rel,
	}
	fileHeader.SetModTime(z.time)
	fileHeader.SetMode(0777 | os.ModeSymlink)

	dest, err := z.fs.Readlink(file)
	if err != nil {
		return err
	}

	fileHeader.UncompressedSize64 = uint64(len(dest))
	fileHeader.CRC32 = crc32.ChecksumIEEE([]byte(dest))

	ze := make(chan *zipEntry, 1)
	futureReaders := make(chan chan io.Reader, 1)
	futureReader := make(chan io.Reader, 1)
	futureReaders <- futureReader
	close(futureReaders)
	futureReader <- bytes.NewBufferString(dest)
	close(futureReader)

	ze <- &zipEntry{
		fh:            fileHeader,
		futureReaders: futureReaders,
	}
	close(ze)
	z.writeOps <- ze

	return nil
}