1 /*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "ziparchive/zip_writer.h"
18
19 #include <sys/param.h>
20 #include <sys/stat.h>
21 #include <zlib.h>
22 #include <cstdio>
23 #define DEF_MEM_LEVEL 8 // normally in zutil.h?
24
25 #include <memory>
26 #include <vector>
27
28 #include "android-base/logging.h"
29
30 #include "entry_name_utils-inl.h"
31 #include "zip_archive_common.h"
32
// Replacement for the BSD powerof2() macro: evaluates non-zero iff `x` is a
// power of two. Computing x - 1 via __builtin_add_overflow keeps the integer
// sanitizers quiet; if the subtraction overflows (e.g. x == 0 for an unsigned
// type) the macro reports "power of two", and callers that care reject a zero
// alignment explicitly (see StartAlignedEntryWithTime).
#undef powerof2
#define powerof2(x) \
  ({ \
    __typeof__(x) _x = (x); \
    __typeof__(x) _x2; \
    __builtin_add_overflow(_x, -1, &_x2) ? 1 : ((_x2 & _x) == 0); \
  })
40
/* Zip compression methods we support */
enum {
  kCompressStored = 0,    // no compression
  kCompressDeflated = 8,  // standard deflate
};

// Size of the output buffer used for compression.
static const size_t kBufSize = 32768u;

// Error codes returned by the ZipWriter API; ErrorCodeString() maps the
// negative codes to the strings in sErrorCodes below.

// No error, operation completed successfully.
static const int32_t kNoError = 0;

// The ZipWriter is in a bad state.
static const int32_t kInvalidState = -1;

// There was an IO error while writing to disk.
static const int32_t kIoError = -2;

// The zip entry name was invalid.
static const int32_t kInvalidEntryName = -3;

// An error occurred in zlib.
static const int32_t kZlibError = -4;

// The start aligned function was called with the aligned flag.
static const int32_t kInvalidAlign32Flag = -5;

// The alignment parameter is not a power of 2.
static const int32_t kInvalidAlignment = -6;
70
// Human-readable messages for the error codes above, in the same order as
// the kInvalidState..kInvalidAlignment constants. The original table stopped
// at "Zlib error", leaving kInvalidAlign32Flag (-5) and kInvalidAlignment
// (-6) without strings; keep this list in sync when adding codes.
static const char* sErrorCodes[] = {
    "Invalid state",  "IO error",               "Invalid entry name",
    "Zlib error",     "Invalid alignment flag", "Invalid alignment",
};
74
ErrorCodeString(int32_t error_code)75 const char* ZipWriter::ErrorCodeString(int32_t error_code) {
76 if (error_code < 0 && (-error_code) < static_cast<int32_t>(arraysize(sErrorCodes))) {
77 return sErrorCodes[-error_code];
78 }
79 return nullptr;
80 }
81
DeleteZStream(z_stream * stream)82 static void DeleteZStream(z_stream* stream) {
83 deflateEnd(stream);
84 delete stream;
85 }
86
ZipWriter(FILE * f)87 ZipWriter::ZipWriter(FILE* f)
88 : file_(f),
89 seekable_(false),
90 current_offset_(0),
91 state_(State::kWritingZip),
92 z_stream_(nullptr, DeleteZStream),
93 buffer_(kBufSize) {
94 // Check if the file is seekable (regular file). If fstat fails, that's fine, subsequent calls
95 // will fail as well.
96 struct stat file_stats;
97 if (fstat(fileno(f), &file_stats) == 0) {
98 seekable_ = S_ISREG(file_stats.st_mode);
99 }
100 }
101
ZipWriter(ZipWriter && writer)102 ZipWriter::ZipWriter(ZipWriter&& writer) noexcept
103 : file_(writer.file_),
104 seekable_(writer.seekable_),
105 current_offset_(writer.current_offset_),
106 state_(writer.state_),
107 files_(std::move(writer.files_)),
108 z_stream_(std::move(writer.z_stream_)),
109 buffer_(std::move(writer.buffer_)) {
110 writer.file_ = nullptr;
111 writer.state_ = State::kError;
112 }
113
operator =(ZipWriter && writer)114 ZipWriter& ZipWriter::operator=(ZipWriter&& writer) noexcept {
115 file_ = writer.file_;
116 seekable_ = writer.seekable_;
117 current_offset_ = writer.current_offset_;
118 state_ = writer.state_;
119 files_ = std::move(writer.files_);
120 z_stream_ = std::move(writer.z_stream_);
121 buffer_ = std::move(writer.buffer_);
122 writer.file_ = nullptr;
123 writer.state_ = State::kError;
124 return *this;
125 }
126
HandleError(int32_t error_code)127 int32_t ZipWriter::HandleError(int32_t error_code) {
128 state_ = State::kError;
129 z_stream_.reset();
130 return error_code;
131 }
132
StartEntry(std::string_view path,size_t flags)133 int32_t ZipWriter::StartEntry(std::string_view path, size_t flags) {
134 uint32_t alignment = 0;
135 if (flags & kAlign32) {
136 flags &= ~kAlign32;
137 alignment = 4;
138 }
139 return StartAlignedEntryWithTime(path, flags, time_t(), alignment);
140 }
141
// Starts a new entry whose data will be aligned to |alignment| bytes (which
// must be a power of two and fit in a uint16_t), with a zeroed timestamp.
// |flags| must not contain kAlign32; pass an explicit alignment instead.
int32_t ZipWriter::StartAlignedEntry(std::string_view path, size_t flags, uint32_t alignment) {
  return StartAlignedEntryWithTime(path, flags, time_t(), alignment);
}
145
StartEntryWithTime(std::string_view path,size_t flags,time_t time)146 int32_t ZipWriter::StartEntryWithTime(std::string_view path, size_t flags, time_t time) {
147 uint32_t alignment = 0;
148 if (flags & kAlign32) {
149 flags &= ~kAlign32;
150 alignment = 4;
151 }
152 return StartAlignedEntryWithTime(path, flags, time, alignment);
153 }
154
// Converts a unix timestamp to the MS-DOS date/time pair stored in zip
// headers. DOS timestamps have 2-second resolution and cannot represent
// years before 1980, so seconds are rounded and the year is clamped.
static void ExtractTimeAndDate(time_t when, uint16_t* out_time, uint16_t* out_date) {
  // Round up to an even number of seconds (DOS stores seconds / 2).
  const time_t rounded = static_cast<time_t>((static_cast<unsigned long>(when) + 1) & (~1));

  struct tm* broken_down;
#if !defined(_WIN32)
  struct tm tm_storage;
  broken_down = localtime_r(&rounded, &tm_storage);
#else
  broken_down = localtime(&rounded);
#endif

  // tm_year counts from 1900; clamp anything before 1980 to the DOS epoch.
  int year = broken_down->tm_year;
  if (year < 80) {
    year = 80;
  }

  // Date layout: bits 15-9 = year - 1980, 8-5 = month (1-12), 4-0 = day.
  *out_date = static_cast<uint16_t>((year - 80) << 9 | (broken_down->tm_mon + 1) << 5 |
                                    broken_down->tm_mday);
  // Time layout: bits 15-11 = hour, 10-5 = minute, 4-0 = seconds / 2.
  *out_time = static_cast<uint16_t>(broken_down->tm_hour << 11 | broken_down->tm_min << 5 |
                                    broken_down->tm_sec >> 1);
}
175
CopyFromFileEntry(const ZipWriter::FileEntry & src,bool use_data_descriptor,LocalFileHeader * dst)176 static void CopyFromFileEntry(const ZipWriter::FileEntry& src, bool use_data_descriptor,
177 LocalFileHeader* dst) {
178 dst->lfh_signature = LocalFileHeader::kSignature;
179 if (use_data_descriptor) {
180 // Set this flag to denote that a DataDescriptor struct will appear after the data,
181 // containing the crc and size fields.
182 dst->gpb_flags |= kGPBDDFlagMask;
183
184 // The size and crc fields must be 0.
185 dst->compressed_size = 0u;
186 dst->uncompressed_size = 0u;
187 dst->crc32 = 0u;
188 } else {
189 dst->compressed_size = src.compressed_size;
190 dst->uncompressed_size = src.uncompressed_size;
191 dst->crc32 = src.crc32;
192 }
193 dst->compression_method = src.compression_method;
194 dst->last_mod_time = src.last_mod_time;
195 dst->last_mod_date = src.last_mod_date;
196 DCHECK_LE(src.path.size(), std::numeric_limits<uint16_t>::max());
197 dst->file_name_length = static_cast<uint16_t>(src.path.size());
198 dst->extra_field_length = src.padding_length;
199 }
200
// Writes the local file header (plus any alignment padding in the "extra
// field") for a new entry and switches to kWritingEntry so WriteBytes()
// appends this entry's data. Returns kNoError or a negative error code.
int32_t ZipWriter::StartAlignedEntryWithTime(std::string_view path, size_t flags, time_t time,
                                             uint32_t alignment) {
  if (state_ != State::kWritingZip) {
    return kInvalidState;
  }

  // Can only have 65535 entries: the EOCD record stores the count as uint16_t.
  if (files_.size() == std::numeric_limits<uint16_t>::max()) {
    return HandleError(kIoError);
  }

  // kAlign32 is only accepted by the StartEntry*() wrappers, which translate
  // it into alignment == 4 before reaching here.
  if (flags & kAlign32) {
    return kInvalidAlign32Flag;
  }

  // alignment == 0 (no alignment requested) passes powerof2(); see the macro.
  if (powerof2(alignment) == 0) {
    return kInvalidAlignment;
  }
  // The padding is stored in a uint16_t extra_field_length.
  if (alignment > std::numeric_limits<uint16_t>::max()) {
    return kInvalidAlignment;
  }

  FileEntry file_entry = {};
  file_entry.local_file_header_offset = current_offset_;
  file_entry.path = path;
  // No support for larger than 4GB files.
  if (file_entry.local_file_header_offset > std::numeric_limits<uint32_t>::max()) {
    return HandleError(kIoError);
  }

  if (!IsValidEntryName(reinterpret_cast<const uint8_t*>(file_entry.path.data()),
                        file_entry.path.size())) {
    return kInvalidEntryName;
  }

  if (flags & ZipWriter::kCompress) {
    file_entry.compression_method = kCompressDeflated;

    // zlib level 6 is the default speed/size trade-off; 9 is best compression.
    int compression_level = (flags & ZipWriter::kDefaultCompression) ? 6 : 9;
    int32_t result = PrepareDeflate(compression_level);
    if (result != kNoError) {
      return result;
    }
  } else {
    file_entry.compression_method = kCompressStored;
  }

  ExtractTimeAndDate(time, &file_entry.last_mod_time, &file_entry.last_mod_date);

  // Offset at which the entry's payload will begin (header + name).
  off_t offset = current_offset_ + sizeof(LocalFileHeader) + file_entry.path.size();
  // prepare a pre-zeroed memory page in case when we need to pad some aligned data.
  static constexpr auto kPageSize = 4096;
  static constexpr char kSmallZeroPadding[kPageSize] = {};
  // use this buffer if our preallocated one is too small
  std::vector<char> zero_padding_big;
  const char* zero_padding = nullptr;

  if (alignment != 0 && (offset & (alignment - 1))) {
    // Pad the extra field so the data will be aligned.
    uint16_t padding = static_cast<uint16_t>(alignment - (offset % alignment));
    file_entry.padding_length = padding;
    offset += padding;
    if (padding <= std::size(kSmallZeroPadding)) {
      zero_padding = kSmallZeroPadding;
    } else {
      zero_padding_big.resize(padding, 0);
      zero_padding = zero_padding_big.data();
    }
  }

  LocalFileHeader header = {};
  // Always start expecting a data descriptor. When the data has finished being written,
  // if it is possible to seek back, the GPB flag will reset and the sizes written.
  CopyFromFileEntry(file_entry, true /*use_data_descriptor*/, &header);

  if (fwrite(&header, sizeof(header), 1, file_) != 1) {
    return HandleError(kIoError);
  }

  if (fwrite(path.data(), 1, path.size(), file_) != path.size()) {
    return HandleError(kIoError);
  }

  // Zero padding written as the "extra field" aligns the payload.
  if (file_entry.padding_length != 0 && fwrite(zero_padding, 1, file_entry.padding_length,
                                               file_) != file_entry.padding_length) {
    return HandleError(kIoError);
  }

  current_file_entry_ = std::move(file_entry);
  current_offset_ = offset;
  state_ = State::kWritingEntry;
  return kNoError;
}
294
DiscardLastEntry()295 int32_t ZipWriter::DiscardLastEntry() {
296 if (state_ != State::kWritingZip || files_.empty()) {
297 return kInvalidState;
298 }
299
300 FileEntry& last_entry = files_.back();
301 current_offset_ = last_entry.local_file_header_offset;
302 if (fseeko(file_, current_offset_, SEEK_SET) != 0) {
303 return HandleError(kIoError);
304 }
305 files_.pop_back();
306 return kNoError;
307 }
308
GetLastEntry(FileEntry * out_entry)309 int32_t ZipWriter::GetLastEntry(FileEntry* out_entry) {
310 CHECK(out_entry != nullptr);
311
312 if (files_.empty()) {
313 return kInvalidState;
314 }
315 *out_entry = files_.back();
316 return kNoError;
317 }
318
// Allocates and configures the zlib deflate stream used to compress the next
// entry's data. Returns kNoError, or kZlibError (via HandleError) if zlib
// rejects the configuration.
int32_t ZipWriter::PrepareDeflate(int compression_level) {
  CHECK(state_ == State::kWritingZip);

  // Initialize the z_stream for compression.
  z_stream_ = std::unique_ptr<z_stream, void (*)(z_stream*)>(new z_stream(), DeleteZStream);

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
  // Negative window bits request a raw deflate stream (no zlib header or
  // trailing checksum), which is the form the zip format stores.
  int zerr = deflateInit2(z_stream_.get(), compression_level, Z_DEFLATED,
                          -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
#pragma GCC diagnostic pop

  if (zerr != Z_OK) {
    if (zerr == Z_VERSION_ERROR) {
      LOG(ERROR) << "Installed zlib is not compatible with linked version (" << ZLIB_VERSION << ")";
      return HandleError(kZlibError);
    } else {
      LOG(ERROR) << "deflateInit2 failed (zerr=" << zerr << ")";
      return HandleError(kZlibError);
    }
  }

  // Point the stream's output at our scratch buffer.
  z_stream_->next_out = buffer_.data();
  DCHECK_EQ(buffer_.size(), kBufSize);
  z_stream_->avail_out = static_cast<uint32_t>(buffer_.size());
  return kNoError;
}
346
WriteBytes(const void * data,size_t len)347 int32_t ZipWriter::WriteBytes(const void* data, size_t len) {
348 if (state_ != State::kWritingEntry) {
349 return HandleError(kInvalidState);
350 }
351 // Need to be able to mark down data correctly.
352 if (len + static_cast<uint64_t>(current_file_entry_.uncompressed_size) >
353 std::numeric_limits<uint32_t>::max()) {
354 return HandleError(kIoError);
355 }
356 uint32_t len32 = static_cast<uint32_t>(len);
357
358 int32_t result = kNoError;
359 if (current_file_entry_.compression_method & kCompressDeflated) {
360 result = CompressBytes(¤t_file_entry_, data, len32);
361 } else {
362 result = StoreBytes(¤t_file_entry_, data, len32);
363 }
364
365 if (result != kNoError) {
366 return result;
367 }
368
369 current_file_entry_.crc32 = static_cast<uint32_t>(
370 crc32(current_file_entry_.crc32, reinterpret_cast<const Bytef*>(data), len32));
371 current_file_entry_.uncompressed_size += len32;
372 return kNoError;
373 }
374
StoreBytes(FileEntry * file,const void * data,uint32_t len)375 int32_t ZipWriter::StoreBytes(FileEntry* file, const void* data, uint32_t len) {
376 CHECK(state_ == State::kWritingEntry);
377
378 if (fwrite(data, 1, len, file_) != len) {
379 return HandleError(kIoError);
380 }
381 file->compressed_size += len;
382 current_offset_ += len;
383 return kNoError;
384 }
385
// Feeds |len| bytes through the deflate stream, flushing the output buffer
// to disk whenever it fills. Note compressed bytes may remain buffered
// inside zlib until FlushCompressedBytes() finishes the stream.
int32_t ZipWriter::CompressBytes(FileEntry* file, const void* data, uint32_t len) {
  CHECK(state_ == State::kWritingEntry);
  CHECK(z_stream_);
  CHECK(z_stream_->next_out != nullptr);
  CHECK(z_stream_->avail_out != 0);

  // Prepare the input.
  z_stream_->next_in = reinterpret_cast<const uint8_t*>(data);
  z_stream_->avail_in = len;

  while (z_stream_->avail_in > 0) {
    // We have more data to compress.
    int zerr = deflate(z_stream_.get(), Z_NO_FLUSH);
    if (zerr != Z_OK) {
      return HandleError(kZlibError);
    }

    if (z_stream_->avail_out == 0) {
      // The output is full, let's write it to disk.
      size_t write_bytes = z_stream_->next_out - buffer_.data();
      if (fwrite(buffer_.data(), 1, write_bytes, file_) != write_bytes) {
        return HandleError(kIoError);
      }
      file->compressed_size += write_bytes;
      current_offset_ += write_bytes;

      // Reset the output buffer for the next input.
      z_stream_->next_out = buffer_.data();
      DCHECK_EQ(buffer_.size(), kBufSize);
      z_stream_->avail_out = static_cast<uint32_t>(buffer_.size());
    }
  }
  return kNoError;
}
420
// Finishes the deflate stream for the current entry: drains the output
// buffer for as long as zlib still has data to emit, writes the final
// partial buffer, then tears down the z_stream.
int32_t ZipWriter::FlushCompressedBytes(FileEntry* file) {
  CHECK(state_ == State::kWritingEntry);
  CHECK(z_stream_);
  CHECK(z_stream_->next_out != nullptr);
  CHECK(z_stream_->avail_out != 0);

  // Keep deflating while there isn't enough space in the buffer to
  // complete the compress.
  int zerr;
  while ((zerr = deflate(z_stream_.get(), Z_FINISH)) == Z_OK) {
    // Z_OK under Z_FINISH means the output buffer filled before the stream
    // could end; flush it and loop.
    CHECK(z_stream_->avail_out == 0);
    size_t write_bytes = z_stream_->next_out - buffer_.data();
    if (fwrite(buffer_.data(), 1, write_bytes, file_) != write_bytes) {
      return HandleError(kIoError);
    }
    file->compressed_size += write_bytes;
    current_offset_ += write_bytes;

    z_stream_->next_out = buffer_.data();
    DCHECK_EQ(buffer_.size(), kBufSize);
    z_stream_->avail_out = static_cast<uint32_t>(buffer_.size());
  }
  if (zerr != Z_STREAM_END) {
    return HandleError(kZlibError);
  }

  // Write out whatever remained in the buffer when the stream ended.
  size_t write_bytes = z_stream_->next_out - buffer_.data();
  if (write_bytes != 0) {
    if (fwrite(buffer_.data(), 1, write_bytes, file_) != write_bytes) {
      return HandleError(kIoError);
    }
    file->compressed_size += write_bytes;
    current_offset_ += write_bytes;
  }
  z_stream_.reset();
  return kNoError;
}
458
ShouldUseDataDescriptor() const459 bool ZipWriter::ShouldUseDataDescriptor() const {
460 // Only use a trailing "data descriptor" if the output isn't seekable.
461 return !seekable_;
462 }
463
// Completes the current entry: flushes any pending compressed data, then
// either appends a DataDescriptor (non-seekable output) or seeks back and
// rewrites the local file header with the final crc/sizes (seekable output).
// On success the entry joins files_ and the writer returns to kWritingZip.
int32_t ZipWriter::FinishEntry() {
  if (state_ != State::kWritingEntry) {
    return kInvalidState;
  }

  if (current_file_entry_.compression_method & kCompressDeflated) {
    int32_t result = FlushCompressedBytes(&current_file_entry_);
    if (result != kNoError) {
      return result;
    }
  }

  if (ShouldUseDataDescriptor()) {
    // Some versions of ZIP don't allow STORED data to have a trailing DataDescriptor.
    // If this file is not seekable, or if the data is compressed, write a DataDescriptor.
    // We haven't supported zip64 format yet. Write both uncompressed size and compressed
    // size as uint32_t.
    std::vector<uint32_t> dataDescriptor = {
        DataDescriptor::kOptSignature, current_file_entry_.crc32,
        current_file_entry_.compressed_size, current_file_entry_.uncompressed_size};
    if (fwrite(dataDescriptor.data(), dataDescriptor.size() * sizeof(uint32_t), 1, file_) != 1) {
      return HandleError(kIoError);
    }

    current_offset_ += sizeof(uint32_t) * dataDescriptor.size();
  } else {
    // Seek back to the header and rewrite to include the size.
    if (fseeko(file_, current_file_entry_.local_file_header_offset, SEEK_SET) != 0) {
      return HandleError(kIoError);
    }

    LocalFileHeader header = {};
    CopyFromFileEntry(current_file_entry_, false /*use_data_descriptor*/, &header);

    if (fwrite(&header, sizeof(header), 1, file_) != 1) {
      return HandleError(kIoError);
    }

    // Return to the end of the entry's data before the next write.
    if (fseeko(file_, current_offset_, SEEK_SET) != 0) {
      return HandleError(kIoError);
    }
  }

  files_.emplace_back(std::move(current_file_entry_));
  state_ = State::kWritingZip;
  return kNoError;
}
511
// Writes the central directory (one record per finished entry) followed by
// the end-of-central-directory record, truncates any stale trailing bytes,
// and flushes. On success the writer enters kDone and must not be reused.
int32_t ZipWriter::Finish() {
  if (state_ != State::kWritingZip) {
    return kInvalidState;
  }

  off_t startOfCdr = current_offset_;
  for (FileEntry& file : files_) {
    CentralDirectoryRecord cdr = {};
    cdr.record_signature = CentralDirectoryRecord::kSignature;
    if (ShouldUseDataDescriptor()) {
      // Must match the flag written in each entry's local file header.
      cdr.gpb_flags |= kGPBDDFlagMask;
    }
    cdr.compression_method = file.compression_method;
    cdr.last_mod_time = file.last_mod_time;
    cdr.last_mod_date = file.last_mod_date;
    cdr.crc32 = file.crc32;
    cdr.compressed_size = file.compressed_size;
    cdr.uncompressed_size = file.uncompressed_size;
    // Checked in IsValidEntryName.
    DCHECK_LE(file.path.size(), std::numeric_limits<uint16_t>::max());
    cdr.file_name_length = static_cast<uint16_t>(file.path.size());
    // Checked in StartAlignedEntryWithTime.
    DCHECK_LE(file.local_file_header_offset, std::numeric_limits<uint32_t>::max());
    cdr.local_file_header_offset = static_cast<uint32_t>(file.local_file_header_offset);
    if (fwrite(&cdr, sizeof(cdr), 1, file_) != 1) {
      return HandleError(kIoError);
    }

    if (fwrite(file.path.data(), 1, file.path.size(), file_) != file.path.size()) {
      return HandleError(kIoError);
    }

    current_offset_ += sizeof(cdr) + file.path.size();
  }

  EocdRecord er = {};
  er.eocd_signature = EocdRecord::kSignature;
  er.disk_num = 0;
  er.cd_start_disk = 0;
  // Checked when adding entries.
  DCHECK_LE(files_.size(), std::numeric_limits<uint16_t>::max());
  er.num_records_on_disk = static_cast<uint16_t>(files_.size());
  er.num_records = static_cast<uint16_t>(files_.size());
  // No zip64 support: all offsets and sizes must fit in 32 bits.
  if (current_offset_ > std::numeric_limits<uint32_t>::max()) {
    return HandleError(kIoError);
  }
  er.cd_size = static_cast<uint32_t>(current_offset_ - startOfCdr);
  er.cd_start_offset = static_cast<uint32_t>(startOfCdr);

  if (fwrite(&er, sizeof(er), 1, file_) != 1) {
    return HandleError(kIoError);
  }

  current_offset_ += sizeof(er);

  // Since we can BackUp() and potentially finish writing at an offset less than one we had
  // already written at, we must truncate the file.

  if (ftruncate(fileno(file_), current_offset_) != 0) {
    return HandleError(kIoError);
  }

  if (fflush(file_) != 0) {
    return HandleError(kIoError);
  }

  state_ = State::kDone;
  return kNoError;
}
581