• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //
2 // Copyright (C) 2020 The Android Open Source Project
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //      http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 //
16 
17 #include <sys/types.h>
18 #include <unistd.h>
19 
20 #include <limits>
21 #include <queue>
22 
23 #include <android-base/file.h>
24 #include <android-base/logging.h>
25 #include <android-base/unique_fd.h>
26 #include <brotli/encode.h>
27 #include <libsnapshot/cow_reader.h>
28 #include <libsnapshot/cow_writer.h>
29 #include <zlib.h>
30 
31 namespace android {
32 namespace snapshot {
33 
34 static_assert(sizeof(off_t) == sizeof(uint64_t));
35 
36 using android::base::borrowed_fd;
37 using android::base::unique_fd;
38 
AddCopy(uint64_t new_block,uint64_t old_block)39 bool ICowWriter::AddCopy(uint64_t new_block, uint64_t old_block) {
40     if (!ValidateNewBlock(new_block)) {
41         return false;
42     }
43     return EmitCopy(new_block, old_block);
44 }
45 
AddRawBlocks(uint64_t new_block_start,const void * data,size_t size)46 bool ICowWriter::AddRawBlocks(uint64_t new_block_start, const void* data, size_t size) {
47     if (size % options_.block_size != 0) {
48         LOG(ERROR) << "AddRawBlocks: size " << size << " is not a multiple of "
49                    << options_.block_size;
50         return false;
51     }
52 
53     uint64_t num_blocks = size / options_.block_size;
54     uint64_t last_block = new_block_start + num_blocks - 1;
55     if (!ValidateNewBlock(last_block)) {
56         return false;
57     }
58     return EmitRawBlocks(new_block_start, data, size);
59 }
60 
AddXorBlocks(uint32_t new_block_start,const void * data,size_t size,uint32_t old_block,uint16_t offset)61 bool ICowWriter::AddXorBlocks(uint32_t new_block_start, const void* data, size_t size,
62                               uint32_t old_block, uint16_t offset) {
63     if (size % options_.block_size != 0) {
64         LOG(ERROR) << "AddRawBlocks: size " << size << " is not a multiple of "
65                    << options_.block_size;
66         return false;
67     }
68 
69     uint64_t num_blocks = size / options_.block_size;
70     uint64_t last_block = new_block_start + num_blocks - 1;
71     if (!ValidateNewBlock(last_block)) {
72         return false;
73     }
74     if (offset >= options_.block_size) {
75         LOG(ERROR) << "AddXorBlocks: offset " << offset << " is not less than "
76                    << options_.block_size;
77     }
78     return EmitXorBlocks(new_block_start, data, size, old_block, offset);
79 }
80 
AddZeroBlocks(uint64_t new_block_start,uint64_t num_blocks)81 bool ICowWriter::AddZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) {
82     uint64_t last_block = new_block_start + num_blocks - 1;
83     if (!ValidateNewBlock(last_block)) {
84         return false;
85     }
86     return EmitZeroBlocks(new_block_start, num_blocks);
87 }
88 
// Record a label op; a later InitializeAppend(fd, label) can resume from it.
bool ICowWriter::AddLabel(uint64_t label) {
    return EmitLabel(label);
}
92 
// Record |num_ops| sequence entries (32-bit values) from |data|.
bool ICowWriter::AddSequenceData(size_t num_ops, const uint32_t* data) {
    return EmitSequenceData(num_ops, data);
}
96 
ValidateNewBlock(uint64_t new_block)97 bool ICowWriter::ValidateNewBlock(uint64_t new_block) {
98     if (options_.max_blocks && new_block >= options_.max_blocks.value()) {
99         LOG(ERROR) << "New block " << new_block << " exceeds maximum block count "
100                    << options_.max_blocks.value();
101         return false;
102     }
103     return true;
104 }
105 
// fd_ starts invalid (-1); a usable descriptor is installed later via
// Initialize()/InitializeAppend() -> SetFd().
CowWriter::CowWriter(const CowOptions& options) : ICowWriter(options), fd_(-1) {
    SetupHeaders();
}
109 
// Reset header_ and footer_ to a pristine state derived from options_.
// buffer_size starts at 0 and is enlarged in OpenForWrite() when
// options_.scratch_space is requested.
void CowWriter::SetupHeaders() {
    header_ = {};
    header_.magic = kCowMagicNumber;
    header_.major_version = kCowVersionMajor;
    header_.minor_version = kCowVersionMinor;
    header_.header_size = sizeof(CowHeader);
    header_.footer_size = sizeof(CowFooter);
    header_.op_size = sizeof(CowOperation);
    header_.block_size = options_.block_size;
    header_.num_merge_ops = options_.num_merge_ops;
    header_.cluster_ops = options_.cluster_ops;
    header_.buffer_size = 0;
    footer_ = {};
    // NOTE(review): data_length is hard-coded to 64 — presumably the size of
    // the footer's data/checksum region; confirm against CowFooter's layout.
    footer_.op.data_length = 64;
    footer_.op.type = kCowFooterOp;
}
126 
ParseOptions()127 bool CowWriter::ParseOptions() {
128     if (options_.compression == "gz") {
129         compression_ = kCowCompressGz;
130     } else if (options_.compression == "brotli") {
131         compression_ = kCowCompressBrotli;
132     } else if (options_.compression == "none") {
133         compression_ = kCowCompressNone;
134     } else if (!options_.compression.empty()) {
135         LOG(ERROR) << "unrecognized compression: " << options_.compression;
136         return false;
137     }
138     if (options_.cluster_ops == 1) {
139         LOG(ERROR) << "Clusters must contain at least two operations to function.";
140         return false;
141     }
142     return true;
143 }
144 
SetFd(android::base::borrowed_fd fd)145 bool CowWriter::SetFd(android::base::borrowed_fd fd) {
146     if (fd.get() < 0) {
147         owned_fd_.reset(open("/dev/null", O_RDWR | O_CLOEXEC));
148         if (owned_fd_ < 0) {
149             PLOG(ERROR) << "open /dev/null failed";
150             return false;
151         }
152         fd_ = owned_fd_;
153         is_dev_null_ = true;
154     } else {
155         fd_ = fd;
156 
157         struct stat stat;
158         if (fstat(fd.get(), &stat) < 0) {
159             PLOG(ERROR) << "fstat failed";
160             return false;
161         }
162         is_block_device_ = S_ISBLK(stat.st_mode);
163     }
164     return true;
165 }
166 
// Owning-fd variant: take ownership of |fd| and delegate to the borrowed-fd
// overload.
bool CowWriter::Initialize(unique_fd&& fd) {
    owned_fd_ = std::move(fd);
    return Initialize(borrowed_fd{owned_fd_});
}
171 
// Prepare a brand-new COW image on |fd|: bind the descriptor, parse the
// options, then write the initial header/scratch layout.
bool CowWriter::Initialize(borrowed_fd fd) {
    if (!SetFd(fd) || !ParseOptions()) {
        return false;
    }

    return OpenForWrite();
}
179 
// Owning-fd variant: take ownership of |fd| and delegate to the borrowed-fd
// overload.
bool CowWriter::InitializeAppend(android::base::unique_fd&& fd, uint64_t label) {
    owned_fd_ = std::move(fd);
    return InitializeAppend(android::base::borrowed_fd{owned_fd_}, label);
}
184 
// Resume writing an existing COW image at the op stream position recorded by
// label |label|.
bool CowWriter::InitializeAppend(android::base::borrowed_fd fd, uint64_t label) {
    if (!SetFd(fd) || !ParseOptions()) {
        return false;
    }

    return OpenForAppend(label);
}
192 
InitPos()193 void CowWriter::InitPos() {
194     next_op_pos_ = sizeof(header_) + header_.buffer_size;
195     cluster_size_ = header_.cluster_ops * sizeof(CowOperation);
196     if (header_.cluster_ops) {
197         next_data_pos_ = next_op_pos_ + cluster_size_;
198     } else {
199         next_data_pos_ = next_op_pos_ + sizeof(CowOperation);
200     }
201     ops_.clear();
202     current_cluster_size_ = 0;
203     current_data_size_ = 0;
204 }
205 
// Lay out a fresh COW image on fd_: write a provisional header, zero-fill the
// optional scratch region, fsync, and leave the descriptor positioned at the
// first op slot. Returns false on any I/O failure.
bool CowWriter::OpenForWrite() {
    // This limitation is tied to the data field size in CowOperation.
    if (header_.block_size > std::numeric_limits<uint16_t>::max()) {
        LOG(ERROR) << "Block size is too large";
        return false;
    }

    if (lseek(fd_.get(), 0, SEEK_SET) < 0) {
        PLOG(ERROR) << "lseek failed";
        return false;
    }

    if (options_.scratch_space) {
        header_.buffer_size = BUFFER_REGION_DEFAULT_SIZE;
    }

    // Headers are not complete, but this ensures the file is at the right
    // position.
    if (!android::base::WriteFully(fd_, &header_, sizeof(header_))) {
        PLOG(ERROR) << "write failed";
        return false;
    }

    if (options_.scratch_space) {
        // Initialize the scratch space
        std::string data(header_.buffer_size, 0);
        if (!android::base::WriteFully(fd_, data.data(), header_.buffer_size)) {
            PLOG(ERROR) << "writing scratch space failed";
            return false;
        }
    }

    if (!Sync()) {
        LOG(ERROR) << "Header sync failed";
        return false;
    }

    // Seek explicitly rather than relying on the writes above, then reset the
    // in-memory cursors to match.
    if (lseek(fd_.get(), sizeof(header_) + header_.buffer_size, SEEK_SET) < 0) {
        PLOG(ERROR) << "lseek failed";
        return false;
    }

    InitPos();

    return true;
}
252 
OpenForAppend(uint64_t label)253 bool CowWriter::OpenForAppend(uint64_t label) {
254     auto reader = std::make_unique<CowReader>();
255     std::queue<CowOperation> toAdd;
256 
257     if (!reader->Parse(fd_, {label}) || !reader->GetHeader(&header_)) {
258         return false;
259     }
260 
261     options_.block_size = header_.block_size;
262     options_.cluster_ops = header_.cluster_ops;
263 
264     // Reset this, since we're going to reimport all operations.
265     footer_.op.num_ops = 0;
266     InitPos();
267 
268     auto iter = reader->GetOpIter();
269 
270     while (!iter->Done()) {
271         AddOperation(iter->Get());
272         iter->Next();
273     }
274 
275     // Free reader so we own the descriptor position again.
276     reader = nullptr;
277 
278     if (lseek(fd_.get(), next_op_pos_, SEEK_SET) < 0) {
279         PLOG(ERROR) << "lseek failed";
280         return false;
281     }
282     return EmitClusterIfNeeded();
283 }
284 
EmitCopy(uint64_t new_block,uint64_t old_block)285 bool CowWriter::EmitCopy(uint64_t new_block, uint64_t old_block) {
286     CHECK(!merge_in_progress_);
287     CowOperation op = {};
288     op.type = kCowCopyOp;
289     op.new_block = new_block;
290     op.source = old_block;
291     return WriteOperation(op);
292 }
293 
// Replace ops carry their payload inline; old_block/offset are unused (0).
bool CowWriter::EmitRawBlocks(uint64_t new_block_start, const void* data, size_t size) {
    return EmitBlocks(new_block_start, data, size, 0, 0, kCowReplaceOp);
}
297 
// XOR ops reference the old data at |old_block| + |offset| in addition to the
// inline payload.
bool CowWriter::EmitXorBlocks(uint32_t new_block_start, const void* data, size_t size,
                              uint32_t old_block, uint16_t offset) {
    return EmitBlocks(new_block_start, data, size, old_block, offset, kCowXorOp);
}
302 
EmitBlocks(uint64_t new_block_start,const void * data,size_t size,uint64_t old_block,uint16_t offset,uint8_t type)303 bool CowWriter::EmitBlocks(uint64_t new_block_start, const void* data, size_t size,
304                            uint64_t old_block, uint16_t offset, uint8_t type) {
305     const uint8_t* iter = reinterpret_cast<const uint8_t*>(data);
306     CHECK(!merge_in_progress_);
307     for (size_t i = 0; i < size / header_.block_size; i++) {
308         CowOperation op = {};
309         op.new_block = new_block_start + i;
310         op.type = type;
311         if (type == kCowXorOp) {
312             op.source = (old_block + i) * header_.block_size + offset;
313         } else {
314             op.source = next_data_pos_;
315         }
316 
317         if (compression_) {
318             auto data = Compress(iter, header_.block_size);
319             if (data.empty()) {
320                 PLOG(ERROR) << "AddRawBlocks: compression failed";
321                 return false;
322             }
323             if (data.size() > std::numeric_limits<uint16_t>::max()) {
324                 LOG(ERROR) << "Compressed block is too large: " << data.size() << " bytes";
325                 return false;
326             }
327             op.compression = compression_;
328             op.data_length = static_cast<uint16_t>(data.size());
329 
330             if (!WriteOperation(op, data.data(), data.size())) {
331                 PLOG(ERROR) << "AddRawBlocks: write failed";
332                 return false;
333             }
334         } else {
335             op.data_length = static_cast<uint16_t>(header_.block_size);
336             if (!WriteOperation(op, iter, header_.block_size)) {
337                 PLOG(ERROR) << "AddRawBlocks: write failed";
338                 return false;
339             }
340         }
341 
342         iter += header_.block_size;
343     }
344     return true;
345 }
346 
EmitZeroBlocks(uint64_t new_block_start,uint64_t num_blocks)347 bool CowWriter::EmitZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) {
348     CHECK(!merge_in_progress_);
349     for (uint64_t i = 0; i < num_blocks; i++) {
350         CowOperation op = {};
351         op.type = kCowZeroOp;
352         op.new_block = new_block_start + i;
353         op.source = 0;
354         WriteOperation(op);
355     }
356     return true;
357 }
358 
EmitLabel(uint64_t label)359 bool CowWriter::EmitLabel(uint64_t label) {
360     CHECK(!merge_in_progress_);
361     CowOperation op = {};
362     op.type = kCowLabelOp;
363     op.source = label;
364     return WriteOperation(op) && Sync();
365 }
366 
EmitSequenceData(size_t num_ops,const uint32_t * data)367 bool CowWriter::EmitSequenceData(size_t num_ops, const uint32_t* data) {
368     CHECK(!merge_in_progress_);
369     size_t to_add = 0;
370     size_t max_ops = std::numeric_limits<uint16_t>::max() / sizeof(uint32_t);
371     while (num_ops > 0) {
372         CowOperation op = {};
373         op.type = kCowSequenceOp;
374         op.source = next_data_pos_;
375         to_add = std::min(num_ops, max_ops);
376         op.data_length = static_cast<uint16_t>(to_add * sizeof(uint32_t));
377         if (!WriteOperation(op, data, op.data_length)) {
378             PLOG(ERROR) << "AddSequenceData: write failed";
379             return false;
380         }
381         num_ops -= to_add;
382         data += to_add;
383     }
384     return true;
385 }
386 
// Terminate the current cluster with a cluster op whose |source| is the byte
// distance to skip (over remaining op slots plus the cluster's data region)
// to reach the next cluster's first op.
bool CowWriter::EmitCluster() {
    CowOperation op = {};
    op.type = kCowClusterOp;
    // Next cluster starts after remainder of current cluster and the next data block.
    op.source = current_data_size_ + cluster_size_ - current_cluster_size_ - sizeof(CowOperation);
    return WriteOperation(op);
}
394 
EmitClusterIfNeeded()395 bool CowWriter::EmitClusterIfNeeded() {
396     // If there isn't room for another op and the cluster end op, end the current cluster
397     if (cluster_size_ && cluster_size_ < current_cluster_size_ + 2 * sizeof(CowOperation)) {
398         if (!EmitCluster()) return false;
399     }
400     return true;
401 }
402 
// Compress |length| bytes at |data| with the configured codec. Returns the
// compressed bytes, or an empty string on failure (callers treat empty as an
// error).
std::basic_string<uint8_t> CowWriter::Compress(const void* data, size_t length) {
    switch (compression_) {
        case kCowCompressGz: {
            // compressBound() gives zlib's worst-case output size for |length|.
            auto bound = compressBound(length);
            auto buffer = std::make_unique<uint8_t[]>(bound);

            uLongf dest_len = bound;
            auto rv = compress2(buffer.get(), &dest_len, reinterpret_cast<const Bytef*>(data),
                                length, Z_BEST_COMPRESSION);
            if (rv != Z_OK) {
                LOG(ERROR) << "compress2 returned: " << rv;
                return {};
            }
            return std::basic_string<uint8_t>(buffer.get(), dest_len);
        }
        case kCowCompressBrotli: {
            auto bound = BrotliEncoderMaxCompressedSize(length);
            if (!bound) {
                LOG(ERROR) << "BrotliEncoderMaxCompressedSize returned 0";
                return {};
            }
            auto buffer = std::make_unique<uint8_t[]>(bound);

            size_t encoded_size = bound;
            auto rv = BrotliEncoderCompress(
                    BROTLI_DEFAULT_QUALITY, BROTLI_DEFAULT_WINDOW, BROTLI_DEFAULT_MODE, length,
                    reinterpret_cast<const uint8_t*>(data), &encoded_size, buffer.get());
            if (!rv) {
                LOG(ERROR) << "BrotliEncoderCompress failed";
                return {};
            }
            return std::basic_string<uint8_t>(buffer.get(), encoded_size);
        }
        default:
            // EmitBlocks only calls Compress() when compression_ is non-zero,
            // so this is presumably unreachable for kCowCompressNone — confirm
            // that kCowCompressNone == 0.
            LOG(ERROR) << "unhandled compression type: " << compression_;
            break;
    }
    return {};
}
442 
// TODO: Fix compilation issues when linking libcrypto library
// when snapuserd is compiled as part of ramdisk.
// Stubbed out: computes nothing, so the checksum fields Finalize() passes in
// stay zeroed. Parameters are intentionally unnamed since the body is
// disabled.
static void SHA256(const void*, size_t, uint8_t[]) {
#if 0
    SHA256_CTX c;
    SHA256_Init(&c);
    SHA256_Update(&c, data, length);
    SHA256_Final(out, &c);
#endif
}
453 
// Write the footer so the image on disk is complete and consumable, then
// restore the in-memory cursors so additional operations may still be
// appended afterwards (the footer is simply overwritten by later writes).
bool CowWriter::Finalize() {
    // Snapshot the current write state; restored below if we had to emit an
    // extra padding cluster to push the footer to the end of the data.
    auto continue_cluster_size = current_cluster_size_;
    auto continue_data_size = current_data_size_;
    auto continue_data_pos = next_data_pos_;
    auto continue_op_pos = next_op_pos_;
    auto continue_size = ops_.size();
    auto continue_num_ops = footer_.op.num_ops;
    bool extra_cluster = false;

    // Blank out extra ops, in case we're in append mode and dropped ops.
    if (cluster_size_) {
        auto unused_cluster_space = cluster_size_ - current_cluster_size_;
        std::string clr;
        clr.resize(unused_cluster_space, '\0');
        if (lseek(fd_.get(), next_op_pos_, SEEK_SET) < 0) {
            PLOG(ERROR) << "Failed to seek to footer position.";
            return false;
        }
        if (!android::base::WriteFully(fd_, clr.data(), clr.size())) {
            PLOG(ERROR) << "clearing unused cluster area failed";
            return false;
        }
    }

    // Footer should be at the end of a file, so if there is data after the current block, end it
    // and start a new cluster.
    if (cluster_size_ && current_data_size_ > 0) {
        // NOTE(review): EmitCluster()'s return value is ignored here; a failed
        // cluster write would only surface via the footer write below.
        // Confirm whether the error should propagate immediately.
        EmitCluster();
        extra_cluster = true;
    }

    // ops_ holds the raw serialized op bytes, so ops_size is a byte count.
    footer_.op.ops_size = ops_.size();
    if (lseek(fd_.get(), next_op_pos_, SEEK_SET) < 0) {
        PLOG(ERROR) << "Failed to seek to footer position.";
        return false;
    }
    memset(&footer_.data.ops_checksum, 0, sizeof(uint8_t) * 32);
    memset(&footer_.data.footer_checksum, 0, sizeof(uint8_t) * 32);

    // SHA256 is currently a no-op stub, so both checksums remain zeroed.
    SHA256(ops_.data(), ops_.size(), footer_.data.ops_checksum);
    SHA256(&footer_.op, sizeof(footer_.op), footer_.data.footer_checksum);
    // Write out footer at end of file
    if (!android::base::WriteFully(fd_, reinterpret_cast<const uint8_t*>(&footer_),
                                   sizeof(footer_))) {
        PLOG(ERROR) << "write footer failed";
        return false;
    }

    // Remove excess data, if we're in append mode and threw away more data
    // than we wrote before.
    off_t offs = lseek(fd_.get(), 0, SEEK_CUR);
    if (offs < 0) {
        PLOG(ERROR) << "Failed to lseek to find current position";
        return false;
    }
    if (!Truncate(offs)) {
        return false;
    }

    // Reposition for additional Writing
    if (extra_cluster) {
        current_cluster_size_ = continue_cluster_size;
        current_data_size_ = continue_data_size;
        next_data_pos_ = continue_data_pos;
        next_op_pos_ = continue_op_pos;
        footer_.op.num_ops = continue_num_ops;
        ops_.resize(continue_size);
    }
    return Sync();
}
524 
GetCowSize()525 uint64_t CowWriter::GetCowSize() {
526     if (current_data_size_ > 0) {
527         return next_data_pos_ + sizeof(footer_);
528     } else {
529         return next_op_pos_ + sizeof(footer_);
530     }
531 }
532 
GetDataPos(uint64_t * pos)533 bool CowWriter::GetDataPos(uint64_t* pos) {
534     off_t offs = lseek(fd_.get(), 0, SEEK_CUR);
535     if (offs < 0) {
536         PLOG(ERROR) << "lseek failed";
537         return false;
538     }
539     *pos = offs;
540     return true;
541 }
542 
WriteOperation(const CowOperation & op,const void * data,size_t size)543 bool CowWriter::WriteOperation(const CowOperation& op, const void* data, size_t size) {
544     if (lseek(fd_.get(), next_op_pos_, SEEK_SET) < 0) {
545         PLOG(ERROR) << "lseek failed for writing operation.";
546         return false;
547     }
548     if (!android::base::WriteFully(fd_, reinterpret_cast<const uint8_t*>(&op), sizeof(op))) {
549         return false;
550     }
551     if (data != nullptr && size > 0) {
552         if (!WriteRawData(data, size)) return false;
553     }
554     AddOperation(op);
555     return EmitClusterIfNeeded();
556 }
557 
// Account for |op| in the writer's bookkeeping: bump the op count, track
// cluster fill levels, advance the op/data cursors, and append the serialized
// op bytes to ops_ (used later by Finalize for ops_size/checksums).
void CowWriter::AddOperation(const CowOperation& op) {
    footer_.op.num_ops++;

    // A cluster op resets the running counters; otherwise, when clustering is
    // enabled, grow them by this op's footprint.
    if (op.type == kCowClusterOp) {
        current_cluster_size_ = 0;
        current_data_size_ = 0;
    } else if (header_.cluster_ops) {
        current_cluster_size_ += sizeof(op);
        current_data_size_ += op.data_length;
    }

    // GetNextDataOffset/GetNextOpOffset (defined elsewhere) add any extra
    // skip required at cluster boundaries.
    next_data_pos_ += op.data_length + GetNextDataOffset(op, header_.cluster_ops);
    next_op_pos_ += sizeof(CowOperation) + GetNextOpOffset(op, header_.cluster_ops);
    ops_.insert(ops_.size(), reinterpret_cast<const uint8_t*>(&op), sizeof(op));
}
573 
WriteRawData(const void * data,size_t size)574 bool CowWriter::WriteRawData(const void* data, size_t size) {
575     if (lseek(fd_.get(), next_data_pos_, SEEK_SET) < 0) {
576         PLOG(ERROR) << "lseek failed for writing data.";
577         return false;
578     }
579 
580     if (!android::base::WriteFully(fd_, data, size)) {
581         return false;
582     }
583     return true;
584 }
585 
Sync()586 bool CowWriter::Sync() {
587     if (is_dev_null_) {
588         return true;
589     }
590     if (fsync(fd_.get()) < 0) {
591         PLOG(ERROR) << "fsync failed";
592         return false;
593     }
594     return true;
595 }
596 
Truncate(off_t length)597 bool CowWriter::Truncate(off_t length) {
598     if (is_dev_null_ || is_block_device_) {
599         return true;
600     }
601     if (ftruncate(fd_.get(), length) < 0) {
602         PLOG(ERROR) << "Failed to truncate.";
603         return false;
604     }
605     return true;
606 }
607 
608 }  // namespace snapshot
609 }  // namespace android
610