//
// Copyright (C) 2020 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>

#include <limits>
#include <queue>

#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/properties.h>
#include <android-base/unique_fd.h>
#include <brotli/encode.h>
#include <libsnapshot/cow_format.h>
#include <libsnapshot/cow_reader.h>
#include <libsnapshot/cow_writer.h>
#include <lz4.h>
#include <zlib.h>

#include <fcntl.h>
#include <linux/fs.h>
#include <sys/ioctl.h>
#include <unistd.h>

namespace android {
namespace snapshot {

namespace {
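// Resolve the filesystem path behind |fd| by reading the /proc/self/fd/<fd>
// symlink. Returns an empty string (and logs) if the link cannot be read.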
std::string GetFdPath(int fd) {
    const auto fd_path = "/proc/self/fd/" + std::to_string(fd);
    std::string file_path(512, '\0');
    const auto err = readlink(fd_path.c_str(), file_path.data(), file_path.size());
    if (err <= 0) {
        PLOG(ERROR) << "Failed to determine path for fd " << fd;
        file_path.clear();
    } else {
        file_path.resize(err);
    }
    return file_path;
}
}  // namespace

static_assert(sizeof(off_t) == sizeof(uint64_t));

using android::base::borrowed_fd;
using android::base::unique_fd;

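// The ICowWriter Add* entry points below validate their arguments against the
// writer's CowOptions (block alignment, maximum block count) and then forward
// to the corresponding Emit* implementation provided by the concrete writer.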
bool ICowWriter::AddCopy(uint64_t new_block, uint64_t old_block, uint64_t num_blocks) {
    CHECK(num_blocks != 0);

    for (size_t i = 0; i < num_blocks; i++) {
        if (!ValidateNewBlock(new_block + i)) {
            return false;
        }
    }

    return EmitCopy(new_block, old_block, num_blocks);
}

bool ICowWriter::AddRawBlocks(uint64_t new_block_start, const void* data, size_t size) {
    if (size % options_.block_size != 0) {
        LOG(ERROR) << "AddRawBlocks: size " << size << " is not a multiple of "
                   << options_.block_size;
        return false;
    }

    uint64_t num_blocks = size / options_.block_size;
    uint64_t last_block = new_block_start + num_blocks - 1;
    if (!ValidateNewBlock(last_block)) {
        return false;
    }
    return EmitRawBlocks(new_block_start, data, size);
}

bool ICowWriter::AddXorBlocks(uint32_t new_block_start, const void* data, size_t size,
                              uint32_t old_block, uint16_t offset) {
    if (size % options_.block_size != 0) {
        LOG(ERROR) << "AddXorBlocks: size " << size << " is not a multiple of "
                   << options_.block_size;
        return false;
    }

    uint64_t num_blocks = size / options_.block_size;
    uint64_t last_block = new_block_start + num_blocks - 1;
    if (!ValidateNewBlock(last_block)) {
        return false;
    }
    if (offset >= options_.block_size) {
        LOG(ERROR) << "AddXorBlocks: offset " << offset << " is not less than "
                   << options_.block_size;
        return false;
    }
    return EmitXorBlocks(new_block_start, data, size, old_block, offset);
}

bool ICowWriter::AddZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) {
    uint64_t last_block = new_block_start + num_blocks - 1;
    if (!ValidateNewBlock(last_block)) {
        return false;
    }
    return EmitZeroBlocks(new_block_start, num_blocks);
}

bool ICowWriter::AddLabel(uint64_t label) {
    return EmitLabel(label);
}

bool ICowWriter::AddSequenceData(size_t num_ops, const uint32_t* data) {
    return EmitSequenceData(num_ops, data);
}

bool ICowWriter::ValidateNewBlock(uint64_t new_block) {
    if (options_.max_blocks && new_block >= options_.max_blocks.value()) {
        LOG(ERROR) << "New block " << new_block << " exceeds maximum block count "
                   << options_.max_blocks.value();
        return false;
    }
    return true;
}

CowWriter::CowWriter(const CowOptions& options) : ICowWriter(options), fd_(-1) {
    SetupHeaders();
    SetupWriteOptions();
}

CowWriter::~CowWriter() {
    for (size_t i = 0; i < compress_threads_.size(); i++) {
        CompressWorker* worker = compress_threads_[i].get();
        if (worker) {
            worker->Finalize();
        }
    }

    bool ret = true;
    for (auto& t : threads_) {
        ret = t.get() && ret;
    }

    if (!ret) {
        LOG(ERROR) << "Compression failed";
    }
    compress_threads_.clear();
}

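// Decide how many compression threads to use and whether to batch writes,
// based on the writer options and the ro.virtual_ab.* system properties.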
void CowWriter::SetupWriteOptions() {
    num_compress_threads_ = options_.num_compress_threads;

    if (!num_compress_threads_) {
        num_compress_threads_ = 1;
        // We prefer not to use more than two threads, as the overhead of additional
        // threads outweighs the reduction in compression time.
        if (header_.cluster_ops &&
            android::base::GetBoolProperty("ro.virtual_ab.compression.threads", false)) {
            num_compress_threads_ = 2;
        }
    }

    if (header_.cluster_ops &&
        (android::base::GetBoolProperty("ro.virtual_ab.batch_writes", false) ||
         options_.batch_write)) {
        batch_write_ = true;
    }
}

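// Populate the fixed fields of the COW header and footer from the writer
// options. buffer_size stays zero here; it is set later if scratch space is
// requested.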
void CowWriter::SetupHeaders() {
    header_ = {};
    header_.magic = kCowMagicNumber;
    header_.major_version = kCowVersionMajor;
    header_.minor_version = kCowVersionMinor;
    header_.header_size = sizeof(CowHeader);
    header_.footer_size = sizeof(CowFooter);
    header_.op_size = sizeof(CowOperation);
    header_.block_size = options_.block_size;
    header_.num_merge_ops = options_.num_merge_ops;
    header_.cluster_ops = options_.cluster_ops;
    header_.buffer_size = 0;
    footer_ = {};
    footer_.op.data_length = 64;
    footer_.op.type = kCowFooterOp;
}

bool CowWriter::ParseOptions() {
    if (options_.compression == "gz") {
        compression_ = kCowCompressGz;
    } else if (options_.compression == "brotli") {
        compression_ = kCowCompressBrotli;
    } else if (options_.compression == "lz4") {
        compression_ = kCowCompressLz4;
    } else if (options_.compression == "none") {
        compression_ = kCowCompressNone;
    } else if (!options_.compression.empty()) {
        LOG(ERROR) << "unrecognized compression: " << options_.compression;
        return false;
    }
    if (options_.cluster_ops == 1) {
        LOG(ERROR) << "Clusters must contain at least two operations to function.";
        return false;
    }
    return true;
}

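// Bind the writer to its output descriptor. A negative fd redirects all
// writes to /dev/null (useful for size estimation). For block devices the
// total size is queried so that writes can be bounded by the COW size.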
bool CowWriter::SetFd(android::base::borrowed_fd fd) {
    if (fd.get() < 0) {
        owned_fd_.reset(open("/dev/null", O_RDWR | O_CLOEXEC));
        if (owned_fd_ < 0) {
            PLOG(ERROR) << "open /dev/null failed";
            return false;
        }
        fd_ = owned_fd_;
        is_dev_null_ = true;
    } else {
        fd_ = fd;

        struct stat stat {};
        if (fstat(fd.get(), &stat) < 0) {
            PLOG(ERROR) << "fstat failed";
            return false;
        }
        const auto file_path = GetFdPath(fd.get());
        is_block_device_ = S_ISBLK(stat.st_mode);
        if (is_block_device_) {
            uint64_t size_in_bytes = 0;
            if (ioctl(fd.get(), BLKGETSIZE64, &size_in_bytes)) {
                PLOG(ERROR) << "Failed to get total size for: " << fd.get();
                return false;
            }
            cow_image_size_ = size_in_bytes;
            LOG(INFO) << "COW image " << file_path << " has size " << size_in_bytes;
        } else {
            LOG(INFO) << "COW image " << file_path
                      << " is not a block device, assuming unlimited space.";
        }
    }
    return true;
}

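// When batch writes are enabled, preallocate one iovec per cluster slot for
// operation metadata and one for data, so that a full cluster can later be
// flushed with a pair of pwritev() calls.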
void CowWriter::InitBatchWrites() {
    if (batch_write_) {
        cowop_vec_ = std::make_unique<struct iovec[]>(header_.cluster_ops);
        data_vec_ = std::make_unique<struct iovec[]>(header_.cluster_ops);
        struct iovec* cowop_ptr = cowop_vec_.get();
        struct iovec* data_ptr = data_vec_.get();
        for (size_t i = 0; i < header_.cluster_ops; i++) {
            std::unique_ptr<CowOperation> op = std::make_unique<CowOperation>();
            cowop_ptr[i].iov_base = op.get();
            cowop_ptr[i].iov_len = sizeof(CowOperation);
            opbuffer_vec_.push_back(std::move(op));

            std::unique_ptr<uint8_t[]> buffer = std::make_unique<uint8_t[]>(header_.block_size * 2);
            data_ptr[i].iov_base = buffer.get();
            data_ptr[i].iov_len = header_.block_size * 2;
            databuffer_vec_.push_back(std::move(buffer));
        }

        current_op_pos_ = next_op_pos_;
        current_data_pos_ = next_data_pos_;
    }

    std::string batch_write = batch_write_ ? "enabled" : "disabled";
    LOG(INFO) << "Batch writes: " << batch_write;
}

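// Spawn asynchronous compression workers when more than one compression
// thread is configured; otherwise compression stays on the calling thread.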
void CowWriter::InitWorkers() {
    if (num_compress_threads_ <= 1) {
        LOG(INFO) << "Not creating new threads for compression.";
        return;
    }
    for (int i = 0; i < num_compress_threads_; i++) {
        auto wt = std::make_unique<CompressWorker>(compression_, header_.block_size);
        threads_.emplace_back(std::async(std::launch::async, &CompressWorker::RunThread, wt.get()));
        compress_threads_.push_back(std::move(wt));
    }

    LOG(INFO) << num_compress_threads_ << " threads used for compression";
}

bool CowWriter::Initialize(unique_fd&& fd) {
    owned_fd_ = std::move(fd);
    return Initialize(borrowed_fd{owned_fd_});
}

bool CowWriter::Initialize(borrowed_fd fd) {
    if (!SetFd(fd) || !ParseOptions()) {
        return false;
    }

    if (!OpenForWrite()) {
        return false;
    }

    InitWorkers();
    return true;
}

bool CowWriter::InitializeAppend(android::base::unique_fd&& fd, uint64_t label) {
    owned_fd_ = std::move(fd);
    return InitializeAppend(android::base::borrowed_fd{owned_fd_}, label);
}

bool CowWriter::InitializeAppend(android::base::borrowed_fd fd, uint64_t label) {
    if (!SetFd(fd) || !ParseOptions()) {
        return false;
    }

    bool ret = OpenForAppend(label);

    if (ret && !compress_threads_.size()) {
        InitWorkers();
    }

    return ret;
}

void CowWriter::InitPos() {
    next_op_pos_ = sizeof(header_) + header_.buffer_size;
    cluster_size_ = header_.cluster_ops * sizeof(CowOperation);
    if (header_.cluster_ops) {
        next_data_pos_ = next_op_pos_ + cluster_size_;
    } else {
        next_data_pos_ = next_op_pos_ + sizeof(CowOperation);
    }
    current_cluster_size_ = 0;
    current_data_size_ = 0;
}

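// Begin a fresh COW image: seek to the start, write the header (and a
// zero-filled scratch region if requested), sync, and initialize the op/data
// write positions. The footer is written later by Finalize().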
bool CowWriter::OpenForWrite() {
    // This limitation is tied to the data field size in CowOperation.
    if (header_.block_size > std::numeric_limits<uint16_t>::max()) {
        LOG(ERROR) << "Block size is too large";
        return false;
    }

    if (lseek(fd_.get(), 0, SEEK_SET) < 0) {
        PLOG(ERROR) << "lseek failed";
        return false;
    }

    if (options_.scratch_space) {
        header_.buffer_size = BUFFER_REGION_DEFAULT_SIZE;
    }

    // Headers are not complete, but this ensures the file is at the right
    // position.
    if (!android::base::WriteFully(fd_, &header_, sizeof(header_))) {
        PLOG(ERROR) << "write failed";
        return false;
    }

    if (options_.scratch_space) {
        // Initialize the scratch space.
        std::string data(header_.buffer_size, 0);
        if (!android::base::WriteFully(fd_, data.data(), header_.buffer_size)) {
            PLOG(ERROR) << "writing scratch space failed";
            return false;
        }
    }

    if (!Sync()) {
        LOG(ERROR) << "Header sync failed";
        return false;
    }

    if (lseek(fd_.get(), sizeof(header_) + header_.buffer_size, SEEK_SET) < 0) {
        PLOG(ERROR) << "lseek failed";
        return false;
    }

    InitPos();
    InitBatchWrites();

    return true;
}

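// Resume writing an existing COW image: parse it up to the given label,
// replay the surviving operations to rebuild the in-memory write positions,
// then seek to the end of the last complete cluster before appending.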
bool CowWriter::OpenForAppend(uint64_t label) {
    auto reader = std::make_unique<CowReader>();
    std::queue<CowOperation> toAdd;

    if (!reader->Parse(fd_, {label}) || !reader->GetHeader(&header_)) {
        return false;
    }

    options_.block_size = header_.block_size;
    options_.cluster_ops = header_.cluster_ops;

    // Reset this, since we're going to reimport all operations.
    footer_.op.num_ops = 0;
    InitPos();

    auto iter = reader->GetOpIter();

    while (!iter->Done()) {
        AddOperation(iter->Get());
        iter->Next();
    }

    // Free the reader so we own the descriptor position again.
    reader = nullptr;

    if (lseek(fd_.get(), next_op_pos_, SEEK_SET) < 0) {
        PLOG(ERROR) << "lseek failed";
        return false;
    }

    InitBatchWrites();

    return EmitClusterIfNeeded();
}

bool CowWriter::EmitCopy(uint64_t new_block, uint64_t old_block, uint64_t num_blocks) {
    CHECK(!merge_in_progress_);

    for (size_t i = 0; i < num_blocks; i++) {
        CowOperation op = {};
        op.type = kCowCopyOp;
        op.new_block = new_block + i;
        op.source = old_block + i;
        if (!WriteOperation(op)) {
            return false;
        }
    }

    return true;
}

bool CowWriter::EmitRawBlocks(uint64_t new_block_start, const void* data, size_t size) {
    return EmitBlocks(new_block_start, data, size, 0, 0, kCowReplaceOp);
}

bool CowWriter::EmitXorBlocks(uint32_t new_block_start, const void* data, size_t size,
                              uint32_t old_block, uint16_t offset) {
    return EmitBlocks(new_block_start, data, size, old_block, offset, kCowXorOp);
}

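// Compress |num_blocks| of input, either inline (single thread) or by fanning
// the blocks out across the compression workers; the last worker takes any
// remainder. Results are collected into compressed_buf_ in block order.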
bool CowWriter::CompressBlocks(size_t num_blocks, const void* data) {
    size_t num_threads = (num_blocks == 1) ? 1 : num_compress_threads_;
    size_t num_blocks_per_thread = num_blocks / num_threads;
    const uint8_t* iter = reinterpret_cast<const uint8_t*>(data);
    compressed_buf_.clear();
    if (num_threads <= 1) {
        return CompressWorker::CompressBlocks(compression_, options_.block_size, data, num_blocks,
                                              &compressed_buf_);
    }

    // Submit the blocks per thread. The retrieval of
    // compressed buffers has to be done in the same order.
    // We should not poll for completed buffers in a different order as the
    // buffers are tightly coupled with block ordering.
    for (size_t i = 0; i < num_threads; i++) {
        CompressWorker* worker = compress_threads_[i].get();
        if (i == num_threads - 1) {
            num_blocks_per_thread = num_blocks;
        }
        worker->EnqueueCompressBlocks(iter, num_blocks_per_thread);
        iter += (num_blocks_per_thread * header_.block_size);
        num_blocks -= num_blocks_per_thread;
    }

    for (size_t i = 0; i < num_threads; i++) {
        CompressWorker* worker = compress_threads_[i].get();
        if (!worker->GetCompressedBuffers(&compressed_buf_)) {
            return false;
        }
    }

    return true;
}

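// Core write path for replace and XOR data: process the input in chunks of at
// most kProcessingBlocks, optionally compressing each chunk (inline or via the
// worker threads), and emit one CowOperation per block.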
bool CowWriter::EmitBlocks(uint64_t new_block_start, const void* data, size_t size,
                           uint64_t old_block, uint16_t offset, uint8_t type) {
    CHECK(!merge_in_progress_);
    const uint8_t* iter = reinterpret_cast<const uint8_t*>(data);

    // Update engine can potentially send 100MB of blocks at a time. We
    // don't want to process all those blocks in one shot as it can
    // stress the memory. Hence, process the blocks in chunks.
    //
    // 1024 blocks is reasonable given we will end up using max
    // memory of ~4MB.
    const size_t kProcessingBlocks = 1024;
    size_t num_blocks = (size / header_.block_size);
    size_t i = 0;

    while (num_blocks) {
        size_t pending_blocks = (std::min(kProcessingBlocks, num_blocks));

        if (compression_ && num_compress_threads_ > 1) {
            if (!CompressBlocks(pending_blocks, iter)) {
                return false;
            }
            buf_iter_ = compressed_buf_.begin();
            CHECK(pending_blocks == compressed_buf_.size());
        }

        num_blocks -= pending_blocks;

        while (i < size / header_.block_size && pending_blocks) {
            CowOperation op = {};
            op.new_block = new_block_start + i;
            op.type = type;
            if (type == kCowXorOp) {
                op.source = (old_block + i) * header_.block_size + offset;
            } else {
                op.source = next_data_pos_;
            }

            if (compression_) {
                auto data = [&, this]() {
                    if (num_compress_threads_ > 1) {
                        auto data = std::move(*buf_iter_);
                        buf_iter_++;
                        return data;
                    } else {
                        auto data =
                                CompressWorker::Compress(compression_, iter, header_.block_size);
                        return data;
                    }
                }();
                op.compression = compression_;
                op.data_length = static_cast<uint16_t>(data.size());

                if (!WriteOperation(op, data.data(), data.size())) {
                    PLOG(ERROR) << "AddRawBlocks: write failed";
                    return false;
                }
            } else {
                op.data_length = static_cast<uint16_t>(header_.block_size);
                if (!WriteOperation(op, iter, header_.block_size)) {
                    PLOG(ERROR) << "AddRawBlocks: write failed";
                    return false;
                }
            }
            iter += header_.block_size;

            i += 1;
            pending_blocks -= 1;
        }

        CHECK(pending_blocks == 0);
    }
    return true;
}

bool CowWriter::EmitZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) {
    CHECK(!merge_in_progress_);
    for (uint64_t i = 0; i < num_blocks; i++) {
        CowOperation op = {};
        op.type = kCowZeroOp;
        op.new_block = new_block_start + i;
        op.source = 0;
        if (!WriteOperation(op)) {
            return false;
        }
    }
    return true;
}

bool CowWriter::EmitLabel(uint64_t label) {
    CHECK(!merge_in_progress_);
    CowOperation op = {};
    op.type = kCowLabelOp;
    op.source = label;
    return WriteOperation(op) && Sync();
}

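// Write the merge-sequence data. Each kCowSequenceOp carries at most two
// blocks' worth of uint32_t entries, so the sequence is split across as many
// ops as needed.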
bool CowWriter::EmitSequenceData(size_t num_ops, const uint32_t* data) {
    CHECK(!merge_in_progress_);
    size_t to_add = 0;
    size_t max_ops = (header_.block_size * 2) / sizeof(uint32_t);
    while (num_ops > 0) {
        CowOperation op = {};
        op.type = kCowSequenceOp;
        op.source = next_data_pos_;
        to_add = std::min(num_ops, max_ops);
        op.data_length = static_cast<uint16_t>(to_add * sizeof(uint32_t));
        if (!WriteOperation(op, data, op.data_length)) {
            PLOG(ERROR) << "AddSequenceData: write failed";
            return false;
        }
        num_ops -= to_add;
        data += to_add;
    }
    return true;
}

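// End the current cluster. The cluster op's source field stores the distance
// from the end of this op to the start of the next cluster: the remaining,
// unused op slots in this cluster plus the cluster's data region.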
bool CowWriter::EmitCluster() {
    CowOperation op = {};
    op.type = kCowClusterOp;
    // Next cluster starts after remainder of current cluster and the next data block.
    op.source = current_data_size_ + cluster_size_ - current_cluster_size_ - sizeof(CowOperation);
    return WriteOperation(op);
}

bool CowWriter::EmitClusterIfNeeded() {
    // If there isn't room for another op and the cluster end op, end the current cluster.
    if (cluster_size_ && cluster_size_ < current_cluster_size_ + 2 * sizeof(CowOperation)) {
        if (!EmitCluster()) return false;
    }
    return true;
}

// TODO: Fix compilation issues when linking libcrypto library
// when snapuserd is compiled as part of ramdisk.
static void SHA256(const void*, size_t, uint8_t[]) {
#if 0
    SHA256_CTX c;
    SHA256_Init(&c);
    SHA256_Update(&c, data, length);
    SHA256_Final(out, &c);
#endif
}

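// Complete the image: flush any batched writes, zero out unused space in the
// current cluster, write the footer at the end of the file, and truncate any
// excess bytes. The saved write positions are then restored so that further
// operations can still be appended, overwriting the footer.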
bool CowWriter::Finalize() {
    if (!FlushCluster()) {
        LOG(ERROR) << "Finalize: FlushCluster() failed";
        return false;
    }

    auto continue_cluster_size = current_cluster_size_;
    auto continue_data_size = current_data_size_;
    auto continue_data_pos = next_data_pos_;
    auto continue_op_pos = next_op_pos_;
    auto continue_num_ops = footer_.op.num_ops;
    bool extra_cluster = false;

    // Blank out extra ops, in case we're in append mode and dropped ops.
    if (cluster_size_) {
        auto unused_cluster_space = cluster_size_ - current_cluster_size_;
        std::string clr;
        clr.resize(unused_cluster_space, '\0');
        if (lseek(fd_.get(), next_op_pos_, SEEK_SET) < 0) {
            PLOG(ERROR) << "Failed to seek to footer position.";
            return false;
        }
        if (!android::base::WriteFully(fd_, clr.data(), clr.size())) {
            PLOG(ERROR) << "clearing unused cluster area failed";
            return false;
        }
    }

    // The footer should be at the end of the file, so if there is data after the
    // current block, end it and start a new cluster.
    if (cluster_size_ && current_data_size_ > 0) {
        EmitCluster();
        extra_cluster = true;
    }

    footer_.op.ops_size = footer_.op.num_ops * sizeof(CowOperation);
    if (lseek(fd_.get(), next_op_pos_, SEEK_SET) < 0) {
        PLOG(ERROR) << "Failed to seek to footer position.";
        return false;
    }
    memset(&footer_.data.ops_checksum, 0, sizeof(uint8_t) * 32);
    memset(&footer_.data.footer_checksum, 0, sizeof(uint8_t) * 32);

    SHA256(&footer_.op, sizeof(footer_.op), footer_.data.footer_checksum);
    // Write out the footer at the end of the file.
    if (!android::base::WriteFully(fd_, reinterpret_cast<const uint8_t*>(&footer_),
                                   sizeof(footer_))) {
        PLOG(ERROR) << "write footer failed";
        return false;
    }

    // Remove excess data, if we're in append mode and threw away more data
    // than we wrote before.
    off_t offs = lseek(fd_.get(), 0, SEEK_CUR);
    if (offs < 0) {
        PLOG(ERROR) << "Failed to lseek to find current position";
        return false;
    }
    if (!Truncate(offs)) {
        return false;
    }

    // Reposition for additional writing.
    if (extra_cluster) {
        current_cluster_size_ = continue_cluster_size;
        current_data_size_ = continue_data_size;
        next_data_pos_ = continue_data_pos;
        next_op_pos_ = continue_op_pos;
        footer_.op.num_ops = continue_num_ops;
    }

    FlushCluster();

    return Sync();
}

uint64_t CowWriter::GetCowSize() {
    if (current_data_size_ > 0) {
        return next_data_pos_ + sizeof(footer_);
    } else {
        return next_op_pos_ + sizeof(footer_);
    }
}

bool CowWriter::GetDataPos(uint64_t* pos) {
    off_t offs = lseek(fd_.get(), 0, SEEK_CUR);
    if (offs < 0) {
        PLOG(ERROR) << "lseek failed";
        return false;
    }
    *pos = offs;
    return true;
}

bool CowWriter::EnsureSpaceAvailable(const uint64_t bytes_needed) const {
    if (bytes_needed > cow_image_size_) {
        LOG(ERROR) << "No space left on COW device. Required: " << bytes_needed
                   << ", available: " << cow_image_size_;
        errno = ENOSPC;
        return false;
    }
    return true;
}

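// Flush any batched operation metadata and data to disk with pwritev() at the
// positions recorded when the batch was started, then reset the batch state.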
bool CowWriter::FlushCluster() {
    ssize_t ret;

    if (op_vec_index_) {
        ret = pwritev(fd_.get(), cowop_vec_.get(), op_vec_index_, current_op_pos_);
        if (ret != (op_vec_index_ * sizeof(CowOperation))) {
            PLOG(ERROR) << "pwritev failed for CowOperation. Expected: "
                        << (op_vec_index_ * sizeof(CowOperation));
            return false;
        }
    }

    if (data_vec_index_) {
        ret = pwritev(fd_.get(), data_vec_.get(), data_vec_index_, current_data_pos_);
        if (ret != total_data_written_) {
            PLOG(ERROR) << "pwritev failed for data. Expected: " << total_data_written_;
            return false;
        }
    }

    total_data_written_ = 0;
    op_vec_index_ = 0;
    data_vec_index_ = 0;
    current_op_pos_ = next_op_pos_;
    current_data_pos_ = next_data_pos_;

    return true;
}

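// Write a single operation (and its data, if any). In batch mode the op and
// data are staged into the preallocated iovecs and flushed once a cluster is
// full or a label/cluster op forces it; otherwise they are written directly.
// Bookkeeping is updated via AddOperation() in both cases.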
bool CowWriter::WriteOperation(const CowOperation& op, const void* data, size_t size) {
    if (!EnsureSpaceAvailable(next_op_pos_ + sizeof(op))) {
        return false;
    }
    if (!EnsureSpaceAvailable(next_data_pos_ + size)) {
        return false;
    }

    if (batch_write_) {
        CowOperation* cow_op = reinterpret_cast<CowOperation*>(cowop_vec_[op_vec_index_].iov_base);
        std::memcpy(cow_op, &op, sizeof(CowOperation));
        op_vec_index_ += 1;

        if (data != nullptr && size > 0) {
            struct iovec* data_ptr = data_vec_.get();
            std::memcpy(data_ptr[data_vec_index_].iov_base, data, size);
            data_ptr[data_vec_index_].iov_len = size;
            data_vec_index_ += 1;
            total_data_written_ += size;
        }
    } else {
        if (lseek(fd_.get(), next_op_pos_, SEEK_SET) < 0) {
            PLOG(ERROR) << "lseek failed for writing operation.";
            return false;
        }
        if (!android::base::WriteFully(fd_, reinterpret_cast<const uint8_t*>(&op), sizeof(op))) {
            return false;
        }
        if (data != nullptr && size > 0) {
            if (!WriteRawData(data, size)) return false;
        }
    }

    AddOperation(op);

    if (batch_write_) {
        if (op_vec_index_ == header_.cluster_ops || data_vec_index_ == header_.cluster_ops ||
            op.type == kCowLabelOp || op.type == kCowClusterOp) {
            if (!FlushCluster()) {
                LOG(ERROR) << "Failed to flush cluster data";
                return false;
            }
        }
    }

    return EmitClusterIfNeeded();
}

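// Account for an operation that has been written (or staged): bump the op
// count, update cluster usage, and advance the next op/data positions.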
void CowWriter::AddOperation(const CowOperation& op) {
    footer_.op.num_ops++;

    if (op.type == kCowClusterOp) {
        current_cluster_size_ = 0;
        current_data_size_ = 0;
    } else if (header_.cluster_ops) {
        current_cluster_size_ += sizeof(op);
        current_data_size_ += op.data_length;
    }

    next_data_pos_ += op.data_length + GetNextDataOffset(op, header_.cluster_ops);
    next_op_pos_ += sizeof(CowOperation) + GetNextOpOffset(op, header_.cluster_ops);
}

bool CowWriter::WriteRawData(const void* data, const size_t size) {
    if (!android::base::WriteFullyAtOffset(fd_, data, size, next_data_pos_)) {
        return false;
    }
    return true;
}

bool CowWriter::Sync() {
    if (is_dev_null_) {
        return true;
    }
    if (fsync(fd_.get()) < 0) {
        PLOG(ERROR) << "fsync failed";
        return false;
    }
    return true;
}

bool CowWriter::Truncate(off_t length) {
    if (is_dev_null_ || is_block_device_) {
        return true;
    }
    if (ftruncate(fd_.get(), length) < 0) {
        PLOG(ERROR) << "Failed to truncate.";
        return false;
    }
    return true;
}

}  // namespace snapshot
}  // namespace android