1 //
2 // Copyright (C) 2012 The Android Open Source Project
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //      http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 //
16 
17 #include "update_engine/payload_consumer/delta_performer.h"
18 
19 #include <errno.h>
20 #include <linux/fs.h>
21 
22 #include <algorithm>
23 #include <cstring>
24 #include <memory>
25 #include <string>
26 #include <utility>
27 #include <vector>
28 
29 #include <base/files/file_util.h>
30 #include <base/format_macros.h>
31 #include <base/metrics/histogram_macros.h>
32 #include <base/strings/string_number_conversions.h>
33 #include <base/strings/string_util.h>
34 #include <base/strings/stringprintf.h>
35 #include <base/time/time.h>
36 #include <brillo/data_encoding.h>
37 #include <bsdiff/bspatch.h>
38 #include <google/protobuf/repeated_field.h>
39 #include <puffin/puffpatch.h>
40 
41 #include "update_engine/common/constants.h"
42 #include "update_engine/common/hardware_interface.h"
43 #include "update_engine/common/prefs_interface.h"
44 #include "update_engine/common/subprocess.h"
45 #include "update_engine/common/terminator.h"
46 #include "update_engine/payload_consumer/bzip_extent_writer.h"
47 #include "update_engine/payload_consumer/cached_file_descriptor.h"
48 #include "update_engine/payload_consumer/download_action.h"
49 #include "update_engine/payload_consumer/extent_reader.h"
50 #include "update_engine/payload_consumer/extent_writer.h"
51 #include "update_engine/payload_consumer/file_descriptor_utils.h"
52 #include "update_engine/payload_consumer/mount_history.h"
53 #if USE_MTD
54 #include "update_engine/payload_consumer/mtd_file_descriptor.h"
55 #endif
56 #include "update_engine/payload_consumer/payload_constants.h"
57 #include "update_engine/payload_consumer/payload_verifier.h"
58 #include "update_engine/payload_consumer/xz_extent_writer.h"
59 
60 using google::protobuf::RepeatedPtrField;
61 using std::min;
62 using std::string;
63 using std::vector;
64 
65 namespace chromeos_update_engine {
66 
67 const uint64_t DeltaPerformer::kSupportedMajorPayloadVersion = 2;
68 const uint32_t DeltaPerformer::kSupportedMinorPayloadVersion = 5;
69 
70 const unsigned DeltaPerformer::kProgressLogMaxChunks = 10;
71 const unsigned DeltaPerformer::kProgressLogTimeoutSeconds = 30;
72 const unsigned DeltaPerformer::kProgressDownloadWeight = 50;
73 const unsigned DeltaPerformer::kProgressOperationsWeight = 50;
74 
75 namespace {
76 const int kUpdateStateOperationInvalid = -1;
77 const int kMaxResumedUpdateFailures = 10;
78 #if USE_MTD
79 const int kUbiVolumeAttachTimeout = 5 * 60;
80 #endif
81 
82 const uint64_t kCacheSize = 1024 * 1024;  // 1MB
83 
84 FileDescriptorPtr CreateFileDescriptor(const char* path) {
85   FileDescriptorPtr ret;
86 #if USE_MTD
87   if (strstr(path, "/dev/ubi") == path) {
88     if (!UbiFileDescriptor::IsUbi(path)) {
89       // The volume might not have been attached at boot time.
90       int volume_no;
91       if (utils::SplitPartitionName(path, nullptr, &volume_no)) {
92         utils::TryAttachingUbiVolume(volume_no, kUbiVolumeAttachTimeout);
93       }
94     }
95     if (UbiFileDescriptor::IsUbi(path)) {
96       LOG(INFO) << path << " is a UBI device.";
97       ret.reset(new UbiFileDescriptor);
98     }
99   } else if (MtdFileDescriptor::IsMtd(path)) {
100     LOG(INFO) << path << " is an MTD device.";
101     ret.reset(new MtdFileDescriptor);
102   } else {
103     LOG(INFO) << path << " is not an MTD nor a UBI device.";
104 #endif
105     ret.reset(new EintrSafeFileDescriptor);
106 #if USE_MTD
107   }
108 #endif
109   return ret;
110 }
111 
112 // Opens path for read/write. On success returns an open FileDescriptor
113 // and sets *err to 0. On failure, sets *err to errno and returns nullptr.
114 FileDescriptorPtr OpenFile(const char* path,
115                            int mode,
116                            bool cache_writes,
117                            int* err) {
118   // Try to mark the block device read-only based on the mode. Ignore any
119   // failure since this won't work when passing regular files.
120   bool read_only = (mode & O_ACCMODE) == O_RDONLY;
121   utils::SetBlockDeviceReadOnly(path, read_only);
122 
123   FileDescriptorPtr fd = CreateFileDescriptor(path);
124   if (cache_writes && !read_only) {
125     fd = FileDescriptorPtr(new CachedFileDescriptor(fd, kCacheSize));
126     LOG(INFO) << "Caching writes.";
127   }
128 #if USE_MTD
129   // On NAND devices, we can either read, or write, but not both. So here we
130   // use O_WRONLY.
131   if (UbiFileDescriptor::IsUbi(path) || MtdFileDescriptor::IsMtd(path)) {
132     mode = O_WRONLY;
133   }
134 #endif
135   if (!fd->Open(path, mode, 000)) {
136     *err = errno;
137     PLOG(ERROR) << "Unable to open file " << path;
138     return nullptr;
139   }
140   *err = 0;
141   return fd;
142 }
143 
144 // Discard the tail of the block device referenced by |fd|, from the offset
145 // |data_size| until the end of the block device. Returns whether the data was
146 // discarded.
147 bool DiscardPartitionTail(const FileDescriptorPtr& fd, uint64_t data_size) {
148   uint64_t part_size = fd->BlockDevSize();
149   if (!part_size || part_size <= data_size)
150     return false;
151 
152   struct blkioctl_request {
153     int number;
154     const char* name;
155   };
156   const vector<blkioctl_request> blkioctl_requests = {
157       {BLKDISCARD, "BLKDISCARD"},
158       {BLKSECDISCARD, "BLKSECDISCARD"},
159 #ifdef BLKZEROOUT
160       {BLKZEROOUT, "BLKZEROOUT"},
161 #endif
162   };
163   for (const auto& req : blkioctl_requests) {
164     int error = 0;
165     if (fd->BlkIoctl(req.number, data_size, part_size - data_size, &error) &&
166         error == 0) {
167       return true;
168     }
169     LOG(WARNING) << "Error discarding the last "
170                  << (part_size - data_size) / 1024 << " KiB using ioctl("
171                  << req.name << ")";
172   }
173   return false;
174 }
175 
176 }  // namespace
177 
178 
179 // Computes the ratio of |part| and |total|, scaled to |norm|, using integer
180 // arithmetic.
181 static uint64_t IntRatio(uint64_t part, uint64_t total, uint64_t norm) {
182   return part * norm / total;
183 }
184 
185 void DeltaPerformer::LogProgress(const char* message_prefix) {
186   // Format operations total count and percentage.
187   string total_operations_str("?");
188   string completed_percentage_str("");
189   if (num_total_operations_) {
190     total_operations_str = std::to_string(num_total_operations_);
191     // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
192     completed_percentage_str =
193         base::StringPrintf(" (%" PRIu64 "%%)",
194                            IntRatio(next_operation_num_, num_total_operations_,
195                                     100));
196   }
197 
198   // Format download total count and percentage.
199   size_t payload_size = payload_->size;
200   string payload_size_str("?");
201   string downloaded_percentage_str("");
202   if (payload_size) {
203     payload_size_str = std::to_string(payload_size);
204     // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
205     downloaded_percentage_str =
206         base::StringPrintf(" (%" PRIu64 "%%)",
207                            IntRatio(total_bytes_received_, payload_size, 100));
208   }
209 
210   LOG(INFO) << (message_prefix ? message_prefix : "") << next_operation_num_
211             << "/" << total_operations_str << " operations"
212             << completed_percentage_str << ", " << total_bytes_received_
213             << "/" << payload_size_str << " bytes downloaded"
214             << downloaded_percentage_str << ", overall progress "
215             << overall_progress_ << "%";
216 }
217 
218 void DeltaPerformer::UpdateOverallProgress(bool force_log,
219                                            const char* message_prefix) {
220   // Compute our download and overall progress.
221   unsigned new_overall_progress = 0;
222   static_assert(kProgressDownloadWeight + kProgressOperationsWeight == 100,
223                 "Progress weights don't add up");
224   // Only consider download progress if its total size is known; otherwise
225   // adjust the operations weight to compensate for the absence of download
226   // progress. Also, make sure to cap the download portion at
227   // kProgressDownloadWeight, in case we end up downloading more than we
228   // initially expected (this indicates a problem, but could generally happen).
229   // TODO(garnold) the correction of operations weight when we do not have the
230   // total payload size, as well as the conditional guard below, should both be
231   // eliminated once we ensure that the payload_size in the install plan is
232   // always given and is non-zero. This currently isn't the case during unit
233   // tests (see chromium-os:37969).
234   size_t payload_size = payload_->size;
235   unsigned actual_operations_weight = kProgressOperationsWeight;
236   if (payload_size)
237     new_overall_progress += min(
238         static_cast<unsigned>(IntRatio(total_bytes_received_, payload_size,
239                                        kProgressDownloadWeight)),
240         kProgressDownloadWeight);
241   else
242     actual_operations_weight += kProgressDownloadWeight;
243 
244   // Only add completed operations if their total number is known; we definitely
245   // expect an update to have at least one operation, so the expectation is that
246   // this will eventually reach |actual_operations_weight|.
247   if (num_total_operations_)
248     new_overall_progress += IntRatio(next_operation_num_, num_total_operations_,
249                                      actual_operations_weight);
250 
251   // Progress ratio cannot recede, unless our assumptions about the total
252   // payload size, total number of operations, or the monotonicity of progress
253   // is breached.
254   if (new_overall_progress < overall_progress_) {
255     LOG(WARNING) << "progress counter receded from " << overall_progress_
256                  << "% down to " << new_overall_progress << "%; this is a bug";
257     force_log = true;
258   }
259   overall_progress_ = new_overall_progress;
260 
261   // Update chunk index, log as needed: if forced by the caller, or we
262   // completed a progress chunk, or a timeout has expired.
263   base::Time curr_time = base::Time::Now();
264   unsigned curr_progress_chunk =
265       overall_progress_ * kProgressLogMaxChunks / 100;
266   if (force_log || curr_progress_chunk > last_progress_chunk_ ||
267       curr_time > forced_progress_log_time_) {
268     forced_progress_log_time_ = curr_time + forced_progress_log_wait_;
269     LogProgress(message_prefix);
270   }
271   last_progress_chunk_ = curr_progress_chunk;
272 }
273 
274 
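// Copies at most |max| - buffer_.size() bytes from |*bytes_p| into |buffer_|,
// advancing |*bytes_p| and decrementing |*count_p| by the number of bytes
// copied. Returns the number of bytes copied.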
275 size_t DeltaPerformer::CopyDataToBuffer(const char** bytes_p, size_t* count_p,
276                                         size_t max) {
277   const size_t count = *count_p;
278   if (!count)
279     return 0;  // Special case shortcut.
280   size_t read_len = min(count, max - buffer_.size());
281   const char* bytes_start = *bytes_p;
282   const char* bytes_end = bytes_start + read_len;
283   buffer_.reserve(max);
284   buffer_.insert(buffer_.end(), bytes_start, bytes_end);
285   *bytes_p = bytes_end;
286   *count_p = count - read_len;
287   return read_len;
288 }
289 
290 
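// On failure, logs which operation failed (by type and by its index within the
// current partition) and, unless a more specific error is already set, maps the
// failure to ErrorCode::kDownloadOperationExecutionError. Returns |op_result|.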
291 bool DeltaPerformer::HandleOpResult(bool op_result, const char* op_type_name,
292                                     ErrorCode* error) {
293   if (op_result)
294     return true;
295 
296   size_t partition_first_op_num =
297       current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0;
298   LOG(ERROR) << "Failed to perform " << op_type_name << " operation "
299              << next_operation_num_ << ", which is the operation "
300              << next_operation_num_ - partition_first_op_num
301              << " in partition \""
302              << partitions_[current_partition_].partition_name() << "\"";
303   if (*error == ErrorCode::kSuccess)
304     *error = ErrorCode::kDownloadOperationExecutionError;
305   return false;
306 }
307 
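// Closes the current partition and finalizes the payload hash calculators.
// Returns 0 on success, or a negative value on error.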
308 int DeltaPerformer::Close() {
309   int err = -CloseCurrentPartition();
310   LOG_IF(ERROR, !payload_hash_calculator_.Finalize() ||
311                 !signed_hash_calculator_.Finalize())
312       << "Unable to finalize the hash.";
313   if (!buffer_.empty()) {
314     LOG(INFO) << "Discarding " << buffer_.size() << " unused downloaded bytes";
315     if (err >= 0)
316       err = 1;
317   }
318   return -err;
319 }
320 
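// Closes the source and target partition file descriptors, if open. Returns 0
// on success, or a negative value on failure.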
321 int DeltaPerformer::CloseCurrentPartition() {
322   int err = 0;
323   if (source_fd_ && !source_fd_->Close()) {
324     err = errno;
325     PLOG(ERROR) << "Error closing source partition";
326     if (!err)
327       err = 1;
328   }
329   source_fd_.reset();
330   source_path_.clear();
331 
332   if (target_fd_ && !target_fd_->Close()) {
333     err = errno;
334     PLOG(ERROR) << "Error closing target partition";
335     if (!err)
336       err = 1;
337   }
338   target_fd_.reset();
339   target_path_.clear();
340   return -err;
341 }
342 
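// Opens the source (for delta payloads) and target file descriptors for the
// partition indexed by |current_partition_|, and discards the unused tail of
// the target block device.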
343 bool DeltaPerformer::OpenCurrentPartition() {
344   if (current_partition_ >= partitions_.size())
345     return false;
346 
347   const PartitionUpdate& partition = partitions_[current_partition_];
348   size_t num_previous_partitions =
349       install_plan_->partitions.size() - partitions_.size();
350   const InstallPlan::Partition& install_part =
351       install_plan_->partitions[num_previous_partitions + current_partition_];
352   // Open source fds if we have a delta payload with minor version >= 2.
353   if (payload_->type == InstallPayloadType::kDelta &&
354       GetMinorVersion() != kInPlaceMinorPayloadVersion) {
355     source_path_ = install_part.source_path;
356     int err;
357     source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, false, &err);
358     if (!source_fd_) {
359       LOG(ERROR) << "Unable to open source partition "
360                  << partition.partition_name() << " on slot "
361                  << BootControlInterface::SlotName(install_plan_->source_slot)
362                  << ", file " << source_path_;
363       return false;
364     }
365   }
366 
367   target_path_ = install_part.target_path;
368   int err;
369 
370   int flags = O_RDWR;
371   if (!is_interactive_)
372     flags |= O_DSYNC;
373 
374   LOG(INFO) << "Opening " << target_path_ << " partition with"
375             << (is_interactive_ ? "out" : "") << " O_DSYNC";
376 
377   target_fd_ = OpenFile(target_path_.c_str(), flags, true, &err);
378   if (!target_fd_) {
379     LOG(ERROR) << "Unable to open target partition "
380                << partition.partition_name() << " on slot "
381                << BootControlInterface::SlotName(install_plan_->target_slot)
382                << ", file " << target_path_;
383     return false;
384   }
385 
386   LOG(INFO) << "Applying " << partition.operations().size()
387             << " operations to partition \"" << partition.partition_name()
388             << "\"";
389 
390   // Discard the end of the partition, but ignore failures.
391   DiscardPartitionTail(target_fd_, install_part.target_size);
392 
393   return true;
394 }
395 
396 namespace {
397 
398 void LogPartitionInfoHash(const PartitionInfo& info, const string& tag) {
399   string sha256 = brillo::data_encoding::Base64Encode(info.hash());
400   LOG(INFO) << "PartitionInfo " << tag << " sha256: " << sha256
401             << " size: " << info.size();
402 }
403 
404 void LogPartitionInfo(const vector<PartitionUpdate>& partitions) {
405   for (const PartitionUpdate& partition : partitions) {
406     if (partition.has_old_partition_info()) {
407       LogPartitionInfoHash(partition.old_partition_info(),
408                            "old " + partition.partition_name());
409     }
410     LogPartitionInfoHash(partition.new_partition_info(),
411                          "new " + partition.partition_name());
412   }
413 }
414 
415 }  // namespace
416 
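// Returns the manifest's minor version if present; otherwise falls back to the
// latest supported delta minor version for delta payloads, or to the full
// payload minor version.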
417 uint32_t DeltaPerformer::GetMinorVersion() const {
418   if (manifest_.has_minor_version()) {
419     return manifest_.minor_version();
420   } else {
421     return payload_->type == InstallPayloadType::kDelta
422                ? kSupportedMinorPayloadVersion
423                : kFullPayloadMinorVersion;
424   }
425 }
426 
427 bool DeltaPerformer::IsHeaderParsed() const {
428   return metadata_size_ != 0;
429 }
430 
431 MetadataParseResult DeltaPerformer::ParsePayloadMetadata(
432     const brillo::Blob& payload, ErrorCode* error) {
433   *error = ErrorCode::kSuccess;
434 
435   if (!IsHeaderParsed()) {
436     MetadataParseResult result = payload_metadata_.ParsePayloadHeader(
437         payload, supported_major_version_, error);
438     if (result != MetadataParseResult::kSuccess)
439       return result;
440 
441     metadata_size_ = payload_metadata_.GetMetadataSize();
442     metadata_signature_size_ = payload_metadata_.GetMetadataSignatureSize();
443     major_payload_version_ = payload_metadata_.GetMajorVersion();
444 
445     // If the metadata size is present in the install plan, check it
446     // immediately, even before that many bytes of the payload have been
447     // downloaded. This prevents any attack that relies on us downloading
448     // data beyond the expected metadata size.
449     if (install_plan_->hash_checks_mandatory) {
450       if (payload_->metadata_size != metadata_size_) {
451         LOG(ERROR) << "Mandatory metadata size in Omaha response ("
452                    << payload_->metadata_size
453                    << ") is missing/incorrect, actual = " << metadata_size_;
454         *error = ErrorCode::kDownloadInvalidMetadataSize;
455         return MetadataParseResult::kError;
456       }
457     }
458   }
459 
460   // Now that we have validated the metadata size, we should wait for the full
461   // metadata and its signature (if it exists) to be read in before parsing.
462   if (payload.size() < metadata_size_ + metadata_signature_size_)
463     return MetadataParseResult::kInsufficientData;
464 
465   // Log whether we validated the size or are simply trusting what's in the
466   // payload here. This is logged after we have received the full metadata so
467   // that we log just once (instead of n times) if it takes n
468   // DeltaPerformer::Write calls to download the full manifest.
469   if (payload_->metadata_size == metadata_size_) {
470     LOG(INFO) << "Manifest size in payload matches expected value from Omaha";
471   } else {
472     // For mandatory cases, we'd have already returned a kMetadataParseError
473     // above. We'll be here only for non-mandatory cases. Just send a UMA stat.
474     LOG(WARNING) << "Ignoring missing/incorrect metadata size ("
475                  << payload_->metadata_size
476                  << ") in Omaha response as validation is not mandatory. "
477                  << "Trusting metadata size in payload = " << metadata_size_;
478   }
479 
480   // See if we should use the public RSA key in the Omaha response.
481   base::FilePath path_to_public_key(public_key_path_);
482   base::FilePath tmp_key;
483   if (GetPublicKeyFromResponse(&tmp_key))
484     path_to_public_key = tmp_key;
485   ScopedPathUnlinker tmp_key_remover(tmp_key.value());
486   if (tmp_key.empty())
487     tmp_key_remover.set_should_remove(false);
488 
489   // We have the full metadata in |payload|. Verify its integrity
490   // and authenticity based on the information we have in Omaha response.
491   *error = payload_metadata_.ValidateMetadataSignature(
492       payload, payload_->metadata_signature, path_to_public_key);
493   if (*error != ErrorCode::kSuccess) {
494     if (install_plan_->hash_checks_mandatory) {
495       // The autoupdate_CatchBadSignatures test checks for this string
496       // in log-files. Keep in sync.
497       LOG(ERROR) << "Mandatory metadata signature validation failed";
498       return MetadataParseResult::kError;
499     }
500 
501     // For non-mandatory cases, just send a UMA stat.
502     LOG(WARNING) << "Ignoring metadata signature validation failures";
503     *error = ErrorCode::kSuccess;
504   }
505 
506   // The payload metadata is deemed valid, it's safe to parse the protobuf.
507   if (!payload_metadata_.GetManifest(payload, &manifest_)) {
508     LOG(ERROR) << "Unable to parse manifest in update file.";
509     *error = ErrorCode::kDownloadManifestParseError;
510     return MetadataParseResult::kError;
511   }
512 
513   manifest_parsed_ = true;
514   return MetadataParseResult::kSuccess;
515 }
516 
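// Records a local histogram of the time spent applying an install operation of
// the given type, bucketed between 10 milliseconds and 5 minutes.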
517 #define OP_DURATION_HISTOGRAM(_op_name, _start_time)      \
518     LOCAL_HISTOGRAM_CUSTOM_TIMES(                         \
519         "UpdateEngine.DownloadAction.InstallOperation::"  \
520         _op_name ".Duration",                             \
521         base::TimeTicks::Now() - _start_time,             \
522         base::TimeDelta::FromMilliseconds(10),            \
523         base::TimeDelta::FromMinutes(5),                  \
524         20);
525 
526 // Wrapper around write. Returns true if all requested bytes were written, or
527 // false on any error regardless of progress, and stores an action exit code
528 // in |error|.
529 bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode *error) {
530   *error = ErrorCode::kSuccess;
531   const char* c_bytes = reinterpret_cast<const char*>(bytes);
532 
533   // Update the total byte downloaded count and the progress logs.
534   total_bytes_received_ += count;
535   UpdateOverallProgress(false, "Completed ");
536 
537   while (!manifest_valid_) {
538     // Read data up to the needed limit; this is either the maximum payload
539     // header size, or the full metadata size (once it becomes known).
540     const bool do_read_header = !IsHeaderParsed();
541     CopyDataToBuffer(&c_bytes, &count,
542                      (do_read_header ? kMaxPayloadHeaderSize :
543                       metadata_size_ + metadata_signature_size_));
544 
545     MetadataParseResult result = ParsePayloadMetadata(buffer_, error);
546     if (result == MetadataParseResult::kError)
547       return false;
548     if (result == MetadataParseResult::kInsufficientData) {
549       // If we just processed the header, make an attempt on the manifest.
550       if (do_read_header && IsHeaderParsed())
551         continue;
552 
553       return true;
554     }
555 
556     // Checks the integrity of the payload manifest.
557     if ((*error = ValidateManifest()) != ErrorCode::kSuccess)
558       return false;
559     manifest_valid_ = true;
560 
561     // Clear the download buffer.
562     DiscardBuffer(false, metadata_size_);
563 
564     // This populates |partitions_| and the |install_plan.partitions| with the
565     // list of partitions from the manifest.
566     if (!ParseManifestPartitions(error))
567       return false;
568 
569     // |install_plan.partitions| was filled in; nothing needs to be done here
570     // if the payload was already applied. Return false to terminate the http
571     // fetcher, but keep |error| as ErrorCode::kSuccess.
572     if (payload_->already_applied)
573       return false;
574 
575     num_total_operations_ = 0;
576     for (const auto& partition : partitions_) {
577       num_total_operations_ += partition.operations_size();
578       acc_num_operations_.push_back(num_total_operations_);
579     }
580 
581     LOG_IF(WARNING, !prefs_->SetInt64(kPrefsManifestMetadataSize,
582                                       metadata_size_))
583         << "Unable to save the manifest metadata size.";
584     LOG_IF(WARNING, !prefs_->SetInt64(kPrefsManifestSignatureSize,
585                                       metadata_signature_size_))
586         << "Unable to save the manifest signature size.";
587 
588     if (!PrimeUpdateState()) {
589       *error = ErrorCode::kDownloadStateInitializationError;
590       LOG(ERROR) << "Unable to prime the update state.";
591       return false;
592     }
593 
594     if (!OpenCurrentPartition()) {
595       *error = ErrorCode::kInstallDeviceOpenError;
596       return false;
597     }
598 
599     if (next_operation_num_ > 0)
600       UpdateOverallProgress(true, "Resuming after ");
601     LOG(INFO) << "Starting to apply update payload operations";
602   }
603 
604   while (next_operation_num_ < num_total_operations_) {
605     // Check if we should cancel the current attempt for any reason.
606     // In this case, *error will have already been populated with the reason
607     // why we're canceling.
608     if (download_delegate_ && download_delegate_->ShouldCancel(error))
609       return false;
610 
611     // We know there are more operations to perform because we didn't reach the
612     // |num_total_operations_| limit yet.
613     while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
614       CloseCurrentPartition();
615       current_partition_++;
616       if (!OpenCurrentPartition()) {
617         *error = ErrorCode::kInstallDeviceOpenError;
618         return false;
619       }
620     }
621     const size_t partition_operation_num = next_operation_num_ - (
622         current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0);
623 
624     const InstallOperation& op =
625         partitions_[current_partition_].operations(partition_operation_num);
626 
627     CopyDataToBuffer(&c_bytes, &count, op.data_length());
628 
629     // Check whether we received all of the next operation's data payload.
630     if (!CanPerformInstallOperation(op))
631       return true;
632 
633     // Validate the operation only if the metadata signature is present.
634     // Otherwise, keep the old behavior. This serves as a knob to disable
635     // the validation logic in case we find some regression after rollout.
636     // NOTE: If hash checks are mandatory and if metadata_signature is empty,
637     // we would have already failed in ParsePayloadMetadata method and thus not
638     // even be here. So no need to handle that case again here.
639     if (!payload_->metadata_signature.empty()) {
640       // Note: ValidateOperationHash must be called only after
641       // CanPerformInstallOperation has succeeded; otherwise we might fail
642       // operations before there is sufficient data to compute the proper hash.
643       *error = ValidateOperationHash(op);
644       if (*error != ErrorCode::kSuccess) {
645         if (install_plan_->hash_checks_mandatory) {
646           LOG(ERROR) << "Mandatory operation hash check failed";
647           return false;
648         }
649 
650         // For non-mandatory cases, just send a UMA stat.
651         LOG(WARNING) << "Ignoring operation validation errors";
652         *error = ErrorCode::kSuccess;
653       }
654     }
655 
656     // Makes sure we unblock exit when this operation completes.
657     ScopedTerminatorExitUnblocker exit_unblocker =
658         ScopedTerminatorExitUnblocker();  // Avoids a compiler unused var bug.
659 
660     base::TimeTicks op_start_time = base::TimeTicks::Now();
661 
662     bool op_result;
663     switch (op.type()) {
664       case InstallOperation::REPLACE:
665       case InstallOperation::REPLACE_BZ:
666       case InstallOperation::REPLACE_XZ:
667         op_result = PerformReplaceOperation(op);
668         OP_DURATION_HISTOGRAM("REPLACE", op_start_time);
669         break;
670       case InstallOperation::ZERO:
671       case InstallOperation::DISCARD:
672         op_result = PerformZeroOrDiscardOperation(op);
673         OP_DURATION_HISTOGRAM("ZERO_OR_DISCARD", op_start_time);
674         break;
675       case InstallOperation::MOVE:
676         op_result = PerformMoveOperation(op);
677         OP_DURATION_HISTOGRAM("MOVE", op_start_time);
678         break;
679       case InstallOperation::BSDIFF:
680         op_result = PerformBsdiffOperation(op);
681         OP_DURATION_HISTOGRAM("BSDIFF", op_start_time);
682         break;
683       case InstallOperation::SOURCE_COPY:
684         op_result = PerformSourceCopyOperation(op, error);
685         OP_DURATION_HISTOGRAM("SOURCE_COPY", op_start_time);
686         break;
687       case InstallOperation::SOURCE_BSDIFF:
688       case InstallOperation::BROTLI_BSDIFF:
689         op_result = PerformSourceBsdiffOperation(op, error);
690         OP_DURATION_HISTOGRAM("SOURCE_BSDIFF", op_start_time);
691         break;
692       case InstallOperation::PUFFDIFF:
693         op_result = PerformPuffDiffOperation(op, error);
694         OP_DURATION_HISTOGRAM("PUFFDIFF", op_start_time);
695         break;
696       default:
697         op_result = false;
698     }
699     if (!HandleOpResult(op_result, InstallOperationTypeName(op.type()), error))
700       return false;
701 
702     if (!target_fd_->Flush()) {
703       return false;
704     }
705 
706     next_operation_num_++;
707     UpdateOverallProgress(false, "Completed ");
708     CheckpointUpdateProgress();
709   }
710 
711   // In major version 2, we don't add a dummy operation to the payload.
712   // If we already extracted the signature we should skip this step.
713   if (major_payload_version_ == kBrilloMajorPayloadVersion &&
714       manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
715       signatures_message_data_.empty()) {
716     if (manifest_.signatures_offset() != buffer_offset_) {
717       LOG(ERROR) << "Payload signatures offset points to blob offset "
718                  << manifest_.signatures_offset()
719                  << " but signatures are expected at offset "
720                  << buffer_offset_;
721       *error = ErrorCode::kDownloadPayloadVerificationError;
722       return false;
723     }
724     CopyDataToBuffer(&c_bytes, &count, manifest_.signatures_size());
725     // Needs more data to cover entire signature.
726     if (buffer_.size() < manifest_.signatures_size())
727       return true;
728     if (!ExtractSignatureMessage()) {
729       LOG(ERROR) << "Extract payload signature failed.";
730       *error = ErrorCode::kDownloadPayloadVerificationError;
731       return false;
732     }
733     DiscardBuffer(true, 0);
734     // Since we extracted the SignatureMessage we need to advance the
735     // checkpoint, otherwise we would reload the signature and try to extract
736     // it again.
737     CheckpointUpdateProgress();
738   }
739 
740   return true;
741 }
742 
743 bool DeltaPerformer::IsManifestValid() {
744   return manifest_valid_;
745 }
746 
747 bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) {
748   if (major_payload_version_ == kBrilloMajorPayloadVersion) {
749     partitions_.clear();
750     for (const PartitionUpdate& partition : manifest_.partitions()) {
751       partitions_.push_back(partition);
752     }
753     manifest_.clear_partitions();
754   } else if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
755     LOG(INFO) << "Converting update information from old format.";
756     PartitionUpdate root_part;
757     root_part.set_partition_name(kLegacyPartitionNameRoot);
758 #ifdef __ANDROID__
759     LOG(WARNING) << "Legacy payload major version provided to an Android "
760                     "build. Assuming no post-install. Please use major version "
761                     "2 or newer.";
762     root_part.set_run_postinstall(false);
763 #else
764     root_part.set_run_postinstall(true);
765 #endif  // __ANDROID__
766     if (manifest_.has_old_rootfs_info()) {
767       *root_part.mutable_old_partition_info() = manifest_.old_rootfs_info();
768       manifest_.clear_old_rootfs_info();
769     }
770     if (manifest_.has_new_rootfs_info()) {
771       *root_part.mutable_new_partition_info() = manifest_.new_rootfs_info();
772       manifest_.clear_new_rootfs_info();
773     }
774     *root_part.mutable_operations() = manifest_.install_operations();
775     manifest_.clear_install_operations();
776     partitions_.push_back(std::move(root_part));
777 
778     PartitionUpdate kern_part;
779     kern_part.set_partition_name(kLegacyPartitionNameKernel);
780     kern_part.set_run_postinstall(false);
781     if (manifest_.has_old_kernel_info()) {
782       *kern_part.mutable_old_partition_info() = manifest_.old_kernel_info();
783       manifest_.clear_old_kernel_info();
784     }
785     if (manifest_.has_new_kernel_info()) {
786       *kern_part.mutable_new_partition_info() = manifest_.new_kernel_info();
787       manifest_.clear_new_kernel_info();
788     }
789     *kern_part.mutable_operations() = manifest_.kernel_install_operations();
790     manifest_.clear_kernel_install_operations();
791     partitions_.push_back(std::move(kern_part));
792   }
793 
794   // Fill in the InstallPlan::partitions based on the partitions from the
795   // payload.
796   for (const auto& partition : partitions_) {
797     InstallPlan::Partition install_part;
798     install_part.name = partition.partition_name();
799     install_part.run_postinstall =
800         partition.has_run_postinstall() && partition.run_postinstall();
801     if (install_part.run_postinstall) {
802       install_part.postinstall_path =
803           (partition.has_postinstall_path() ? partition.postinstall_path()
804                                             : kPostinstallDefaultScript);
805       install_part.filesystem_type = partition.filesystem_type();
806       install_part.postinstall_optional = partition.postinstall_optional();
807     }
808 
809     if (partition.has_old_partition_info()) {
810       const PartitionInfo& info = partition.old_partition_info();
811       install_part.source_size = info.size();
812       install_part.source_hash.assign(info.hash().begin(), info.hash().end());
813     }
814 
815     if (!partition.has_new_partition_info()) {
816       LOG(ERROR) << "Unable to get new partition hash info on partition "
817                  << install_part.name << ".";
818       *error = ErrorCode::kDownloadNewPartitionInfoError;
819       return false;
820     }
821     const PartitionInfo& info = partition.new_partition_info();
822     install_part.target_size = info.size();
823     install_part.target_hash.assign(info.hash().begin(), info.hash().end());
824 
825     install_plan_->partitions.push_back(install_part);
826   }
827 
828   if (!install_plan_->LoadPartitionsFromSlots(boot_control_)) {
829     LOG(ERROR) << "Unable to determine all the partition devices.";
830     *error = ErrorCode::kInstallDeviceOpenError;
831     return false;
832   }
833   LogPartitionInfo(partitions_);
834   return true;
835 }
836 
837 bool DeltaPerformer::CanPerformInstallOperation(
838     const chromeos_update_engine::InstallOperation& operation) {
839   // If we don't have a data blob we can apply it right away.
840   if (!operation.has_data_offset() && !operation.has_data_length())
841     return true;
842 
843   // See if we have the entire data blob in the buffer
844   if (operation.data_offset() < buffer_offset_) {
845     LOG(ERROR) << "we threw away data it seems?";
846     return false;
847   }
848 
849   return (operation.data_offset() + operation.data_length() <=
850           buffer_offset_ + buffer_.size());
851 }
852 
853 bool DeltaPerformer::PerformReplaceOperation(
854     const InstallOperation& operation) {
855   CHECK(operation.type() == InstallOperation::REPLACE ||
856         operation.type() == InstallOperation::REPLACE_BZ ||
857         operation.type() == InstallOperation::REPLACE_XZ);
858 
859   // Since we delete data off the beginning of the buffer as we use it,
860   // the data we need should be exactly at the beginning of the buffer.
861   TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
862   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
863 
864   // Extract the signature message if it's in this operation.
865   if (ExtractSignatureMessageFromOperation(operation)) {
866     // If this is a dummy replace operation, we ignore it after extracting the
867     // signature.
868     DiscardBuffer(true, 0);
869     return true;
870   }
871 
872   // Setup the ExtentWriter stack based on the operation type.
873   std::unique_ptr<ExtentWriter> writer = std::make_unique<ZeroPadExtentWriter>(
874       std::make_unique<DirectExtentWriter>());
875 
876   if (operation.type() == InstallOperation::REPLACE_BZ) {
877     writer.reset(new BzipExtentWriter(std::move(writer)));
878   } else if (operation.type() == InstallOperation::REPLACE_XZ) {
879     writer.reset(new XzExtentWriter(std::move(writer)));
880   }
881 
882   TEST_AND_RETURN_FALSE(
883       writer->Init(target_fd_, operation.dst_extents(), block_size_));
884   TEST_AND_RETURN_FALSE(writer->Write(buffer_.data(), operation.data_length()));
885   TEST_AND_RETURN_FALSE(writer->End());
886 
887   // Update buffer
888   DiscardBuffer(true, buffer_.size());
889   return true;
890 }
891 
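// Handles ZERO and DISCARD operations, which carry no data blob: first try the
// corresponding block-device ioctl, then fall back to writing zeros if the
// ioctl is unavailable or fails.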
892 bool DeltaPerformer::PerformZeroOrDiscardOperation(
893     const InstallOperation& operation) {
894   CHECK(operation.type() == InstallOperation::DISCARD ||
895         operation.type() == InstallOperation::ZERO);
896 
897   // These operations have no blob.
898   TEST_AND_RETURN_FALSE(!operation.has_data_offset());
899   TEST_AND_RETURN_FALSE(!operation.has_data_length());
900 
901 #ifdef BLKZEROOUT
902   bool attempt_ioctl = true;
903   int request =
904       (operation.type() == InstallOperation::ZERO ? BLKZEROOUT : BLKDISCARD);
905 #else  // !defined(BLKZEROOUT)
906   bool attempt_ioctl = false;
907   int request = 0;
908 #endif  // !defined(BLKZEROOUT)
909 
910   brillo::Blob zeros;
911   for (const Extent& extent : operation.dst_extents()) {
912     const uint64_t start = extent.start_block() * block_size_;
913     const uint64_t length = extent.num_blocks() * block_size_;
914     if (attempt_ioctl) {
915       int result = 0;
916       if (target_fd_->BlkIoctl(request, start, length, &result) && result == 0)
917         continue;
918       attempt_ioctl = false;
919     }
920     // In case of failure, we fall back to writing 0 to the selected region.
921     zeros.resize(16 * block_size_);
922     for (uint64_t offset = 0; offset < length; offset += zeros.size()) {
923       uint64_t chunk_length = min(length - offset,
924                                   static_cast<uint64_t>(zeros.size()));
925       TEST_AND_RETURN_FALSE(utils::PWriteAll(
926           target_fd_, zeros.data(), chunk_length, start + offset));
927     }
928   }
929   return true;
930 }
931 
932 bool DeltaPerformer::PerformMoveOperation(const InstallOperation& operation) {
933   // Calculate buffer size. Note, this function doesn't do a sliding
934   // window to copy in case the source and destination blocks overlap.
935   // If we wanted to do a sliding window, we could program the server
936   // to generate deltas that effectively did a sliding window.
937 
938   uint64_t blocks_to_read = 0;
939   for (int i = 0; i < operation.src_extents_size(); i++)
940     blocks_to_read += operation.src_extents(i).num_blocks();
941 
942   uint64_t blocks_to_write = 0;
943   for (int i = 0; i < operation.dst_extents_size(); i++)
944     blocks_to_write += operation.dst_extents(i).num_blocks();
945 
946   DCHECK_EQ(blocks_to_write, blocks_to_read);
947   brillo::Blob buf(blocks_to_write * block_size_);
948 
949   // Read in bytes.
950   ssize_t bytes_read = 0;
951   for (int i = 0; i < operation.src_extents_size(); i++) {
952     ssize_t bytes_read_this_iteration = 0;
953     const Extent& extent = operation.src_extents(i);
954     const size_t bytes = extent.num_blocks() * block_size_;
955     TEST_AND_RETURN_FALSE(extent.start_block() != kSparseHole);
956     TEST_AND_RETURN_FALSE(utils::PReadAll(target_fd_,
957                                           &buf[bytes_read],
958                                           bytes,
959                                           extent.start_block() * block_size_,
960                                           &bytes_read_this_iteration));
961     TEST_AND_RETURN_FALSE(
962         bytes_read_this_iteration == static_cast<ssize_t>(bytes));
963     bytes_read += bytes_read_this_iteration;
964   }
965 
966   // Write bytes out.
967   ssize_t bytes_written = 0;
968   for (int i = 0; i < operation.dst_extents_size(); i++) {
969     const Extent& extent = operation.dst_extents(i);
970     const size_t bytes = extent.num_blocks() * block_size_;
971     TEST_AND_RETURN_FALSE(extent.start_block() != kSparseHole);
972     TEST_AND_RETURN_FALSE(utils::PWriteAll(target_fd_,
973                                            &buf[bytes_written],
974                                            bytes,
975                                            extent.start_block() * block_size_));
976     bytes_written += bytes;
977   }
978   DCHECK_EQ(bytes_written, bytes_read);
979   DCHECK_EQ(bytes_written, static_cast<ssize_t>(buf.size()));
980   return true;
981 }
982 
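// Compares |calculated_hash| against the operation's expected src_sha256_hash.
// On mismatch, logs both hashes, the source extents and the source partition's
// mount history, sets |error| and returns false.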
983 bool DeltaPerformer::ValidateSourceHash(const brillo::Blob& calculated_hash,
984                                         const InstallOperation& operation,
985                                         const FileDescriptorPtr source_fd,
986                                         ErrorCode* error) {
987   brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
988                                     operation.src_sha256_hash().end());
989   if (calculated_hash != expected_source_hash) {
990     LOG(ERROR) << "The hash of the source data on disk for this operation "
991                << "doesn't match the expected value. This could mean that the "
992                << "delta update payload was targeted for another version, or "
993                << "that the source partition was modified after it was "
994                << "installed, for example, by mounting a filesystem.";
995     LOG(ERROR) << "Expected:   sha256|hex = "
996                << base::HexEncode(expected_source_hash.data(),
997                                   expected_source_hash.size());
998     LOG(ERROR) << "Calculated: sha256|hex = "
999                << base::HexEncode(calculated_hash.data(),
1000                                   calculated_hash.size());
1001 
1002     vector<string> source_extents;
1003     for (const Extent& ext : operation.src_extents()) {
1004       source_extents.push_back(
1005           base::StringPrintf("%" PRIu64 ":%" PRIu64,
1006                              static_cast<uint64_t>(ext.start_block()),
1007                              static_cast<uint64_t>(ext.num_blocks())));
1008     }
1009     LOG(ERROR) << "Operation source (offset:size) in blocks: "
1010                << base::JoinString(source_extents, ",");
1011 
1012     // Log remount history if this device is an ext4 partition.
1013     LogMountHistory(source_fd);
1014 
1015     *error = ErrorCode::kDownloadStateInitializationError;
1016     return false;
1017   }
1018   return true;
1019 }
1020 
1021 bool DeltaPerformer::PerformSourceCopyOperation(
1022     const InstallOperation& operation, ErrorCode* error) {
1023   if (operation.has_src_length())
1024     TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
1025   if (operation.has_dst_length())
1026     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
1027 
1028   brillo::Blob source_hash;
1029   TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_,
1030                                                      operation.src_extents(),
1031                                                      target_fd_,
1032                                                      operation.dst_extents(),
1033                                                      block_size_,
1034                                                      &source_hash));
1035 
1036   if (operation.has_src_sha256_hash()) {
1037     TEST_AND_RETURN_FALSE(
1038         ValidateSourceHash(source_hash, operation, source_fd_, error));
1039   }
1040 
1041   return true;
1042 }
1043 
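// Converts |extents| into the "offset:length,offset:length" string format that
// bspatch expects, truncating the total length to |full_length| bytes.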
1044 bool DeltaPerformer::ExtentsToBsdiffPositionsString(
1045     const RepeatedPtrField<Extent>& extents,
1046     uint64_t block_size,
1047     uint64_t full_length,
1048     string* positions_string) {
1049   string ret;
1050   uint64_t length = 0;
1051   for (const Extent& extent : extents) {
1052     int64_t start = extent.start_block() * block_size;
1053     uint64_t this_length =
1054         min(full_length - length,
1055             static_cast<uint64_t>(extent.num_blocks()) * block_size);
1056     ret += base::StringPrintf("%" PRIi64 ":%" PRIu64 ",", start, this_length);
1057     length += this_length;
1058   }
1059   TEST_AND_RETURN_FALSE(length == full_length);
1060   if (!ret.empty())
1061     ret.resize(ret.size() - 1);  // Strip trailing comma off
1062   *positions_string = ret;
1063   return true;
1064 }
1065 
1066 bool DeltaPerformer::PerformBsdiffOperation(const InstallOperation& operation) {
1067   // Since we delete data off the beginning of the buffer as we use it,
1068   // the data we need should be exactly at the beginning of the buffer.
1069   TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
1070   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
1071 
1072   string input_positions;
1073   TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.src_extents(),
1074                                                        block_size_,
1075                                                        operation.src_length(),
1076                                                        &input_positions));
1077   string output_positions;
1078   TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.dst_extents(),
1079                                                        block_size_,
1080                                                        operation.dst_length(),
1081                                                        &output_positions));
1082 
1083   TEST_AND_RETURN_FALSE(bsdiff::bspatch(target_path_.c_str(),
1084                                         target_path_.c_str(),
1085                                         buffer_.data(),
1086                                         buffer_.size(),
1087                                         input_positions.c_str(),
1088                                         output_positions.c_str()) == 0);
1089   DiscardBuffer(true, buffer_.size());
1090 
1091   if (operation.dst_length() % block_size_) {
1092     // Zero out rest of final block.
1093     // TODO(adlr): build this into bspatch; it's more efficient that way.
1094     const Extent& last_extent =
1095         operation.dst_extents(operation.dst_extents_size() - 1);
1096     const uint64_t end_byte =
1097         (last_extent.start_block() + last_extent.num_blocks()) * block_size_;
1098     const uint64_t begin_byte =
1099         end_byte - (block_size_ - operation.dst_length() % block_size_);
1100     brillo::Blob zeros(end_byte - begin_byte);
1101     TEST_AND_RETURN_FALSE(utils::PWriteAll(
1102         target_fd_, zeros.data(), end_byte - begin_byte, begin_byte));
1103   }
1104   return true;
1105 }
1106 
1107 namespace {
1108 
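// A class to be passed to |bspatch| for reading from an |ExtentReader| or
// writing into an |ExtentWriter|.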
1109 class BsdiffExtentFile : public bsdiff::FileInterface {
1110  public:
1111   BsdiffExtentFile(std::unique_ptr<ExtentReader> reader, size_t size)
1112       : BsdiffExtentFile(std::move(reader), nullptr, size) {}
1113   BsdiffExtentFile(std::unique_ptr<ExtentWriter> writer, size_t size)
1114       : BsdiffExtentFile(nullptr, std::move(writer), size) {}
1115 
1116   ~BsdiffExtentFile() override = default;
1117 
1118   bool Read(void* buf, size_t count, size_t* bytes_read) override {
1119     TEST_AND_RETURN_FALSE(reader_->Read(buf, count));
1120     *bytes_read = count;
1121     offset_ += count;
1122     return true;
1123   }
1124 
1125   bool Write(const void* buf, size_t count, size_t* bytes_written) override {
1126     TEST_AND_RETURN_FALSE(writer_->Write(buf, count));
1127     *bytes_written = count;
1128     offset_ += count;
1129     return true;
1130   }
1131 
1132   bool Seek(off_t pos) override {
1133     if (reader_ != nullptr) {
1134       TEST_AND_RETURN_FALSE(reader_->Seek(pos));
1135       offset_ = pos;
1136     } else {
1137       // For writes there should technically be no change of position, or it
1138       // should be equal to the current offset.
1139       TEST_AND_RETURN_FALSE(offset_ == static_cast<uint64_t>(pos));
1140     }
1141     return true;
1142   }
1143 
1144   bool Close() override {
1145     if (writer_ != nullptr) {
1146       TEST_AND_RETURN_FALSE(writer_->End());
1147     }
1148     return true;
1149   }
1150 
1151   bool GetSize(uint64_t* size) override {
1152     *size = size_;
1153     return true;
1154   }
1155 
1156  private:
1157   BsdiffExtentFile(std::unique_ptr<ExtentReader> reader,
1158                    std::unique_ptr<ExtentWriter> writer,
1159                    size_t size)
1160       : reader_(std::move(reader)),
1161         writer_(std::move(writer)),
1162         size_(size),
1163         offset_(0) {}
1164 
1165   std::unique_ptr<ExtentReader> reader_;
1166   std::unique_ptr<ExtentWriter> writer_;
1167   uint64_t size_;
1168   uint64_t offset_;
1169 
1170   DISALLOW_COPY_AND_ASSIGN(BsdiffExtentFile);
1171 };
1172 
1173 }  // namespace
1174 
1175 bool DeltaPerformer::PerformSourceBsdiffOperation(
1176     const InstallOperation& operation, ErrorCode* error) {
1177   // Since we delete data off the beginning of the buffer as we use it,
1178   // the data we need should be exactly at the beginning of the buffer.
1179   TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
1180   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
1181   if (operation.has_src_length())
1182     TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
1183   if (operation.has_dst_length())
1184     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
1185 
1186   if (operation.has_src_sha256_hash()) {
1187     brillo::Blob source_hash;
1188     TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
1189         source_fd_, operation.src_extents(), block_size_, &source_hash));
1190     TEST_AND_RETURN_FALSE(
1191         ValidateSourceHash(source_hash, operation, source_fd_, error));
1192   }
1193 
1194   auto reader = std::make_unique<DirectExtentReader>();
1195   TEST_AND_RETURN_FALSE(
1196       reader->Init(source_fd_, operation.src_extents(), block_size_));
1197   auto src_file = std::make_unique<BsdiffExtentFile>(
1198       std::move(reader),
1199       utils::BlocksInExtents(operation.src_extents()) * block_size_);
1200 
1201   auto writer = std::make_unique<DirectExtentWriter>();
1202   TEST_AND_RETURN_FALSE(
1203       writer->Init(target_fd_, operation.dst_extents(), block_size_));
1204   auto dst_file = std::make_unique<BsdiffExtentFile>(
1205       std::move(writer),
1206       utils::BlocksInExtents(operation.dst_extents()) * block_size_);
1207 
1208   TEST_AND_RETURN_FALSE(bsdiff::bspatch(std::move(src_file),
1209                                         std::move(dst_file),
1210                                         buffer_.data(),
1211                                         buffer_.size()) == 0);
1212   DiscardBuffer(true, buffer_.size());
1213   return true;
1214 }
1215 
1216 namespace {
1217 
1218 // A class to be passed to |puffpatch| for reading from |source_fd_| and writing
1219 // into |target_fd_|.
1220 class PuffinExtentStream : public puffin::StreamInterface {
1221  public:
1222   // Constructor for creating a stream for reading from an |ExtentReader|.
1223   PuffinExtentStream(std::unique_ptr<ExtentReader> reader, uint64_t size)
1224       : PuffinExtentStream(std::move(reader), nullptr, size) {}
1225 
1226   // Constructor for creating a stream for writing to an |ExtentWriter|.
1227   PuffinExtentStream(std::unique_ptr<ExtentWriter> writer, uint64_t size)
1228       : PuffinExtentStream(nullptr, std::move(writer), size) {}
1229 
1230   ~PuffinExtentStream() override = default;
1231 
1232   bool GetSize(uint64_t* size) const override {
1233     *size = size_;
1234     return true;
1235   }
1236 
1237   bool GetOffset(uint64_t* offset) const override {
1238     *offset = offset_;
1239     return true;
1240   }
1241 
1242   bool Seek(uint64_t offset) override {
1243     if (is_read_) {
1244       TEST_AND_RETURN_FALSE(reader_->Seek(offset));
1245       offset_ = offset;
1246     } else {
1247       // For writes there should technically be no change of position, or it
1248       // should be equal to the current offset.
1249       TEST_AND_RETURN_FALSE(offset_ == offset);
1250     }
1251     return true;
1252   }
1253 
1254   bool Read(void* buffer, size_t count) override {
1255     TEST_AND_RETURN_FALSE(is_read_);
1256     TEST_AND_RETURN_FALSE(reader_->Read(buffer, count));
1257     offset_ += count;
1258     return true;
1259   }
1260 
1261   bool Write(const void* buffer, size_t count) override {
1262     TEST_AND_RETURN_FALSE(!is_read_);
1263     TEST_AND_RETURN_FALSE(writer_->Write(buffer, count));
1264     offset_ += count;
1265     return true;
1266   }
1267 
1268   bool Close() override {
1269     if (!is_read_) {
1270       TEST_AND_RETURN_FALSE(writer_->End());
1271     }
1272     return true;
1273   }
1274 
1275  private:
1276   PuffinExtentStream(std::unique_ptr<ExtentReader> reader,
1277                      std::unique_ptr<ExtentWriter> writer,
1278                      uint64_t size)
1279       : reader_(std::move(reader)),
1280         writer_(std::move(writer)),
1281         size_(size),
1282         offset_(0),
1283         is_read_(reader_ ? true : false) {}
1284 
1285   std::unique_ptr<ExtentReader> reader_;
1286   std::unique_ptr<ExtentWriter> writer_;
1287   uint64_t size_;
1288   uint64_t offset_;
1289   bool is_read_;
1290 
1291   DISALLOW_COPY_AND_ASSIGN(PuffinExtentStream);
1292 };
1293 
1294 }  // namespace
1295 
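// Applies a PUFFDIFF operation. The source and destination extents are wrapped
// in PuffinExtentStream adapters so that puffin::PuffPatch can access them
// through the puffin::StreamInterface API.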
bool DeltaPerformer::PerformPuffDiffOperation(const InstallOperation& operation,
                                              ErrorCode* error) {
  // Since we delete data off the beginning of the buffer as we use it,
  // the data we need should be exactly at the beginning of the buffer.
  TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
  TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());

  if (operation.has_src_sha256_hash()) {
    brillo::Blob source_hash;
    TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
        source_fd_, operation.src_extents(), block_size_, &source_hash));
    TEST_AND_RETURN_FALSE(
        ValidateSourceHash(source_hash, operation, source_fd_, error));
  }

  auto reader = std::make_unique<DirectExtentReader>();
  TEST_AND_RETURN_FALSE(
      reader->Init(source_fd_, operation.src_extents(), block_size_));
  puffin::UniqueStreamPtr src_stream(new PuffinExtentStream(
      std::move(reader),
      utils::BlocksInExtents(operation.src_extents()) * block_size_));

  auto writer = std::make_unique<DirectExtentWriter>();
  TEST_AND_RETURN_FALSE(
      writer->Init(target_fd_, operation.dst_extents(), block_size_));
  puffin::UniqueStreamPtr dst_stream(new PuffinExtentStream(
      std::move(writer),
      utils::BlocksInExtents(operation.dst_extents()) * block_size_));

  const size_t kMaxCacheSize = 5 * 1024 * 1024;  // Total 5MB cache.
  TEST_AND_RETURN_FALSE(puffin::PuffPatch(std::move(src_stream),
                                          std::move(dst_stream),
                                          buffer_.data(),
                                          buffer_.size(),
                                          kMaxCacheSize));
  DiscardBuffer(true, buffer_.size());
  return true;
}

bool DeltaPerformer::ExtractSignatureMessageFromOperation(
    const InstallOperation& operation) {
  if (operation.type() != InstallOperation::REPLACE ||
      !manifest_.has_signatures_offset() ||
      manifest_.signatures_offset() != operation.data_offset()) {
    return false;
  }
  TEST_AND_RETURN_FALSE(manifest_.has_signatures_size() &&
                        manifest_.signatures_size() == operation.data_length());
  TEST_AND_RETURN_FALSE(ExtractSignatureMessage());
  return true;
}

bool DeltaPerformer::ExtractSignatureMessage() {
  TEST_AND_RETURN_FALSE(signatures_message_data_.empty());
  TEST_AND_RETURN_FALSE(buffer_offset_ == manifest_.signatures_offset());
  TEST_AND_RETURN_FALSE(buffer_.size() >= manifest_.signatures_size());
  signatures_message_data_.assign(
      buffer_.begin(),
      buffer_.begin() + manifest_.signatures_size());

  // Save the signature blob because if the update is interrupted after the
  // download phase we don't go through this path anymore. Some alternatives to
  // consider:
  //
  // 1. On resume, re-download the signature blob from the server and re-verify
  // it.
  //
  // 2. Verify the signature as soon as it's received and don't checkpoint the
  // blob and the signed sha-256 context.
  LOG_IF(WARNING, !prefs_->SetString(kPrefsUpdateStateSignatureBlob,
                                     string(signatures_message_data_.begin(),
                                            signatures_message_data_.end())))
      << "Unable to store the signature blob.";

  LOG(INFO) << "Extracted signature data of size "
            << manifest_.signatures_size() << " at "
            << manifest_.signatures_offset();
  return true;
}

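// Decides whether the public RSA key sent in the Omaha response should be
// used: only on non-official (test) builds, and only when no public key is
// already installed at |public_key_path_| and the response actually carries
// a key.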
bool DeltaPerformer::GetPublicKeyFromResponse(base::FilePath* out_tmp_key) {
  if (hardware_->IsOfficialBuild() ||
      utils::FileExists(public_key_path_.c_str()) ||
      install_plan_->public_key_rsa.empty())
    return false;

  if (!utils::DecodeAndStoreBase64String(install_plan_->public_key_rsa,
                                         out_tmp_key))
    return false;

  return true;
}

ErrorCode DeltaPerformer::ValidateManifest() {
  // Perform assorted checks to sanity-check the manifest: make sure it
  // matches data from other sources and that it is a supported version.

  bool has_old_fields =
      (manifest_.has_old_kernel_info() || manifest_.has_old_rootfs_info());
  for (const PartitionUpdate& partition : manifest_.partitions()) {
    has_old_fields = has_old_fields || partition.has_old_partition_info();
  }

  // The presence of an old partition hash is the sole indicator for a delta
  // update.
  InstallPayloadType actual_payload_type =
      has_old_fields ? InstallPayloadType::kDelta : InstallPayloadType::kFull;

  if (payload_->type == InstallPayloadType::kUnknown) {
    LOG(INFO) << "Detected a '"
              << InstallPayloadTypeToString(actual_payload_type)
              << "' payload.";
    payload_->type = actual_payload_type;
  } else if (payload_->type != actual_payload_type) {
    LOG(ERROR) << "InstallPlan expected a '"
               << InstallPayloadTypeToString(payload_->type)
               << "' payload but the downloaded manifest contains a '"
               << InstallPayloadTypeToString(actual_payload_type)
               << "' payload.";
    return ErrorCode::kPayloadMismatchedType;
  }

  // Check that the minor version is compatible.
  if (actual_payload_type == InstallPayloadType::kFull) {
    if (manifest_.minor_version() != kFullPayloadMinorVersion) {
      LOG(ERROR) << "Manifest contains minor version "
                 << manifest_.minor_version()
                 << ", but all full payloads should have version "
                 << kFullPayloadMinorVersion << ".";
      return ErrorCode::kUnsupportedMinorPayloadVersion;
    }
  } else {
    if (manifest_.minor_version() != supported_minor_version_) {
      LOG(ERROR) << "Manifest contains minor version "
                 << manifest_.minor_version()
                 << " not the supported "
                 << supported_minor_version_;
      return ErrorCode::kUnsupportedMinorPayloadVersion;
    }
  }

  if (major_payload_version_ != kChromeOSMajorPayloadVersion) {
    if (manifest_.has_old_rootfs_info() ||
        manifest_.has_new_rootfs_info() ||
        manifest_.has_old_kernel_info() ||
        manifest_.has_new_kernel_info() ||
        manifest_.install_operations_size() != 0 ||
        manifest_.kernel_install_operations_size() != 0) {
      LOG(ERROR) << "Manifest contains deprecated field only supported in "
                 << "major payload version 1, but the payload major version is "
                 << major_payload_version_;
      return ErrorCode::kPayloadMismatchedType;
    }
  }

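  // Anti-rollback check: the manifest declares the newest OS build timestamp
  // that is allowed to apply this payload; refuse the payload if the running
  // build is newer than that.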
  if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) {
    LOG(ERROR) << "The current OS build timestamp ("
               << hardware_->GetBuildTimestamp()
               << ") is newer than the maximum timestamp in the manifest ("
               << manifest_.max_timestamp() << ")";
    return ErrorCode::kPayloadTimestampError;
  }

  // TODO(garnold) we should be adding more and more manifest checks, such as
  // partition boundaries etc (see chromium-os:37661).

  return ErrorCode::kSuccess;
}

ErrorCode DeltaPerformer::ValidateOperationHash(
    const InstallOperation& operation) {
  if (!operation.data_sha256_hash().size()) {
    if (!operation.data_length()) {
      // Operations that do not have any data blob won't have any operation hash
      // either. So, these operations are always considered validated since the
      // metadata that contains all the non-data-blob portions of the operation
      // has already been validated. This is true for both HTTP and HTTPS cases.
      return ErrorCode::kSuccess;
    }

    // No hash is present for an operation that has data blobs. This shouldn't
    // happen normally for any client that has this code, because the
    // corresponding update should have been produced with the operation
    // hashes. So if it happens it means either we've turned operation hash
    // generation off in DeltaDiffGenerator or it's a regression of some sort.
    // One caveat though: the last operation is a dummy signature operation
    // that doesn't have a hash at the time the manifest is created. So we
    // should not complain about that operation. This operation can be
    // recognized by the fact that its offset is mentioned in the manifest.
    if (manifest_.signatures_offset() &&
        manifest_.signatures_offset() == operation.data_offset()) {
      LOG(INFO) << "Skipping hash verification for signature operation "
                << next_operation_num_ + 1;
    } else {
      if (install_plan_->hash_checks_mandatory) {
        LOG(ERROR) << "Missing mandatory operation hash for operation "
                   << next_operation_num_ + 1;
        return ErrorCode::kDownloadOperationHashMissingError;
      }

      LOG(WARNING) << "Cannot validate operation " << next_operation_num_ + 1
                   << " as there's no operation hash in manifest";
    }
    return ErrorCode::kSuccess;
  }

  brillo::Blob expected_op_hash;
  expected_op_hash.assign(operation.data_sha256_hash().data(),
                          (operation.data_sha256_hash().data() +
                           operation.data_sha256_hash().size()));

  brillo::Blob calculated_op_hash;
  if (!HashCalculator::RawHashOfBytes(
          buffer_.data(), operation.data_length(), &calculated_op_hash)) {
    LOG(ERROR) << "Unable to compute actual hash of operation "
               << next_operation_num_;
    return ErrorCode::kDownloadOperationHashVerificationError;
  }

  if (calculated_op_hash != expected_op_hash) {
    LOG(ERROR) << "Hash verification failed for operation "
               << next_operation_num_ << ". Expected hash = ";
    utils::HexDumpVector(expected_op_hash);
    LOG(ERROR) << "Calculated hash over " << operation.data_length()
               << " bytes at offset: " << operation.data_offset() << " = ";
    utils::HexDumpVector(calculated_op_hash);
    return ErrorCode::kDownloadOperationHashMismatch;
  }

  return ErrorCode::kSuccess;
}

#define TEST_AND_RETURN_VAL(_retval, _condition)                \
  do {                                                          \
    if (!(_condition)) {                                        \
      LOG(ERROR) << "VerifyPayload failure: " << #_condition;   \
      return _retval;                                           \
    }                                                           \
  } while (0);

ErrorCode DeltaPerformer::VerifyPayload(
    const brillo::Blob& update_check_response_hash,
    const uint64_t update_check_response_size) {

  // See if we should use the public RSA key in the Omaha response.
  base::FilePath path_to_public_key(public_key_path_);
  base::FilePath tmp_key;
  if (GetPublicKeyFromResponse(&tmp_key))
    path_to_public_key = tmp_key;
  ScopedPathUnlinker tmp_key_remover(tmp_key.value());
  if (tmp_key.empty())
    tmp_key_remover.set_should_remove(false);

  LOG(INFO) << "Verifying payload using public key: "
            << path_to_public_key.value();

  // Verifies the download size.
  TEST_AND_RETURN_VAL(ErrorCode::kPayloadSizeMismatchError,
                      update_check_response_size ==
                      metadata_size_ + metadata_signature_size_ +
                      buffer_offset_);

  // Verifies the payload hash.
  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadVerificationError,
                      !payload_hash_calculator_.raw_hash().empty());
  TEST_AND_RETURN_VAL(
      ErrorCode::kPayloadHashMismatchError,
      payload_hash_calculator_.raw_hash() == update_check_response_hash);

  // Verifies the signed payload hash.
  if (!utils::FileExists(path_to_public_key.value().c_str())) {
    LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
    return ErrorCode::kSuccess;
  }
  TEST_AND_RETURN_VAL(ErrorCode::kSignedDeltaPayloadExpectedError,
                      !signatures_message_data_.empty());
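  // The payload signature is computed over the SHA-256 hash padded to the
  // RSA-2048 block size, so pad the locally computed hash the same way before
  // handing it to the verifier.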
  brillo::Blob hash_data = signed_hash_calculator_.raw_hash();
  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
                      PayloadVerifier::PadRSA2048SHA256Hash(&hash_data));
  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
                      !hash_data.empty());

  if (!PayloadVerifier::VerifySignature(
      signatures_message_data_, path_to_public_key.value(), hash_data)) {
    // The autoupdate_CatchBadSignatures test checks for this string
    // in log-files. Keep in sync.
    LOG(ERROR) << "Public key verification failed, thus update failed.";
    return ErrorCode::kDownloadPayloadPubKeyVerificationError;
  }

  LOG(INFO) << "Payload hash matches value in payload.";

  // At this point, we are guaranteed to have downloaded a full payload, i.e.,
  // the one whose size matches the size mentioned in the Omaha response. If
  // any errors happen after this, it's likely a problem with the payload
  // itself or the state of the system and not a problem with the URL or
  // network. So, indicate that to the download delegate so that AU can back
  // off appropriately.
  if (download_delegate_)
    download_delegate_->DownloadComplete();

  return ErrorCode::kSuccess;
}

void DeltaPerformer::DiscardBuffer(bool do_advance_offset,
                                   size_t signed_hash_buffer_size) {
  // Update the buffer offset.
  if (do_advance_offset)
    buffer_offset_ += buffer_.size();

  // Hash the content.
  payload_hash_calculator_.Update(buffer_.data(), buffer_.size());
  signed_hash_calculator_.Update(buffer_.data(), signed_hash_buffer_size);

  // Swap content with an empty vector to ensure that all memory is released.
  brillo::Blob().swap(buffer_);
}

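// An interrupted update can be resumed only if every piece of persisted state
// is present and consistent: a valid next-operation index, an update check
// response hash matching the interrupted one, a resume-failure count within
// the allowed limit, and sane offsets and sizes.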
bool DeltaPerformer::CanResumeUpdate(PrefsInterface* prefs,
                                     const string& update_check_response_hash) {
  int64_t next_operation = kUpdateStateOperationInvalid;
  if (!(prefs->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) &&
        next_operation != kUpdateStateOperationInvalid &&
        next_operation > 0))
    return false;

  string interrupted_hash;
  if (!(prefs->GetString(kPrefsUpdateCheckResponseHash, &interrupted_hash) &&
        !interrupted_hash.empty() &&
        interrupted_hash == update_check_response_hash))
    return false;

  int64_t resumed_update_failures;
  // Note that storing this value is optional, but if it is present it must
  // not exceed the limit.
  if (prefs->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures) &&
      resumed_update_failures > kMaxResumedUpdateFailures)
    return false;

  // Sanity check the rest.
  int64_t next_data_offset = -1;
  if (!(prefs->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
        next_data_offset >= 0))
    return false;

  string sha256_context;
  if (!(prefs->GetString(kPrefsUpdateStateSHA256Context, &sha256_context) &&
        !sha256_context.empty()))
    return false;

  int64_t manifest_metadata_size = 0;
  if (!(prefs->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size) &&
        manifest_metadata_size > 0))
    return false;

  int64_t manifest_signature_size = 0;
  if (!(prefs->GetInt64(kPrefsManifestSignatureSize,
                        &manifest_signature_size) &&
        manifest_signature_size >= 0))
    return false;

  return true;
}

bool DeltaPerformer::ResetUpdateProgress(PrefsInterface* prefs, bool quick) {
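  // Clearing the next-operation pref is what invalidates any saved progress;
  // a full (non-quick) reset additionally clears the rest of the persisted
  // update state.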
  TEST_AND_RETURN_FALSE(prefs->SetInt64(kPrefsUpdateStateNextOperation,
                                        kUpdateStateOperationInvalid));
  if (!quick) {
    prefs->SetInt64(kPrefsUpdateStateNextDataOffset, -1);
    prefs->SetInt64(kPrefsUpdateStateNextDataLength, 0);
    prefs->SetString(kPrefsUpdateStateSHA256Context, "");
    prefs->SetString(kPrefsUpdateStateSignedSHA256Context, "");
    prefs->SetString(kPrefsUpdateStateSignatureBlob, "");
    prefs->SetInt64(kPrefsManifestMetadataSize, -1);
    prefs->SetInt64(kPrefsManifestSignatureSize, -1);
    prefs->SetInt64(kPrefsResumedUpdateFailures, 0);
    prefs->Delete(kPrefsPostInstallSucceeded);
  }
  return true;
}

bool DeltaPerformer::CheckpointUpdateProgress() {
  Terminator::set_exit_blocked(true);
  if (last_updated_buffer_offset_ != buffer_offset_) {
    // Resets the progress in case we die in the middle of the state update.
    ResetUpdateProgress(prefs_, true);
    TEST_AND_RETURN_FALSE(
        prefs_->SetString(kPrefsUpdateStateSHA256Context,
                          payload_hash_calculator_.GetContext()));
    TEST_AND_RETURN_FALSE(
        prefs_->SetString(kPrefsUpdateStateSignedSHA256Context,
                          signed_hash_calculator_.GetContext()));
    TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextDataOffset,
                                           buffer_offset_));
    last_updated_buffer_offset_ = buffer_offset_;

    if (next_operation_num_ < num_total_operations_) {
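      // Map the global operation index to the partition that contains it, and
      // then to the operation index within that partition, using the
      // accumulated per-partition operation counts in |acc_num_operations_|.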
      size_t partition_index = current_partition_;
      while (next_operation_num_ >= acc_num_operations_[partition_index])
        partition_index++;
      const size_t partition_operation_num = next_operation_num_ - (
          partition_index ? acc_num_operations_[partition_index - 1] : 0);
      const InstallOperation& op =
          partitions_[partition_index].operations(partition_operation_num);
      TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextDataLength,
                                             op.data_length()));
    } else {
      TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextDataLength,
                                             0));
    }
  }
  TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextOperation,
                                         next_operation_num_));
  return true;
}

bool DeltaPerformer::PrimeUpdateState() {
  CHECK(manifest_valid_);
  block_size_ = manifest_.block_size();

  int64_t next_operation = kUpdateStateOperationInvalid;
  if (!prefs_->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) ||
      next_operation == kUpdateStateOperationInvalid ||
      next_operation <= 0) {
    // Initiating a new update, no more state needs to be initialized.
    return true;
  }
  next_operation_num_ = next_operation;

  // Resuming an update -- load the rest of the update state.
  int64_t next_data_offset = -1;
  TEST_AND_RETURN_FALSE(prefs_->GetInt64(kPrefsUpdateStateNextDataOffset,
                                         &next_data_offset) &&
                        next_data_offset >= 0);
  buffer_offset_ = next_data_offset;

  // The signed hash context and the signature blob may be empty if the
  // interrupted update didn't reach the signature.
  string signed_hash_context;
  if (prefs_->GetString(kPrefsUpdateStateSignedSHA256Context,
                        &signed_hash_context)) {
    TEST_AND_RETURN_FALSE(
        signed_hash_calculator_.SetContext(signed_hash_context));
  }

  string signature_blob;
  if (prefs_->GetString(kPrefsUpdateStateSignatureBlob, &signature_blob)) {
    signatures_message_data_.assign(signature_blob.begin(),
                                    signature_blob.end());
  }

  string hash_context;
  TEST_AND_RETURN_FALSE(prefs_->GetString(kPrefsUpdateStateSHA256Context,
                                          &hash_context) &&
                        payload_hash_calculator_.SetContext(hash_context));

  int64_t manifest_metadata_size = 0;
  TEST_AND_RETURN_FALSE(prefs_->GetInt64(kPrefsManifestMetadataSize,
                                         &manifest_metadata_size) &&
                        manifest_metadata_size > 0);
  metadata_size_ = manifest_metadata_size;

  int64_t manifest_signature_size = 0;
  TEST_AND_RETURN_FALSE(
      prefs_->GetInt64(kPrefsManifestSignatureSize, &manifest_signature_size) &&
      manifest_signature_size >= 0);
  metadata_signature_size_ = manifest_signature_size;

  // Advance the download progress to reflect what doesn't need to be
  // re-downloaded.
  total_bytes_received_ += buffer_offset_;

  // Speculatively count the resume as a failure.
  int64_t resumed_update_failures;
  if (prefs_->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures)) {
    resumed_update_failures++;
  } else {
    resumed_update_failures = 1;
  }
  prefs_->SetInt64(kPrefsResumedUpdateFailures, resumed_update_failures);
  return true;
}

}  // namespace chromeos_update_engine