1 //
2 // Copyright (C) 2012 The Android Open Source Project
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //      http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 //
16 
17 #include "update_engine/payload_consumer/delta_performer.h"
18 
19 #include <endian.h>
20 #include <errno.h>
21 #include <linux/fs.h>
22 
23 #include <algorithm>
24 #include <cstring>
25 #include <memory>
26 #include <string>
27 #include <vector>
28 
29 #include <base/files/file_util.h>
30 #include <base/format_macros.h>
31 #include <base/strings/string_number_conversions.h>
32 #include <base/strings/string_util.h>
33 #include <base/strings/stringprintf.h>
34 #include <brillo/data_encoding.h>
35 #include <brillo/make_unique_ptr.h>
36 #include <google/protobuf/repeated_field.h>
37 
38 #include "update_engine/common/constants.h"
39 #include "update_engine/common/hardware_interface.h"
40 #include "update_engine/common/prefs_interface.h"
41 #include "update_engine/common/subprocess.h"
42 #include "update_engine/common/terminator.h"
43 #include "update_engine/payload_consumer/bzip_extent_writer.h"
44 #include "update_engine/payload_consumer/download_action.h"
45 #include "update_engine/payload_consumer/extent_writer.h"
46 #if USE_MTD
47 #include "update_engine/payload_consumer/mtd_file_descriptor.h"
48 #endif
49 #include "update_engine/payload_consumer/payload_constants.h"
50 #include "update_engine/payload_consumer/payload_verifier.h"
51 #include "update_engine/payload_consumer/xz_extent_writer.h"
52 
53 using google::protobuf::RepeatedPtrField;
54 using std::min;
55 using std::string;
56 using std::vector;
57 
58 namespace chromeos_update_engine {
59 
60 const uint64_t DeltaPerformer::kDeltaVersionOffset = sizeof(kDeltaMagic);
61 const uint64_t DeltaPerformer::kDeltaVersionSize = 8;
62 const uint64_t DeltaPerformer::kDeltaManifestSizeOffset =
63     kDeltaVersionOffset + kDeltaVersionSize;
64 const uint64_t DeltaPerformer::kDeltaManifestSizeSize = 8;
65 const uint64_t DeltaPerformer::kDeltaMetadataSignatureSizeSize = 4;
66 const uint64_t DeltaPerformer::kMaxPayloadHeaderSize = 24;
67 const uint64_t DeltaPerformer::kSupportedMajorPayloadVersion = 2;
68 const uint32_t DeltaPerformer::kSupportedMinorPayloadVersion = 3;
69 
70 const unsigned DeltaPerformer::kProgressLogMaxChunks = 10;
71 const unsigned DeltaPerformer::kProgressLogTimeoutSeconds = 30;
72 const unsigned DeltaPerformer::kProgressDownloadWeight = 50;
73 const unsigned DeltaPerformer::kProgressOperationsWeight = 50;
74 
75 namespace {
76 const int kUpdateStateOperationInvalid = -1;
77 const int kMaxResumedUpdateFailures = 10;
78 #if USE_MTD
79 const int kUbiVolumeAttachTimeout = 5 * 60;
80 #endif
81 
82 FileDescriptorPtr CreateFileDescriptor(const char* path) {
83   FileDescriptorPtr ret;
84 #if USE_MTD
85   if (strstr(path, "/dev/ubi") == path) {
86     if (!UbiFileDescriptor::IsUbi(path)) {
87       // The volume might not have been attached at boot time.
88       int volume_no;
89       if (utils::SplitPartitionName(path, nullptr, &volume_no)) {
90         utils::TryAttachingUbiVolume(volume_no, kUbiVolumeAttachTimeout);
91       }
92     }
93     if (UbiFileDescriptor::IsUbi(path)) {
94       LOG(INFO) << path << " is a UBI device.";
95       ret.reset(new UbiFileDescriptor);
96     }
97   } else if (MtdFileDescriptor::IsMtd(path)) {
98     LOG(INFO) << path << " is an MTD device.";
99     ret.reset(new MtdFileDescriptor);
100   } else {
101     LOG(INFO) << path << " is not an MTD nor a UBI device.";
102 #endif
103     ret.reset(new EintrSafeFileDescriptor);
104 #if USE_MTD
105   }
106 #endif
107   return ret;
108 }
109 
110 // Opens path for read/write. On success returns an open FileDescriptor
111 // and sets *err to 0. On failure, sets *err to errno and returns nullptr.
112 FileDescriptorPtr OpenFile(const char* path, int mode, int* err) {
113   // Try to mark the block device read-only based on the mode. Ignore any
114   // failure since this won't work when passing regular files.
115   utils::SetBlockDeviceReadOnly(path, (mode & O_ACCMODE) == O_RDONLY);
116 
117   FileDescriptorPtr fd = CreateFileDescriptor(path);
118 #if USE_MTD
119   // On NAND devices, we can either read, or write, but not both. So here we
120   // use O_WRONLY.
121   if (UbiFileDescriptor::IsUbi(path) || MtdFileDescriptor::IsMtd(path)) {
122     mode = O_WRONLY;
123   }
124 #endif
125   if (!fd->Open(path, mode, 000)) {
126     *err = errno;
127     PLOG(ERROR) << "Unable to open file " << path;
128     return nullptr;
129   }
130   *err = 0;
131   return fd;
132 }
133 
134 // Discard the tail of the block device referenced by |fd|, from the offset
135 // |data_size| until the end of the block device. Returns whether the data was
136 // discarded.
137 bool DiscardPartitionTail(FileDescriptorPtr fd, uint64_t data_size) {
138   uint64_t part_size = fd->BlockDevSize();
139   if (!part_size || part_size <= data_size)
140     return false;
141 
142   const vector<int> requests = {
143       BLKSECDISCARD,
144       BLKDISCARD,
145 #ifdef BLKZEROOUT
146       BLKZEROOUT,
147 #endif
148   };
149   for (int request : requests) {
150     int error = 0;
151     if (fd->BlkIoctl(request, data_size, part_size - data_size, &error) &&
152         error == 0) {
153       return true;
154     }
155     LOG(WARNING) << "Error discarding the last "
156                  << (part_size - data_size) / 1024 << " KiB using ioctl("
157                  << request << ")";
158   }
159   return false;
160 }
161 
162 }  // namespace
163 
164 
165 // Computes the ratio of |part| and |total|, scaled to |norm|, using integer
166 // arithmetic.
167 static uint64_t IntRatio(uint64_t part, uint64_t total, uint64_t norm) {
168   return part * norm / total;
169 }
170 
171 void DeltaPerformer::LogProgress(const char* message_prefix) {
172   // Format operations total count and percentage.
173   string total_operations_str("?");
174   string completed_percentage_str("");
175   if (num_total_operations_) {
176     total_operations_str = std::to_string(num_total_operations_);
177     // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
178     completed_percentage_str =
179         base::StringPrintf(" (%" PRIu64 "%%)",
180                            IntRatio(next_operation_num_, num_total_operations_,
181                                     100));
182   }
183 
184   // Format download total count and percentage.
185   size_t payload_size = install_plan_->payload_size;
186   string payload_size_str("?");
187   string downloaded_percentage_str("");
188   if (payload_size) {
189     payload_size_str = std::to_string(payload_size);
190     // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
191     downloaded_percentage_str =
192         base::StringPrintf(" (%" PRIu64 "%%)",
193                            IntRatio(total_bytes_received_, payload_size, 100));
194   }
195 
196   LOG(INFO) << (message_prefix ? message_prefix : "") << next_operation_num_
197             << "/" << total_operations_str << " operations"
198             << completed_percentage_str << ", " << total_bytes_received_
199             << "/" << payload_size_str << " bytes downloaded"
200             << downloaded_percentage_str << ", overall progress "
201             << overall_progress_ << "%";
202 }
203 
204 void DeltaPerformer::UpdateOverallProgress(bool force_log,
205                                            const char* message_prefix) {
206   // Compute our download and overall progress.
207   unsigned new_overall_progress = 0;
208   static_assert(kProgressDownloadWeight + kProgressOperationsWeight == 100,
209                 "Progress weights don't add up");
210   // Only consider download progress if its total size is known; otherwise
211   // adjust the operations weight to compensate for the absence of download
212   // progress. Also, make sure to cap the download portion at
213   // kProgressDownloadWeight, in case we end up downloading more than we
214   // initially expected (this indicates a problem, but could generally happen).
215   // TODO(garnold) the correction of operations weight when we do not have the
216   // total payload size, as well as the conditional guard below, should both be
217   // eliminated once we ensure that the payload_size in the install plan is
218   // always given and is non-zero. This currently isn't the case during unit
219   // tests (see chromium-os:37969).
220   size_t payload_size = install_plan_->payload_size;
221   unsigned actual_operations_weight = kProgressOperationsWeight;
222   if (payload_size)
223     new_overall_progress += min(
224         static_cast<unsigned>(IntRatio(total_bytes_received_, payload_size,
225                                        kProgressDownloadWeight)),
226         kProgressDownloadWeight);
227   else
228     actual_operations_weight += kProgressDownloadWeight;
229 
230   // Only add completed operations if their total number is known; we definitely
231   // expect an update to have at least one operation, so the expectation is that
232   // this will eventually reach |actual_operations_weight|.
233   if (num_total_operations_)
234     new_overall_progress += IntRatio(next_operation_num_, num_total_operations_,
235                                      actual_operations_weight);
236 
237   // Progress ratio cannot recede, unless our assumptions about the total
238   // payload size, total number of operations, or the monotonicity of progress
239   // are breached.
240   if (new_overall_progress < overall_progress_) {
241     LOG(WARNING) << "progress counter receded from " << overall_progress_
242                  << "% down to " << new_overall_progress << "%; this is a bug";
243     force_log = true;
244   }
245   overall_progress_ = new_overall_progress;
246 
247   // Update chunk index, log as needed: if forced by the caller, or we completed a
248   // progress chunk, or a timeout has expired.
249   base::Time curr_time = base::Time::Now();
250   unsigned curr_progress_chunk =
251       overall_progress_ * kProgressLogMaxChunks / 100;
252   if (force_log || curr_progress_chunk > last_progress_chunk_ ||
253       curr_time > forced_progress_log_time_) {
254     forced_progress_log_time_ = curr_time + forced_progress_log_wait_;
255     LogProgress(message_prefix);
256   }
257   last_progress_chunk_ = curr_progress_chunk;
258 }
259 
260 
261 size_t DeltaPerformer::CopyDataToBuffer(const char** bytes_p, size_t* count_p,
262                                         size_t max) {
263   const size_t count = *count_p;
264   if (!count)
265     return 0;  // Special case shortcut.
266   size_t read_len = min(count, max - buffer_.size());
267   const char* bytes_start = *bytes_p;
268   const char* bytes_end = bytes_start + read_len;
269   buffer_.insert(buffer_.end(), bytes_start, bytes_end);
270   *bytes_p = bytes_end;
271   *count_p = count - read_len;
272   return read_len;
273 }
274 
275 
276 bool DeltaPerformer::HandleOpResult(bool op_result, const char* op_type_name,
277                                     ErrorCode* error) {
278   if (op_result)
279     return true;
280 
281   size_t partition_first_op_num =
282       current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0;
283   LOG(ERROR) << "Failed to perform " << op_type_name << " operation "
284              << next_operation_num_ << ", which is the operation "
285              << next_operation_num_ - partition_first_op_num
286              << " in partition \""
287              << partitions_[current_partition_].partition_name() << "\"";
288   if (*error == ErrorCode::kSuccess)
289     *error = ErrorCode::kDownloadOperationExecutionError;
290   return false;
291 }
292 
293 int DeltaPerformer::Close() {
294   int err = -CloseCurrentPartition();
295   LOG_IF(ERROR, !payload_hash_calculator_.Finalize() ||
296                 !signed_hash_calculator_.Finalize())
297       << "Unable to finalize the hash.";
298   if (!buffer_.empty()) {
299     LOG(INFO) << "Discarding " << buffer_.size() << " unused downloaded bytes";
300     if (err >= 0)
301       err = 1;
302   }
303   return -err;
304 }
305 
306 int DeltaPerformer::CloseCurrentPartition() {
307   int err = 0;
308   if (source_fd_ && !source_fd_->Close()) {
309     err = errno;
310     PLOG(ERROR) << "Error closing source partition";
311     if (!err)
312       err = 1;
313   }
314   source_fd_.reset();
315   source_path_.clear();
316 
317   if (target_fd_ && !target_fd_->Close()) {
318     err = errno;
319     PLOG(ERROR) << "Error closing target partition";
320     if (!err)
321       err = 1;
322   }
323   target_fd_.reset();
324   target_path_.clear();
325   return -err;
326 }
327 
328 bool DeltaPerformer::OpenCurrentPartition() {
329   if (current_partition_ >= partitions_.size())
330     return false;
331 
332   const PartitionUpdate& partition = partitions_[current_partition_];
333   // Open source fds if we have a delta payload with minor version >= 2.
334   if (install_plan_->payload_type == InstallPayloadType::kDelta &&
335       GetMinorVersion() != kInPlaceMinorPayloadVersion) {
336     source_path_ = install_plan_->partitions[current_partition_].source_path;
337     int err;
338     source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, &err);
339     if (!source_fd_) {
340       LOG(ERROR) << "Unable to open source partition "
341                  << partition.partition_name() << " on slot "
342                  << BootControlInterface::SlotName(install_plan_->source_slot)
343                  << ", file " << source_path_;
344       return false;
345     }
346   }
347 
348   target_path_ = install_plan_->partitions[current_partition_].target_path;
349   int err;
350   target_fd_ = OpenFile(target_path_.c_str(), O_RDWR, &err);
351   if (!target_fd_) {
352     LOG(ERROR) << "Unable to open target partition "
353                << partition.partition_name() << " on slot "
354                << BootControlInterface::SlotName(install_plan_->target_slot)
355                << ", file " << target_path_;
356     return false;
357   }
358 
359   LOG(INFO) << "Applying " << partition.operations().size()
360             << " operations to partition \"" << partition.partition_name()
361             << "\"";
362 
363   // Discard the end of the partition, but ignore failures.
364   DiscardPartitionTail(
365       target_fd_, install_plan_->partitions[current_partition_].target_size);
366 
367   return true;
368 }
369 
370 namespace {
371 
372 void LogPartitionInfoHash(const PartitionInfo& info, const string& tag) {
373   string sha256 = brillo::data_encoding::Base64Encode(info.hash());
374   LOG(INFO) << "PartitionInfo " << tag << " sha256: " << sha256
375             << " size: " << info.size();
376 }
377 
378 void LogPartitionInfo(const vector<PartitionUpdate>& partitions) {
379   for (const PartitionUpdate& partition : partitions) {
380     LogPartitionInfoHash(partition.old_partition_info(),
381                          "old " + partition.partition_name());
382     LogPartitionInfoHash(partition.new_partition_info(),
383                          "new " + partition.partition_name());
384   }
385 }
386 
387 }  // namespace
388 
389 bool DeltaPerformer::GetMetadataSignatureSizeOffset(
390     uint64_t* out_offset) const {
391   if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
392     *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
393     return true;
394   }
395   return false;
396 }
397 
398 bool DeltaPerformer::GetManifestOffset(uint64_t* out_offset) const {
399   // The actual manifest begins right after the manifest size field, or after
400   // the metadata signature size field if major version >= 2.
401   if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
402     *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
403     return true;
404   }
405   if (major_payload_version_ == kBrilloMajorPayloadVersion) {
406     *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize +
407                   kDeltaMetadataSignatureSizeSize;
408     return true;
409   }
410   LOG(ERROR) << "Unknown major payload version: " << major_payload_version_;
411   return false;
412 }
413 
414 uint64_t DeltaPerformer::GetMetadataSize() const {
415   return metadata_size_;
416 }
417 
418 uint64_t DeltaPerformer::GetMajorVersion() const {
419   return major_payload_version_;
420 }
421 
422 uint32_t DeltaPerformer::GetMinorVersion() const {
423   if (manifest_.has_minor_version()) {
424     return manifest_.minor_version();
425   } else {
426     return install_plan_->payload_type == InstallPayloadType::kDelta
427                ? kSupportedMinorPayloadVersion
428                : kFullPayloadMinorVersion;
429   }
430 }
431 
432 bool DeltaPerformer::GetManifest(DeltaArchiveManifest* out_manifest_p) const {
433   if (!manifest_parsed_)
434     return false;
435   *out_manifest_p = manifest_;
436   return true;
437 }
438 
439 bool DeltaPerformer::IsHeaderParsed() const {
440   return metadata_size_ != 0;
441 }
442 
443 DeltaPerformer::MetadataParseResult DeltaPerformer::ParsePayloadMetadata(
444     const brillo::Blob& payload, ErrorCode* error) {
445   *error = ErrorCode::kSuccess;
446   uint64_t manifest_offset;
447 
448   if (!IsHeaderParsed()) {
449     // Ensure we have data to cover the major payload version.
450     if (payload.size() < kDeltaManifestSizeOffset)
451       return kMetadataParseInsufficientData;
452 
453     // Validate the magic string.
454     if (memcmp(payload.data(), kDeltaMagic, sizeof(kDeltaMagic)) != 0) {
455       LOG(ERROR) << "Bad payload format -- invalid delta magic.";
456       *error = ErrorCode::kDownloadInvalidMetadataMagicString;
457       return kMetadataParseError;
458     }
459 
460     // Extract the payload version from the metadata.
461     static_assert(sizeof(major_payload_version_) == kDeltaVersionSize,
462                   "Major payload version size mismatch");
463     memcpy(&major_payload_version_,
464            &payload[kDeltaVersionOffset],
465            kDeltaVersionSize);
466     // switch big endian to host
467     major_payload_version_ = be64toh(major_payload_version_);
468 
469     if (major_payload_version_ != supported_major_version_ &&
470         major_payload_version_ != kChromeOSMajorPayloadVersion) {
471       LOG(ERROR) << "Bad payload format -- unsupported payload version: "
472           << major_payload_version_;
473       *error = ErrorCode::kUnsupportedMajorPayloadVersion;
474       return kMetadataParseError;
475     }
476 
477     // Get the manifest offset now that we have the payload version.
478     if (!GetManifestOffset(&manifest_offset)) {
479       *error = ErrorCode::kUnsupportedMajorPayloadVersion;
480       return kMetadataParseError;
481     }
482     // Check again with the manifest offset.
483     if (payload.size() < manifest_offset)
484       return kMetadataParseInsufficientData;
485 
486     // Next, parse the manifest size.
487     static_assert(sizeof(manifest_size_) == kDeltaManifestSizeSize,
488                   "manifest_size size mismatch");
489     memcpy(&manifest_size_,
490            &payload[kDeltaManifestSizeOffset],
491            kDeltaManifestSizeSize);
492     manifest_size_ = be64toh(manifest_size_);  // switch big endian to host
493 
494     if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
495       // Parse the metadata signature size.
496       static_assert(sizeof(metadata_signature_size_) ==
497                     kDeltaMetadataSignatureSizeSize,
498                     "metadata_signature_size size mismatch");
499       uint64_t metadata_signature_size_offset;
500       if (!GetMetadataSignatureSizeOffset(&metadata_signature_size_offset)) {
501         *error = ErrorCode::kError;
502         return kMetadataParseError;
503       }
504       memcpy(&metadata_signature_size_,
505              &payload[metadata_signature_size_offset],
506              kDeltaMetadataSignatureSizeSize);
507       metadata_signature_size_ = be32toh(metadata_signature_size_);
508     }
509 
510     // If the metadata size is present in install plan, check for it immediately
511     // even before waiting for that many bytes to be downloaded in the payload.
512     // This will prevent any attack which relies on us downloading data
513     // beyond the expected metadata size.
514     metadata_size_ = manifest_offset + manifest_size_;
515     if (install_plan_->hash_checks_mandatory) {
516       if (install_plan_->metadata_size != metadata_size_) {
517         LOG(ERROR) << "Mandatory metadata size in Omaha response ("
518                    << install_plan_->metadata_size
519                    << ") is missing/incorrect, actual = " << metadata_size_;
520         *error = ErrorCode::kDownloadInvalidMetadataSize;
521         return kMetadataParseError;
522       }
523     }
524   }
525 
526   // Now that we have validated the metadata size, we should wait for the full
527   // metadata and its signature (if it exists) to be read in before we can parse it.
528   if (payload.size() < metadata_size_ + metadata_signature_size_)
529     return kMetadataParseInsufficientData;
530 
531   // Log whether we validated the size or are simply trusting what's in the
532   // payload. This is logged here (after we received the full metadata) so
533   // that we just log once (instead of logging n times) if it takes n
534   // DeltaPerformer::Write calls to download the full manifest.
535   if (install_plan_->metadata_size == metadata_size_) {
536     LOG(INFO) << "Manifest size in payload matches expected value from Omaha";
537   } else {
538     // For mandatory cases, we'd have already returned a kMetadataParseError
539     // above. We'll be here only for non-mandatory cases. Just send a UMA stat.
540     LOG(WARNING) << "Ignoring missing/incorrect metadata size ("
541                  << install_plan_->metadata_size
542                  << ") in Omaha response as validation is not mandatory. "
543                  << "Trusting metadata size in payload = " << metadata_size_;
544   }
545 
546   // We have the full metadata in |payload|. Verify its integrity
547   // and authenticity based on the information we have in Omaha response.
548   *error = ValidateMetadataSignature(payload);
549   if (*error != ErrorCode::kSuccess) {
550     if (install_plan_->hash_checks_mandatory) {
551       // The autoupdate_CatchBadSignatures test checks for this string
552       // in log-files. Keep in sync.
553       LOG(ERROR) << "Mandatory metadata signature validation failed";
554       return kMetadataParseError;
555     }
556 
557     // For non-mandatory cases, just send a UMA stat.
558     LOG(WARNING) << "Ignoring metadata signature validation failures";
559     *error = ErrorCode::kSuccess;
560   }
561 
562   if (!GetManifestOffset(&manifest_offset)) {
563     *error = ErrorCode::kUnsupportedMajorPayloadVersion;
564     return kMetadataParseError;
565   }
566   // The payload metadata is deemed valid, it's safe to parse the protobuf.
567   if (!manifest_.ParseFromArray(&payload[manifest_offset], manifest_size_)) {
568     LOG(ERROR) << "Unable to parse manifest in update file.";
569     *error = ErrorCode::kDownloadManifestParseError;
570     return kMetadataParseError;
571   }
572 
573   manifest_parsed_ = true;
574   return kMetadataParseSuccess;
575 }
576 
577 // Wrapper around write. Returns true if all requested bytes were written, or
578 // false on any error regardless of progress, and stores an action exit code
579 // in |error|.
580 bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode *error) {
581   *error = ErrorCode::kSuccess;
582 
583   const char* c_bytes = reinterpret_cast<const char*>(bytes);
584 
585   // Update the total byte downloaded count and the progress logs.
586   total_bytes_received_ += count;
587   UpdateOverallProgress(false, "Completed ");
588 
589   while (!manifest_valid_) {
590     // Read data up to the needed limit; this is either the maximum payload
591     // header size, or the full metadata size (once it becomes known).
592     const bool do_read_header = !IsHeaderParsed();
593     CopyDataToBuffer(&c_bytes, &count,
594                      (do_read_header ? kMaxPayloadHeaderSize :
595                       metadata_size_ + metadata_signature_size_));
596 
597     MetadataParseResult result = ParsePayloadMetadata(buffer_, error);
598     if (result == kMetadataParseError)
599       return false;
600     if (result == kMetadataParseInsufficientData) {
601       // If we just processed the header, make an attempt on the manifest.
602       if (do_read_header && IsHeaderParsed())
603         continue;
604 
605       return true;
606     }
607 
608     // Checks the integrity of the payload manifest.
609     if ((*error = ValidateManifest()) != ErrorCode::kSuccess)
610       return false;
611     manifest_valid_ = true;
612 
613     // Clear the download buffer.
614     DiscardBuffer(false, metadata_size_);
615 
616     // This populates |partitions_| and the |install_plan.partitions| with the
617     // list of partitions from the manifest.
618     if (!ParseManifestPartitions(error))
619       return false;
620 
621     num_total_operations_ = 0;
622     for (const auto& partition : partitions_) {
623       num_total_operations_ += partition.operations_size();
624       acc_num_operations_.push_back(num_total_operations_);
625     }
626 
627     LOG_IF(WARNING, !prefs_->SetInt64(kPrefsManifestMetadataSize,
628                                       metadata_size_))
629         << "Unable to save the manifest metadata size.";
630     LOG_IF(WARNING, !prefs_->SetInt64(kPrefsManifestSignatureSize,
631                                       metadata_signature_size_))
632         << "Unable to save the manifest signature size.";
633 
634     if (!PrimeUpdateState()) {
635       *error = ErrorCode::kDownloadStateInitializationError;
636       LOG(ERROR) << "Unable to prime the update state.";
637       return false;
638     }
639 
640     if (!OpenCurrentPartition()) {
641       *error = ErrorCode::kInstallDeviceOpenError;
642       return false;
643     }
644 
645     if (next_operation_num_ > 0)
646       UpdateOverallProgress(true, "Resuming after ");
647     LOG(INFO) << "Starting to apply update payload operations";
648   }
649 
650   while (next_operation_num_ < num_total_operations_) {
651     // Check if we should cancel the current attempt for any reason.
652     // In this case, *error will have already been populated with the reason
653     // why we're canceling.
654     if (download_delegate_ && download_delegate_->ShouldCancel(error))
655       return false;
656 
657     // We know there are more operations to perform because we didn't reach the
658     // |num_total_operations_| limit yet.
659     while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
660       CloseCurrentPartition();
661       current_partition_++;
662       if (!OpenCurrentPartition()) {
663         *error = ErrorCode::kInstallDeviceOpenError;
664         return false;
665       }
666     }
667     const size_t partition_operation_num = next_operation_num_ - (
668         current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0);
669 
670     const InstallOperation& op =
671         partitions_[current_partition_].operations(partition_operation_num);
672 
673     CopyDataToBuffer(&c_bytes, &count, op.data_length());
674 
675     // Check whether we received all of the next operation's data payload.
676     if (!CanPerformInstallOperation(op))
677       return true;
678 
679     // Validate the operation only if the metadata signature is present.
680     // Otherwise, keep the old behavior. This serves as a knob to disable
681     // the validation logic in case we find some regression after rollout.
682     // NOTE: If hash checks are mandatory and if metadata_signature is empty,
683     // we would have already failed in ParsePayloadMetadata method and thus not
684     // even be here. So no need to handle that case again here.
685     if (!install_plan_->metadata_signature.empty()) {
686       // Note: Validate must be called only if CanPerformInstallOperation is
687       // called. Otherwise, we might be failing operations prematurely when
688       // there isn't yet sufficient data to compute the proper hash.
689       *error = ValidateOperationHash(op);
690       if (*error != ErrorCode::kSuccess) {
691         if (install_plan_->hash_checks_mandatory) {
692           LOG(ERROR) << "Mandatory operation hash check failed";
693           return false;
694         }
695 
696         // For non-mandatory cases, just send a UMA stat.
697         LOG(WARNING) << "Ignoring operation validation errors";
698         *error = ErrorCode::kSuccess;
699       }
700     }
701 
702     // Makes sure we unblock exit when this operation completes.
703     ScopedTerminatorExitUnblocker exit_unblocker =
704         ScopedTerminatorExitUnblocker();  // Avoids a compiler unused var bug.
705 
706     bool op_result;
707     switch (op.type()) {
708       case InstallOperation::REPLACE:
709       case InstallOperation::REPLACE_BZ:
710       case InstallOperation::REPLACE_XZ:
711         op_result = PerformReplaceOperation(op);
712         break;
713       case InstallOperation::ZERO:
714       case InstallOperation::DISCARD:
715         op_result = PerformZeroOrDiscardOperation(op);
716         break;
717       case InstallOperation::MOVE:
718         op_result = PerformMoveOperation(op);
719         break;
720       case InstallOperation::BSDIFF:
721         op_result = PerformBsdiffOperation(op);
722         break;
723       case InstallOperation::SOURCE_COPY:
724         op_result = PerformSourceCopyOperation(op, error);
725         break;
726       case InstallOperation::SOURCE_BSDIFF:
727         op_result = PerformSourceBsdiffOperation(op, error);
728         break;
729       default:
730         op_result = false;
731     }
732     if (!HandleOpResult(op_result, InstallOperationTypeName(op.type()), error))
733       return false;
734 
735     next_operation_num_++;
736     UpdateOverallProgress(false, "Completed ");
737     CheckpointUpdateProgress();
738   }
739 
740   // In major version 2, we don't add a dummy operation to the payload.
741   // If we already extracted the signature we should skip this step.
742   if (major_payload_version_ == kBrilloMajorPayloadVersion &&
743       manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
744       signatures_message_data_.empty()) {
745     if (manifest_.signatures_offset() != buffer_offset_) {
746       LOG(ERROR) << "Payload signatures offset points to blob offset "
747                  << manifest_.signatures_offset()
748                  << " but signatures are expected at offset "
749                  << buffer_offset_;
750       *error = ErrorCode::kDownloadPayloadVerificationError;
751       return false;
752     }
753     CopyDataToBuffer(&c_bytes, &count, manifest_.signatures_size());
754     // Needs more data to cover entire signature.
755     if (buffer_.size() < manifest_.signatures_size())
756       return true;
757     if (!ExtractSignatureMessage()) {
758       LOG(ERROR) << "Extract payload signature failed.";
759       *error = ErrorCode::kDownloadPayloadVerificationError;
760       return false;
761     }
762     DiscardBuffer(true, 0);
763     // Since we extracted the SignatureMessage we need to advance the
764     // checkpoint, otherwise we would reload the signature and try to extract
765     // it again.
766     CheckpointUpdateProgress();
767   }
768 
769   return true;
770 }
771 
772 bool DeltaPerformer::IsManifestValid() {
773   return manifest_valid_;
774 }
775 
776 bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) {
777   if (major_payload_version_ == kBrilloMajorPayloadVersion) {
778     partitions_.clear();
779     for (const PartitionUpdate& partition : manifest_.partitions()) {
780       partitions_.push_back(partition);
781     }
782     manifest_.clear_partitions();
783   } else if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
784     LOG(INFO) << "Converting update information from old format.";
785     PartitionUpdate root_part;
786     root_part.set_partition_name(kLegacyPartitionNameRoot);
787 #ifdef __ANDROID__
788     LOG(WARNING) << "Legacy payload major version provided to an Android "
789                     "build. Assuming no post-install. Please use major version "
790                     "2 or newer.";
791     root_part.set_run_postinstall(false);
792 #else
793     root_part.set_run_postinstall(true);
794 #endif  // __ANDROID__
795     if (manifest_.has_old_rootfs_info()) {
796       *root_part.mutable_old_partition_info() = manifest_.old_rootfs_info();
797       manifest_.clear_old_rootfs_info();
798     }
799     if (manifest_.has_new_rootfs_info()) {
800       *root_part.mutable_new_partition_info() = manifest_.new_rootfs_info();
801       manifest_.clear_new_rootfs_info();
802     }
803     *root_part.mutable_operations() = manifest_.install_operations();
804     manifest_.clear_install_operations();
805     partitions_.push_back(std::move(root_part));
806 
807     PartitionUpdate kern_part;
808     kern_part.set_partition_name(kLegacyPartitionNameKernel);
809     kern_part.set_run_postinstall(false);
810     if (manifest_.has_old_kernel_info()) {
811       *kern_part.mutable_old_partition_info() = manifest_.old_kernel_info();
812       manifest_.clear_old_kernel_info();
813     }
814     if (manifest_.has_new_kernel_info()) {
815       *kern_part.mutable_new_partition_info() = manifest_.new_kernel_info();
816       manifest_.clear_new_kernel_info();
817     }
818     *kern_part.mutable_operations() = manifest_.kernel_install_operations();
819     manifest_.clear_kernel_install_operations();
820     partitions_.push_back(std::move(kern_part));
821   }
822 
823   // TODO(deymo): Remove this block of code once we switched to optional
824   // source partition verification. This list of partitions in the InstallPlan
825   // is initialized with the expected hashes in the payload major version 1,
826   // so we need to check those now if already set. See b/23182225.
827   if (!install_plan_->partitions.empty()) {
828     if (!VerifySourcePartitions()) {
829       *error = ErrorCode::kDownloadStateInitializationError;
830       return false;
831     }
832   }
833 
834   // Fill in the InstallPlan::partitions based on the partitions from the
835   // payload.
836   install_plan_->partitions.clear();
837   for (const auto& partition : partitions_) {
838     InstallPlan::Partition install_part;
839     install_part.name = partition.partition_name();
840     install_part.run_postinstall =
841         partition.has_run_postinstall() && partition.run_postinstall();
842     if (install_part.run_postinstall) {
843       install_part.postinstall_path =
844           (partition.has_postinstall_path() ? partition.postinstall_path()
845                                             : kPostinstallDefaultScript);
846       install_part.filesystem_type = partition.filesystem_type();
847       install_part.postinstall_optional = partition.postinstall_optional();
848     }
849 
850     if (partition.has_old_partition_info()) {
851       const PartitionInfo& info = partition.old_partition_info();
852       install_part.source_size = info.size();
853       install_part.source_hash.assign(info.hash().begin(), info.hash().end());
854     }
855 
856     if (!partition.has_new_partition_info()) {
857       LOG(ERROR) << "Unable to get new partition hash info on partition "
858                  << install_part.name << ".";
859       *error = ErrorCode::kDownloadNewPartitionInfoError;
860       return false;
861     }
862     const PartitionInfo& info = partition.new_partition_info();
863     install_part.target_size = info.size();
864     install_part.target_hash.assign(info.hash().begin(), info.hash().end());
865 
866     install_plan_->partitions.push_back(install_part);
867   }
868 
869   if (!install_plan_->LoadPartitionsFromSlots(boot_control_)) {
870     LOG(ERROR) << "Unable to determine all the partition devices.";
871     *error = ErrorCode::kInstallDeviceOpenError;
872     return false;
873   }
874   LogPartitionInfo(partitions_);
875   return true;
876 }
877 
878 bool DeltaPerformer::CanPerformInstallOperation(
879     const chromeos_update_engine::InstallOperation& operation) {
880   // If we don't have a data blob we can apply it right away.
881   if (!operation.has_data_offset() && !operation.has_data_length())
882     return true;
883 
884   // See if we have the entire data blob in the buffer
885   if (operation.data_offset() < buffer_offset_) {
886     LOG(ERROR) << "we threw away data it seems?";
887     return false;
888   }
889 
890   return (operation.data_offset() + operation.data_length() <=
891           buffer_offset_ + buffer_.size());
892 }
893 
894 bool DeltaPerformer::PerformReplaceOperation(
895     const InstallOperation& operation) {
896   CHECK(operation.type() == InstallOperation::REPLACE ||
897         operation.type() == InstallOperation::REPLACE_BZ ||
898         operation.type() == InstallOperation::REPLACE_XZ);
899 
900   // Since we delete data off the beginning of the buffer as we use it,
901   // the data we need should be exactly at the beginning of the buffer.
902   TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
903   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
904 
905   // Extract the signature message if it's in this operation.
906   if (ExtractSignatureMessageFromOperation(operation)) {
907   // If this is a dummy replace operation, we ignore it after extracting the
908     // signature.
909     DiscardBuffer(true, 0);
910     return true;
911   }
912 
913   // Setup the ExtentWriter stack based on the operation type.
914   std::unique_ptr<ExtentWriter> writer =
915     brillo::make_unique_ptr(new ZeroPadExtentWriter(
916       brillo::make_unique_ptr(new DirectExtentWriter())));
917 
918   if (operation.type() == InstallOperation::REPLACE_BZ) {
919     writer.reset(new BzipExtentWriter(std::move(writer)));
920   } else if (operation.type() == InstallOperation::REPLACE_XZ) {
921     writer.reset(new XzExtentWriter(std::move(writer)));
922   }
923 
924   // Create a vector of extents to pass to the ExtentWriter.
925   vector<Extent> extents;
926   for (int i = 0; i < operation.dst_extents_size(); i++) {
927     extents.push_back(operation.dst_extents(i));
928   }
929 
930   TEST_AND_RETURN_FALSE(writer->Init(target_fd_, extents, block_size_));
931   TEST_AND_RETURN_FALSE(writer->Write(buffer_.data(), operation.data_length()));
932   TEST_AND_RETURN_FALSE(writer->End());
933 
934   // Update buffer
935   DiscardBuffer(true, buffer_.size());
936   return true;
937 }
938 
939 bool DeltaPerformer::PerformZeroOrDiscardOperation(
940     const InstallOperation& operation) {
941   CHECK(operation.type() == InstallOperation::DISCARD ||
942         operation.type() == InstallOperation::ZERO);
943 
944   // These operations have no blob.
945   TEST_AND_RETURN_FALSE(!operation.has_data_offset());
946   TEST_AND_RETURN_FALSE(!operation.has_data_length());
947 
948 #ifdef BLKZEROOUT
949   bool attempt_ioctl = true;
950   int request =
951       (operation.type() == InstallOperation::ZERO ? BLKZEROOUT : BLKDISCARD);
952 #else  // !defined(BLKZEROOUT)
953   bool attempt_ioctl = false;
954   int request = 0;
955 #endif  // !defined(BLKZEROOUT)
956 
957   brillo::Blob zeros;
958   for (int i = 0; i < operation.dst_extents_size(); i++) {
959     Extent extent = operation.dst_extents(i);
960     const uint64_t start = extent.start_block() * block_size_;
961     const uint64_t length = extent.num_blocks() * block_size_;
962     if (attempt_ioctl) {
963       int result = 0;
964       if (target_fd_->BlkIoctl(request, start, length, &result) && result == 0)
965         continue;
966       attempt_ioctl = false;
967       zeros.resize(16 * block_size_);
968     }
969     // In case of failure, we fall back to writing 0 to the selected region.
970     for (uint64_t offset = 0; offset < length; offset += zeros.size()) {
971       uint64_t chunk_length = min(length - offset,
972                                   static_cast<uint64_t>(zeros.size()));
973       TEST_AND_RETURN_FALSE(
974           utils::PWriteAll(target_fd_, zeros.data(), chunk_length, start + offset));
975     }
976   }
977   return true;
978 }
979 
980 bool DeltaPerformer::PerformMoveOperation(const InstallOperation& operation) {
981   // Calculate buffer size. Note, this function doesn't do a sliding
982   // window to copy in case the source and destination blocks overlap.
983   // If we wanted to do a sliding window, we could program the server
984   // to generate deltas that effectively did a sliding window.
985 
986   uint64_t blocks_to_read = 0;
987   for (int i = 0; i < operation.src_extents_size(); i++)
988     blocks_to_read += operation.src_extents(i).num_blocks();
989 
990   uint64_t blocks_to_write = 0;
991   for (int i = 0; i < operation.dst_extents_size(); i++)
992     blocks_to_write += operation.dst_extents(i).num_blocks();
993 
994   DCHECK_EQ(blocks_to_write, blocks_to_read);
995   brillo::Blob buf(blocks_to_write * block_size_);
996 
997   // Read in bytes.
998   ssize_t bytes_read = 0;
999   for (int i = 0; i < operation.src_extents_size(); i++) {
1000     ssize_t bytes_read_this_iteration = 0;
1001     const Extent& extent = operation.src_extents(i);
1002     const size_t bytes = extent.num_blocks() * block_size_;
1003     TEST_AND_RETURN_FALSE(extent.start_block() != kSparseHole);
1004     TEST_AND_RETURN_FALSE(utils::PReadAll(target_fd_,
1005                                           &buf[bytes_read],
1006                                           bytes,
1007                                           extent.start_block() * block_size_,
1008                                           &bytes_read_this_iteration));
1009     TEST_AND_RETURN_FALSE(
1010         bytes_read_this_iteration == static_cast<ssize_t>(bytes));
1011     bytes_read += bytes_read_this_iteration;
1012   }
1013 
1014   // Write bytes out.
1015   ssize_t bytes_written = 0;
1016   for (int i = 0; i < operation.dst_extents_size(); i++) {
1017     const Extent& extent = operation.dst_extents(i);
1018     const size_t bytes = extent.num_blocks() * block_size_;
1019     TEST_AND_RETURN_FALSE(extent.start_block() != kSparseHole);
1020     TEST_AND_RETURN_FALSE(utils::PWriteAll(target_fd_,
1021                                            &buf[bytes_written],
1022                                            bytes,
1023                                            extent.start_block() * block_size_));
1024     bytes_written += bytes;
1025   }
1026   DCHECK_EQ(bytes_written, bytes_read);
1027   DCHECK_EQ(bytes_written, static_cast<ssize_t>(buf.size()));
1028   return true;
1029 }
1030 
1031 namespace {
1032 
1033 // Takes |extents| and fills an empty vector |blocks| with a block index for
1034 // each block in |extents|. For example, [(3, 2), (8, 1)] would give [3, 4, 8].
1035 void ExtentsToBlocks(const RepeatedPtrField<Extent>& extents,
1036                      vector<uint64_t>* blocks) {
1037   for (Extent ext : extents) {
1038     for (uint64_t j = 0; j < ext.num_blocks(); j++)
1039       blocks->push_back(ext.start_block() + j);
1040   }
1041 }
1042 
1043 // Takes |extents| and returns the number of blocks in those extents.
1044 uint64_t GetBlockCount(const RepeatedPtrField<Extent>& extents) {
1045   uint64_t sum = 0;
1046   for (Extent ext : extents) {
1047     sum += ext.num_blocks();
1048   }
1049   return sum;
1050 }
1051 
1052 // Compare |calculated_hash| with the source hash in |operation|; return false,
1053 // dump the hashes, and set |error| if they don't match.
1054 bool ValidateSourceHash(const brillo::Blob& calculated_hash,
1055                         const InstallOperation& operation,
1056                         ErrorCode* error) {
1057   brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
1058                                     operation.src_sha256_hash().end());
1059   if (calculated_hash != expected_source_hash) {
1060     LOG(ERROR) << "The hash of the source data on disk for this operation "
1061                << "doesn't match the expected value. This could mean that the "
1062                << "delta update payload was targeted for another version, or "
1063                << "that the source partition was modified after it was "
1064                << "installed, for example, by mounting a filesystem.";
1065     LOG(ERROR) << "Expected:   sha256|hex = "
1066                << base::HexEncode(expected_source_hash.data(),
1067                                   expected_source_hash.size());
1068     LOG(ERROR) << "Calculated: sha256|hex = "
1069                << base::HexEncode(calculated_hash.data(),
1070                                   calculated_hash.size());
1071 
1072     vector<string> source_extents;
1073     for (const Extent& ext : operation.src_extents()) {
1074       source_extents.push_back(base::StringPrintf(
1075           "%" PRIu64 ":%" PRIu64, ext.start_block(), ext.num_blocks()));
1076     }
1077     LOG(ERROR) << "Operation source (offset:size) in blocks: "
1078                << base::JoinString(source_extents, ",");
1079 
1080     *error = ErrorCode::kDownloadStateInitializationError;
1081     return false;
1082   }
1083   return true;
1084 }
1085 
1086 }  // namespace
1087 
1088 bool DeltaPerformer::PerformSourceCopyOperation(
1089     const InstallOperation& operation, ErrorCode* error) {
1090   if (operation.has_src_length())
1091     TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
1092   if (operation.has_dst_length())
1093     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
1094 
1095   uint64_t blocks_to_read = GetBlockCount(operation.src_extents());
1096   uint64_t blocks_to_write = GetBlockCount(operation.dst_extents());
1097   TEST_AND_RETURN_FALSE(blocks_to_write == blocks_to_read);
1098 
1099   // Create vectors of all the individual src/dst blocks.
1100   vector<uint64_t> src_blocks;
1101   vector<uint64_t> dst_blocks;
1102   ExtentsToBlocks(operation.src_extents(), &src_blocks);
1103   ExtentsToBlocks(operation.dst_extents(), &dst_blocks);
1104   DCHECK_EQ(src_blocks.size(), blocks_to_read);
1105   DCHECK_EQ(src_blocks.size(), dst_blocks.size());
1106 
1107   brillo::Blob buf(block_size_);
1108   ssize_t bytes_read = 0;
1109   HashCalculator source_hasher;
1110   // Read/write one block at a time.
1111   for (uint64_t i = 0; i < blocks_to_read; i++) {
1112     ssize_t bytes_read_this_iteration = 0;
1113     uint64_t src_block = src_blocks[i];
1114     uint64_t dst_block = dst_blocks[i];
1115 
1116     // Read in bytes.
1117     TEST_AND_RETURN_FALSE(
1118         utils::PReadAll(source_fd_,
1119                         buf.data(),
1120                         block_size_,
1121                         src_block * block_size_,
1122                         &bytes_read_this_iteration));
1123 
1124     // Write bytes out.
1125     TEST_AND_RETURN_FALSE(
1126         utils::PWriteAll(target_fd_,
1127                          buf.data(),
1128                          block_size_,
1129                          dst_block * block_size_));
1130 
1131     bytes_read += bytes_read_this_iteration;
1132     TEST_AND_RETURN_FALSE(bytes_read_this_iteration ==
1133                           static_cast<ssize_t>(block_size_));
1134 
1135     if (operation.has_src_sha256_hash())
1136       TEST_AND_RETURN_FALSE(source_hasher.Update(buf.data(), buf.size()));
1137   }
1138 
1139   if (operation.has_src_sha256_hash()) {
1140     TEST_AND_RETURN_FALSE(source_hasher.Finalize());
1141     TEST_AND_RETURN_FALSE(
1142         ValidateSourceHash(source_hasher.raw_hash(), operation, error));
1143   }
1144 
1145   DCHECK_EQ(bytes_read, static_cast<ssize_t>(blocks_to_read * block_size_));
1146   return true;
1147 }
1148 
1149 bool DeltaPerformer::ExtentsToBsdiffPositionsString(
1150     const RepeatedPtrField<Extent>& extents,
1151     uint64_t block_size,
1152     uint64_t full_length,
1153     string* positions_string) {
1154   string ret;
1155   uint64_t length = 0;
1156   for (int i = 0; i < extents.size(); i++) {
1157     Extent extent = extents.Get(i);
1158     int64_t start = extent.start_block() * block_size;
1159     uint64_t this_length = min(full_length - length,
1160                                extent.num_blocks() * block_size);
1161     ret += base::StringPrintf("%" PRIi64 ":%" PRIu64 ",", start, this_length);
1162     length += this_length;
1163   }
1164   TEST_AND_RETURN_FALSE(length == full_length);
1165   if (!ret.empty())
1166     ret.resize(ret.size() - 1);  // Strip trailing comma off
1167   *positions_string = ret;
1168   return true;
1169 }
1170 
1171 bool DeltaPerformer::PerformBsdiffOperation(const InstallOperation& operation) {
1172   // Since we delete data off the beginning of the buffer as we use it,
1173   // the data we need should be exactly at the beginning of the buffer.
1174   TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
1175   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
1176 
1177   string input_positions;
1178   TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.src_extents(),
1179                                                        block_size_,
1180                                                        operation.src_length(),
1181                                                        &input_positions));
1182   string output_positions;
1183   TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.dst_extents(),
1184                                                        block_size_,
1185                                                        operation.dst_length(),
1186                                                        &output_positions));
1187 
1188   string temp_filename;
1189   TEST_AND_RETURN_FALSE(utils::MakeTempFile("au_patch.XXXXXX",
1190                                             &temp_filename,
1191                                             nullptr));
1192   ScopedPathUnlinker path_unlinker(temp_filename);
1193   {
1194     int fd = open(temp_filename.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0644);
1195     ScopedFdCloser fd_closer(&fd);
1196     TEST_AND_RETURN_FALSE(
1197         utils::WriteAll(fd, buffer_.data(), operation.data_length()));
1198   }
1199 
1200   // Update the buffer to release the patch data memory as soon as the patch
1201   // file is written out.
1202   DiscardBuffer(true, buffer_.size());
1203 
1204   vector<string> cmd{kBspatchPath, target_path_, target_path_, temp_filename,
1205                      input_positions, output_positions};
1206 
1207   int return_code = 0;
1208   TEST_AND_RETURN_FALSE(
1209       Subprocess::SynchronousExecFlags(cmd, Subprocess::kSearchPath,
1210                                        &return_code, nullptr));
1211   TEST_AND_RETURN_FALSE(return_code == 0);
1212 
1213   if (operation.dst_length() % block_size_) {
1214     // Zero out rest of final block.
1215     // TODO(adlr): build this into bspatch; it's more efficient that way.
1216     const Extent& last_extent =
1217         operation.dst_extents(operation.dst_extents_size() - 1);
1218     const uint64_t end_byte =
1219         (last_extent.start_block() + last_extent.num_blocks()) * block_size_;
1220     const uint64_t begin_byte =
1221         end_byte - (block_size_ - operation.dst_length() % block_size_);
1222     brillo::Blob zeros(end_byte - begin_byte);
1223     TEST_AND_RETURN_FALSE(
1224         utils::PWriteAll(target_fd_, zeros.data(), end_byte - begin_byte, begin_byte));
1225   }
1226   return true;
1227 }
1228 
1229 bool DeltaPerformer::PerformSourceBsdiffOperation(
1230     const InstallOperation& operation, ErrorCode* error) {
1231   // Since we delete data off the beginning of the buffer as we use it,
1232   // the data we need should be exactly at the beginning of the buffer.
1233   TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
1234   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
1235   if (operation.has_src_length())
1236     TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
1237   if (operation.has_dst_length())
1238     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
1239 
1240   if (operation.has_src_sha256_hash()) {
1241     HashCalculator source_hasher;
1242     const uint64_t kMaxBlocksToRead = 512;  // 2MB if block size is 4KB
1243     brillo::Blob buf(kMaxBlocksToRead * block_size_);
1244     for (const Extent& extent : operation.src_extents()) {
1245       for (uint64_t i = 0; i < extent.num_blocks(); i += kMaxBlocksToRead) {
1246         uint64_t blocks_to_read =
1247             min(kMaxBlocksToRead, extent.num_blocks() - i);
1248         ssize_t bytes_to_read = blocks_to_read * block_size_;
1249         ssize_t bytes_read_this_iteration = 0;
1250         TEST_AND_RETURN_FALSE(
1251             utils::PReadAll(source_fd_, buf.data(), bytes_to_read,
1252                             (extent.start_block() + i) * block_size_,
1253                             &bytes_read_this_iteration));
1254         TEST_AND_RETURN_FALSE(bytes_read_this_iteration == bytes_to_read);
1255         TEST_AND_RETURN_FALSE(source_hasher.Update(buf.data(), bytes_to_read));
1256       }
1257     }
1258     TEST_AND_RETURN_FALSE(source_hasher.Finalize());
1259     TEST_AND_RETURN_FALSE(
1260         ValidateSourceHash(source_hasher.raw_hash(), operation, error));
1261   }
1262 
1263   string input_positions;
1264   TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.src_extents(),
1265                                                        block_size_,
1266                                                        operation.src_length(),
1267                                                        &input_positions));
1268   string output_positions;
1269   TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.dst_extents(),
1270                                                        block_size_,
1271                                                        operation.dst_length(),
1272                                                        &output_positions));
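  // The positions strings encode the source and destination extents as byte
  // ranges (see ExtentsToBsdiffPositionsString); they are passed to bspatch
  // below so it can read from and write to the partitions at those offsets
  // instead of operating on plain files.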

  string temp_filename;
  TEST_AND_RETURN_FALSE(utils::MakeTempFile("au_patch.XXXXXX",
                                            &temp_filename,
                                            nullptr));
  ScopedPathUnlinker path_unlinker(temp_filename);
  {
    int fd = open(temp_filename.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0644);
    ScopedFdCloser fd_closer(&fd);
    TEST_AND_RETURN_FALSE(
        utils::WriteAll(fd, buffer_.data(), operation.data_length()));
  }

  // Update the buffer to release the patch data memory as soon as the patch
  // file is written out.
  DiscardBuffer(true, buffer_.size());

  vector<string> cmd{kBspatchPath, source_path_, target_path_, temp_filename,
                     input_positions, output_positions};

  int return_code = 0;
  TEST_AND_RETURN_FALSE(
      Subprocess::SynchronousExecFlags(cmd, Subprocess::kSearchPath,
                                       &return_code, nullptr));
  TEST_AND_RETURN_FALSE(return_code == 0);
  return true;
}

bool DeltaPerformer::ExtractSignatureMessageFromOperation(
    const InstallOperation& operation) {
  if (operation.type() != InstallOperation::REPLACE ||
      !manifest_.has_signatures_offset() ||
      manifest_.signatures_offset() != operation.data_offset()) {
    return false;
  }
  TEST_AND_RETURN_FALSE(manifest_.has_signatures_size() &&
                        manifest_.signatures_size() == operation.data_length());
  TEST_AND_RETURN_FALSE(ExtractSignatureMessage());
  return true;
}

bool DeltaPerformer::ExtractSignatureMessage() {
  TEST_AND_RETURN_FALSE(signatures_message_data_.empty());
  TEST_AND_RETURN_FALSE(buffer_offset_ == manifest_.signatures_offset());
  TEST_AND_RETURN_FALSE(buffer_.size() >= manifest_.signatures_size());
  signatures_message_data_.assign(
      buffer_.begin(),
      buffer_.begin() + manifest_.signatures_size());

  // Save the signature blob because if the update is interrupted after the
  // download phase we don't go through this path anymore. Some alternatives to
  // consider:
  //
  // 1. On resume, re-download the signature blob from the server and re-verify
  // it.
  //
  // 2. Verify the signature as soon as it's received and don't checkpoint the
  // blob and the signed sha-256 context.
  LOG_IF(WARNING, !prefs_->SetString(kPrefsUpdateStateSignatureBlob,
                                     string(signatures_message_data_.begin(),
                                            signatures_message_data_.end())))
      << "Unable to store the signature blob.";

  LOG(INFO) << "Extracted signature data of size "
            << manifest_.signatures_size() << " at "
            << manifest_.signatures_offset();
  return true;
}

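// Returns true and fills in |out_tmp_key| only when it is safe and possible to
// use the public RSA key from the Omaha response: the build is not official,
// no public key is already installed at |public_key_path_|, and the response
// actually carried a key. Otherwise callers fall back to the key (if any) at
// |public_key_path_|.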
bool DeltaPerformer::GetPublicKeyFromResponse(base::FilePath *out_tmp_key) {
  if (hardware_->IsOfficialBuild() ||
      utils::FileExists(public_key_path_.c_str()) ||
      install_plan_->public_key_rsa.empty())
    return false;

  if (!utils::DecodeAndStoreBase64String(install_plan_->public_key_rsa,
                                         out_tmp_key))
    return false;

  return true;
}

ErrorCode DeltaPerformer::ValidateMetadataSignature(
    const brillo::Blob& payload) {
  if (payload.size() < metadata_size_ + metadata_signature_size_)
    return ErrorCode::kDownloadMetadataSignatureError;

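  // The metadata signature can come from two places: a base64-encoded value in
  // the Omaha response (decoded into |metadata_signature_blob|), or, for major
  // version 2 (Brillo-style) payloads, a signature blob appended right after
  // the metadata (|metadata_signature_protobuf_blob|).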
  brillo::Blob metadata_signature_blob, metadata_signature_protobuf_blob;
  if (!install_plan_->metadata_signature.empty()) {
    // Convert base64-encoded signature to raw bytes.
    if (!brillo::data_encoding::Base64Decode(
        install_plan_->metadata_signature, &metadata_signature_blob)) {
      LOG(ERROR) << "Unable to decode base64 metadata signature: "
                 << install_plan_->metadata_signature;
      return ErrorCode::kDownloadMetadataSignatureError;
    }
  } else if (major_payload_version_ == kBrilloMajorPayloadVersion) {
    metadata_signature_protobuf_blob.assign(payload.begin() + metadata_size_,
                                            payload.begin() + metadata_size_ +
                                            metadata_signature_size_);
  }

  if (metadata_signature_blob.empty() &&
      metadata_signature_protobuf_blob.empty()) {
    if (install_plan_->hash_checks_mandatory) {
      LOG(ERROR) << "Missing mandatory metadata signature in both Omaha "
                 << "response and payload.";
      return ErrorCode::kDownloadMetadataSignatureMissingError;
    }

    LOG(WARNING) << "Cannot validate metadata as the signature is empty";
    return ErrorCode::kSuccess;
  }

  // See if we should use the public RSA key in the Omaha response.
  base::FilePath path_to_public_key(public_key_path_);
  base::FilePath tmp_key;
  if (GetPublicKeyFromResponse(&tmp_key))
    path_to_public_key = tmp_key;
  ScopedPathUnlinker tmp_key_remover(tmp_key.value());
  if (tmp_key.empty())
    tmp_key_remover.set_should_remove(false);

  LOG(INFO) << "Verifying metadata hash signature using public key: "
            << path_to_public_key.value();

  HashCalculator metadata_hasher;
  metadata_hasher.Update(payload.data(), metadata_size_);
  if (!metadata_hasher.Finalize()) {
    LOG(ERROR) << "Unable to compute actual hash of manifest";
    return ErrorCode::kDownloadMetadataSignatureVerificationError;
  }

  brillo::Blob calculated_metadata_hash = metadata_hasher.raw_hash();
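  // Pad the raw SHA-256 hash to the form PayloadVerifier expects for an
  // RSA-2048 signature; both verification paths below compare against this
  // padded hash.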
  PayloadVerifier::PadRSA2048SHA256Hash(&calculated_metadata_hash);
  if (calculated_metadata_hash.empty()) {
    LOG(ERROR) << "Computed actual hash of metadata is empty.";
    return ErrorCode::kDownloadMetadataSignatureVerificationError;
  }

  if (!metadata_signature_blob.empty()) {
    brillo::Blob expected_metadata_hash;
    if (!PayloadVerifier::GetRawHashFromSignature(metadata_signature_blob,
                                                  path_to_public_key.value(),
                                                  &expected_metadata_hash)) {
      LOG(ERROR) << "Unable to compute expected hash from metadata signature";
      return ErrorCode::kDownloadMetadataSignatureError;
    }
    if (calculated_metadata_hash != expected_metadata_hash) {
      LOG(ERROR) << "Manifest hash verification failed. Expected hash = ";
      utils::HexDumpVector(expected_metadata_hash);
      LOG(ERROR) << "Calculated hash = ";
      utils::HexDumpVector(calculated_metadata_hash);
      return ErrorCode::kDownloadMetadataSignatureMismatch;
    }
  } else {
    if (!PayloadVerifier::VerifySignature(metadata_signature_protobuf_blob,
                                          path_to_public_key.value(),
                                          calculated_metadata_hash)) {
      LOG(ERROR) << "Manifest hash verification failed.";
      return ErrorCode::kDownloadMetadataSignatureMismatch;
    }
  }

  // The autoupdate_CatchBadSignatures test checks for this string in
  // log-files. Keep in sync.
  LOG(INFO) << "Metadata hash signature matches value in Omaha response.";
  return ErrorCode::kSuccess;
}

ErrorCode DeltaPerformer::ValidateManifest() {
  // Perform assorted checks to sanity check the manifest: make sure it
  // matches data from other sources and that it is a supported version.

  bool has_old_fields =
      (manifest_.has_old_kernel_info() || manifest_.has_old_rootfs_info());
  for (const PartitionUpdate& partition : manifest_.partitions()) {
    has_old_fields = has_old_fields || partition.has_old_partition_info();
  }

  // The presence of an old partition hash is the sole indicator for a delta
  // update.
  InstallPayloadType actual_payload_type =
      has_old_fields ? InstallPayloadType::kDelta : InstallPayloadType::kFull;

  if (install_plan_->payload_type == InstallPayloadType::kUnknown) {
    LOG(INFO) << "Detected a '"
              << InstallPayloadTypeToString(actual_payload_type)
              << "' payload.";
    install_plan_->payload_type = actual_payload_type;
  } else if (install_plan_->payload_type != actual_payload_type) {
    LOG(ERROR) << "InstallPlan expected a '"
               << InstallPayloadTypeToString(install_plan_->payload_type)
               << "' payload but the downloaded manifest contains a '"
               << InstallPayloadTypeToString(actual_payload_type)
               << "' payload.";
    return ErrorCode::kPayloadMismatchedType;
  }

  // Check that the minor version is compatible.
  if (actual_payload_type == InstallPayloadType::kFull) {
    if (manifest_.minor_version() != kFullPayloadMinorVersion) {
      LOG(ERROR) << "Manifest contains minor version "
                 << manifest_.minor_version()
                 << ", but all full payloads should have version "
                 << kFullPayloadMinorVersion << ".";
      return ErrorCode::kUnsupportedMinorPayloadVersion;
    }
  } else {
    if (manifest_.minor_version() != supported_minor_version_) {
      LOG(ERROR) << "Manifest contains minor version "
                 << manifest_.minor_version()
                 << " not the supported "
                 << supported_minor_version_;
      return ErrorCode::kUnsupportedMinorPayloadVersion;
    }
  }

  if (major_payload_version_ != kChromeOSMajorPayloadVersion) {
    if (manifest_.has_old_rootfs_info() ||
        manifest_.has_new_rootfs_info() ||
        manifest_.has_old_kernel_info() ||
        manifest_.has_new_kernel_info() ||
        manifest_.install_operations_size() != 0 ||
        manifest_.kernel_install_operations_size() != 0) {
      LOG(ERROR) << "Manifest contains deprecated field only supported in "
                 << "major payload version 1, but the payload major version is "
                 << major_payload_version_;
      return ErrorCode::kPayloadMismatchedType;
    }
  }

  // TODO(garnold) we should be adding more and more manifest checks, such as
  // partition boundaries etc (see chromium-os:37661).

  return ErrorCode::kSuccess;
}

ErrorCode DeltaPerformer::ValidateOperationHash(
    const InstallOperation& operation) {
  if (!operation.data_sha256_hash().size()) {
    if (!operation.data_length()) {
      // Operations that do not have any data blob won't have any operation hash
      // either. So, these operations are always considered validated since the
      // metadata that contains all the non-data-blob portions of the operation
      // has already been validated. This is true for both HTTP and HTTPS cases.
      return ErrorCode::kSuccess;
    }

    // No hash is present for an operation that has data blobs. This shouldn't
    // happen normally for any client that has this code, because the
    // corresponding update should have been produced with the operation
    // hashes. So if it happens it means either we've turned operation hash
    // generation off in DeltaDiffGenerator or it's a regression of some sort.
    // One caveat though: the last operation is a dummy signature operation
    // that doesn't have a hash at the time the manifest is created. So we
    // should not complain about that operation. This operation can be
    // recognized by the fact that its offset is mentioned in the manifest.
    if (manifest_.signatures_offset() &&
        manifest_.signatures_offset() == operation.data_offset()) {
      LOG(INFO) << "Skipping hash verification for signature operation "
                << next_operation_num_ + 1;
    } else {
      if (install_plan_->hash_checks_mandatory) {
        LOG(ERROR) << "Missing mandatory operation hash for operation "
                   << next_operation_num_ + 1;
        return ErrorCode::kDownloadOperationHashMissingError;
      }

      LOG(WARNING) << "Cannot validate operation " << next_operation_num_ + 1
                   << " as there's no operation hash in manifest";
    }
    return ErrorCode::kSuccess;
  }

  brillo::Blob expected_op_hash;
  expected_op_hash.assign(operation.data_sha256_hash().data(),
                          (operation.data_sha256_hash().data() +
                           operation.data_sha256_hash().size()));

  HashCalculator operation_hasher;
  operation_hasher.Update(buffer_.data(), operation.data_length());
  if (!operation_hasher.Finalize()) {
    LOG(ERROR) << "Unable to compute actual hash of operation "
               << next_operation_num_;
    return ErrorCode::kDownloadOperationHashVerificationError;
  }

  brillo::Blob calculated_op_hash = operation_hasher.raw_hash();
  if (calculated_op_hash != expected_op_hash) {
    LOG(ERROR) << "Hash verification failed for operation "
               << next_operation_num_ << ". Expected hash = ";
    utils::HexDumpVector(expected_op_hash);
    LOG(ERROR) << "Calculated hash over " << operation.data_length()
               << " bytes at offset: " << operation.data_offset() << " = ";
    utils::HexDumpVector(calculated_op_hash);
    return ErrorCode::kDownloadOperationHashMismatch;
  }

  return ErrorCode::kSuccess;
}

#define TEST_AND_RETURN_VAL(_retval, _condition)                \
  do {                                                          \
    if (!(_condition)) {                                        \
      LOG(ERROR) << "VerifyPayload failure: " << #_condition;   \
      return _retval;                                           \
    }                                                           \
  } while (0);

ErrorCode DeltaPerformer::VerifyPayload(
    const string& update_check_response_hash,
    const uint64_t update_check_response_size) {

  // See if we should use the public RSA key in the Omaha response.
  base::FilePath path_to_public_key(public_key_path_);
  base::FilePath tmp_key;
  if (GetPublicKeyFromResponse(&tmp_key))
    path_to_public_key = tmp_key;
  ScopedPathUnlinker tmp_key_remover(tmp_key.value());
  if (tmp_key.empty())
    tmp_key_remover.set_should_remove(false);

  LOG(INFO) << "Verifying payload using public key: "
            << path_to_public_key.value();

  // Verifies the download size.
  TEST_AND_RETURN_VAL(ErrorCode::kPayloadSizeMismatchError,
                      update_check_response_size ==
                      metadata_size_ + metadata_signature_size_ +
                      buffer_offset_);

  // Verifies the payload hash.
  const string& payload_hash_data = payload_hash_calculator_.hash();
  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadVerificationError,
                      !payload_hash_data.empty());
  TEST_AND_RETURN_VAL(ErrorCode::kPayloadHashMismatchError,
                      payload_hash_data == update_check_response_hash);

  // Verifies the signed payload hash.
  if (!utils::FileExists(path_to_public_key.value().c_str())) {
    LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
    return ErrorCode::kSuccess;
  }
  TEST_AND_RETURN_VAL(ErrorCode::kSignedDeltaPayloadExpectedError,
                      !signatures_message_data_.empty());
  brillo::Blob hash_data = signed_hash_calculator_.raw_hash();
  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
                      PayloadVerifier::PadRSA2048SHA256Hash(&hash_data));
  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
                      !hash_data.empty());

  if (!PayloadVerifier::VerifySignature(
      signatures_message_data_, path_to_public_key.value(), hash_data)) {
    // The autoupdate_CatchBadSignatures test checks for this string
    // in log-files. Keep in sync.
    LOG(ERROR) << "Public key verification failed, thus update failed.";
    return ErrorCode::kDownloadPayloadPubKeyVerificationError;
  }

  LOG(INFO) << "Payload hash matches value in payload.";

  // At this point, we are guaranteed to have downloaded a full payload, i.e.,
  // the one whose size matches the size mentioned in the Omaha response. If
  // any errors happen after this, it's likely a problem with the payload
  // itself or the state of the system and not a problem with the URL or
  // network. So, indicate that to the download delegate so that AU can back
  // off appropriately.
  if (download_delegate_)
    download_delegate_->DownloadComplete();

  return ErrorCode::kSuccess;
}

namespace {
void LogVerifyError(const string& type,
                    const string& device,
                    uint64_t size,
                    const string& local_hash,
                    const string& expected_hash) {
  LOG(ERROR) << "This is a server-side error due to "
             << "mismatched delta update image!";
  LOG(ERROR) << "The delta I've been given contains a " << type << " delta "
             << "update that must be applied over a " << type << " with "
             << "a specific checksum, but the " << type << " we're starting "
             << "with doesn't have that checksum! This means that "
             << "the delta I've been given doesn't match my existing "
             << "system. The " << type << " partition I have has hash: "
             << local_hash << " but the update expected me to have "
             << expected_hash << ".";
  LOG(INFO) << "To get the checksum of the " << type << " partition run this "
               "command: dd if=" << device << " bs=1M count=" << size
            << " iflag=count_bytes 2>/dev/null | openssl dgst -sha256 -binary "
               "| openssl base64";
  LOG(INFO) << "To get the checksum of partitions in a bin file, "
            << "run: .../src/scripts/sha256_partitions.sh .../file.bin";
}

string StringForHashBytes(const void* bytes, size_t size) {
  return brillo::data_encoding::Base64Encode(bytes, size);
}
}  // namespace

bool DeltaPerformer::VerifySourcePartitions() {
  LOG(INFO) << "Verifying source partitions.";
  CHECK(manifest_valid_);
  CHECK(install_plan_);
  if (install_plan_->partitions.size() != partitions_.size()) {
    DLOG(ERROR) << "The list of partitions in the InstallPlan doesn't match the "
                   "list received in the payload. The InstallPlan has "
                << install_plan_->partitions.size()
                << " partitions while the payload has " << partitions_.size()
                << " partitions.";
    return false;
  }
  for (size_t i = 0; i < partitions_.size(); ++i) {
    if (partitions_[i].partition_name() != install_plan_->partitions[i].name) {
      DLOG(ERROR) << "The InstallPlan's partition " << i << " is \""
                  << install_plan_->partitions[i].name
                  << "\" but the payload expects it to be \""
                  << partitions_[i].partition_name()
                  << "\". This is an error in the DeltaPerformer setup.";
      return false;
    }
    if (!partitions_[i].has_old_partition_info())
      continue;
    const PartitionInfo& info = partitions_[i].old_partition_info();
    const InstallPlan::Partition& plan_part = install_plan_->partitions[i];
    bool valid =
        !plan_part.source_hash.empty() &&
        plan_part.source_hash.size() == info.hash().size() &&
        memcmp(plan_part.source_hash.data(),
               info.hash().data(),
               plan_part.source_hash.size()) == 0;
    if (!valid) {
      LogVerifyError(partitions_[i].partition_name(),
                     plan_part.source_path,
                     info.hash().size(),
                     StringForHashBytes(plan_part.source_hash.data(),
                                        plan_part.source_hash.size()),
                     StringForHashBytes(info.hash().data(),
                                        info.hash().size()));
      return false;
    }
  }
  return true;
}

void DeltaPerformer::DiscardBuffer(bool do_advance_offset,
                                   size_t signed_hash_buffer_size) {
  // Update the buffer offset.
  if (do_advance_offset)
    buffer_offset_ += buffer_.size();

  // Hash the content.
  payload_hash_calculator_.Update(buffer_.data(), buffer_.size());
  signed_hash_calculator_.Update(buffer_.data(), signed_hash_buffer_size);

  // Swap content with an empty vector to ensure that all memory is released.
  brillo::Blob().swap(buffer_);
}

bool DeltaPerformer::CanResumeUpdate(PrefsInterface* prefs,
                                     string update_check_response_hash) {
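  // A resume is only attempted when the checkpointed state is complete and
  // consistent: a valid next-operation index, an update-check response hash
  // matching the interrupted update, an acceptable resumed-failure count, and
  // sane data offset, hash context and manifest metadata/signature sizes.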
  int64_t next_operation = kUpdateStateOperationInvalid;
  if (!(prefs->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) &&
        next_operation != kUpdateStateOperationInvalid &&
        next_operation > 0))
    return false;

  string interrupted_hash;
  if (!(prefs->GetString(kPrefsUpdateCheckResponseHash, &interrupted_hash) &&
        !interrupted_hash.empty() &&
        interrupted_hash == update_check_response_hash))
    return false;

  int64_t resumed_update_failures;
  // Note that storing this value is optional, but if it is there it should not
  // be more than the limit.
  if (prefs->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures) &&
      resumed_update_failures > kMaxResumedUpdateFailures)
    return false;

  // Sanity check the rest.
  int64_t next_data_offset = -1;
  if (!(prefs->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
        next_data_offset >= 0))
    return false;

  string sha256_context;
  if (!(prefs->GetString(kPrefsUpdateStateSHA256Context, &sha256_context) &&
        !sha256_context.empty()))
    return false;

  int64_t manifest_metadata_size = 0;
  if (!(prefs->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size) &&
        manifest_metadata_size > 0))
    return false;

  int64_t manifest_signature_size = 0;
  if (!(prefs->GetInt64(kPrefsManifestSignatureSize,
                        &manifest_signature_size) &&
        manifest_signature_size >= 0))
    return false;

  return true;
}

bool DeltaPerformer::ResetUpdateProgress(PrefsInterface* prefs, bool quick) {
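  // A "quick" reset only invalidates the next-operation marker, which is
  // enough to make CanResumeUpdate() return false; a full reset also clears
  // the checkpointed offsets, hash contexts, signature blob and failure count.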
  TEST_AND_RETURN_FALSE(prefs->SetInt64(kPrefsUpdateStateNextOperation,
                                        kUpdateStateOperationInvalid));
  if (!quick) {
    prefs->SetString(kPrefsUpdateCheckResponseHash, "");
    prefs->SetInt64(kPrefsUpdateStateNextDataOffset, -1);
    prefs->SetInt64(kPrefsUpdateStateNextDataLength, 0);
    prefs->SetString(kPrefsUpdateStateSHA256Context, "");
    prefs->SetString(kPrefsUpdateStateSignedSHA256Context, "");
    prefs->SetString(kPrefsUpdateStateSignatureBlob, "");
    prefs->SetInt64(kPrefsManifestMetadataSize, -1);
    prefs->SetInt64(kPrefsManifestSignatureSize, -1);
    prefs->SetInt64(kPrefsResumedUpdateFailures, 0);
  }
  return true;
}

bool DeltaPerformer::CheckpointUpdateProgress() {
  Terminator::set_exit_blocked(true);
  if (last_updated_buffer_offset_ != buffer_offset_) {
    // Resets the progress in case we die in the middle of the state update.
    ResetUpdateProgress(prefs_, true);
    TEST_AND_RETURN_FALSE(
        prefs_->SetString(kPrefsUpdateStateSHA256Context,
                          payload_hash_calculator_.GetContext()));
    TEST_AND_RETURN_FALSE(
        prefs_->SetString(kPrefsUpdateStateSignedSHA256Context,
                          signed_hash_calculator_.GetContext()));
    TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextDataOffset,
                                           buffer_offset_));
    last_updated_buffer_offset_ = buffer_offset_;

    if (next_operation_num_ < num_total_operations_) {
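      // acc_num_operations_[i] holds the cumulative operation count up to and
      // including partition i, so this scan finds the partition that owns
      // next_operation_num_ and its index within that partition's operations.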
      size_t partition_index = current_partition_;
      while (next_operation_num_ >= acc_num_operations_[partition_index])
        partition_index++;
      const size_t partition_operation_num = next_operation_num_ - (
          partition_index ? acc_num_operations_[partition_index - 1] : 0);
      const InstallOperation& op =
          partitions_[partition_index].operations(partition_operation_num);
      TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextDataLength,
                                             op.data_length()));
    } else {
      TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextDataLength,
                                             0));
    }
  }
  TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextOperation,
                                         next_operation_num_));
  return true;
}

bool DeltaPerformer::PrimeUpdateState() {
  CHECK(manifest_valid_);
  block_size_ = manifest_.block_size();

  int64_t next_operation = kUpdateStateOperationInvalid;
  if (!prefs_->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) ||
      next_operation == kUpdateStateOperationInvalid ||
      next_operation <= 0) {
    // Initiating a new update, no more state needs to be initialized.
    return true;
  }
  next_operation_num_ = next_operation;

  // Resuming an update -- load the rest of the update state.
  int64_t next_data_offset = -1;
  TEST_AND_RETURN_FALSE(prefs_->GetInt64(kPrefsUpdateStateNextDataOffset,
                                         &next_data_offset) &&
                        next_data_offset >= 0);
  buffer_offset_ = next_data_offset;

  // The signed hash context and the signature blob may be empty if the
  // interrupted update didn't reach the signature.
  string signed_hash_context;
  if (prefs_->GetString(kPrefsUpdateStateSignedSHA256Context,
                        &signed_hash_context)) {
    TEST_AND_RETURN_FALSE(
        signed_hash_calculator_.SetContext(signed_hash_context));
  }

  string signature_blob;
  if (prefs_->GetString(kPrefsUpdateStateSignatureBlob, &signature_blob)) {
    signatures_message_data_.assign(signature_blob.begin(),
                                    signature_blob.end());
  }

  string hash_context;
  TEST_AND_RETURN_FALSE(prefs_->GetString(kPrefsUpdateStateSHA256Context,
                                          &hash_context) &&
                        payload_hash_calculator_.SetContext(hash_context));

  int64_t manifest_metadata_size = 0;
  TEST_AND_RETURN_FALSE(prefs_->GetInt64(kPrefsManifestMetadataSize,
                                         &manifest_metadata_size) &&
                        manifest_metadata_size > 0);
  metadata_size_ = manifest_metadata_size;

  int64_t manifest_signature_size = 0;
  TEST_AND_RETURN_FALSE(
      prefs_->GetInt64(kPrefsManifestSignatureSize, &manifest_signature_size) &&
      manifest_signature_size >= 0);
  metadata_signature_size_ = manifest_signature_size;

  // Advance the download progress to reflect what doesn't need to be
  // re-downloaded.
  total_bytes_received_ += buffer_offset_;

  // Speculatively count the resume as a failure.
  int64_t resumed_update_failures;
  if (prefs_->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures)) {
    resumed_update_failures++;
  } else {
    resumed_update_failures = 1;
  }
  prefs_->SetInt64(kPrefsResumedUpdateFailures, resumed_update_failures);
  return true;
}

}  // namespace chromeos_update_engine