//
// Copyright (C) 2015 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "update_engine/payload_generator/payload_file.h"

#include <endian.h>

#include <algorithm>
#include <map>
#include <utility>

#include <base/strings/stringprintf.h>

#include "update_engine/common/hash_calculator.h"
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/delta_performer.h"
#include "update_engine/payload_consumer/file_writer.h"
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_generator/annotated_operation.h"
#include "update_engine/payload_generator/delta_diff_utils.h"
#include "update_engine/payload_generator/payload_signer.h"

using std::string;
using std::vector;

namespace chromeos_update_engine {

namespace {

struct DeltaObject {
  DeltaObject(const string& in_name, const int in_type, const off_t in_size)
      : name(in_name), type(in_type), size(in_size) {}
  bool operator<(const DeltaObject& object) const {
    return (size != object.size) ? (size < object.size) : (name < object.name);
  }
  string name;
  int type;
  off_t size;
};

// Writes the uint64_t passed in, which is in host byte order, to the file as
// big-endian. Returns true on success.
bool WriteUint64AsBigEndian(FileWriter* writer, const uint64_t value) {
  uint64_t value_be = htobe64(value);
  TEST_AND_RETURN_FALSE(writer->Write(&value_be, sizeof(value_be)));
  return true;
}

}  // namespace

bool PayloadFile::Init(const PayloadGenerationConfig& config) {
  TEST_AND_RETURN_FALSE(config.version.Validate());
  major_version_ = config.version.major;
  manifest_.set_minor_version(config.version.minor);
  manifest_.set_block_size(config.block_size);
  manifest_.set_max_timestamp(config.max_timestamp);

  if (config.target.dynamic_partition_metadata != nullptr)
    *(manifest_.mutable_dynamic_partition_metadata()) =
        *(config.target.dynamic_partition_metadata);

  if (config.is_partial_update) {
    manifest_.set_partial_update(true);
  }

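  // If an apex_info file was provided, parse the ApexMetadata protobuf from
  // it and move its apex_info entries into the manifest.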
  if (!config.apex_info_file.empty()) {
    ApexMetadata apex_metadata;
    int fd = open(config.apex_info_file.c_str(), O_RDONLY);
    if (fd < 0) {
      PLOG(FATAL) << "Failed to open " << config.apex_info_file << " for read.";
    }
    ScopedFdCloser closer{&fd};
    CHECK(apex_metadata.ParseFromFileDescriptor(fd));
    if (apex_metadata.apex_info_size() > 0) {
      *manifest_.mutable_apex_info() =
          std::move(*apex_metadata.mutable_apex_info());
    }
  }
  return true;
}

bool PayloadFile::AddPartition(const PartitionConfig& old_conf,
                               const PartitionConfig& new_conf,
                               vector<AnnotatedOperation> aops,
                               vector<CowMergeOperation> merge_sequence,
                               size_t cow_size) {
  Partition part;
  part.cow_size = cow_size;
  part.name = new_conf.name;
  part.aops = std::move(aops);
  part.cow_merge_sequence = std::move(merge_sequence);
  part.postinstall = new_conf.postinstall;
  part.verity = new_conf.verity;
  part.version = new_conf.version;
  // Initialize the PartitionInfo objects if present.
  if (!old_conf.path.empty())
    TEST_AND_RETURN_FALSE(
        diff_utils::InitializePartitionInfo(old_conf, &part.old_info));
  TEST_AND_RETURN_FALSE(
      diff_utils::InitializePartitionInfo(new_conf, &part.new_info));
  part_vec_.push_back(std::move(part));
  return true;
}

bool PayloadFile::WritePayload(const string& payload_file,
                               const string& data_blobs_path,
                               const string& private_key_path,
                               uint64_t* metadata_size_out) {
  // Reorder the data blobs to match the operation order in |manifest_|.
  ScopedTempFile ordered_blobs_file("CrAU_temp_data.ordered.XXXXXX");
  TEST_AND_RETURN_FALSE(
      ReorderDataBlobs(data_blobs_path, ordered_blobs_file.path()));
  // Check that install op blobs are in order. After this loop,
  // |next_blob_offset| equals the total size of all blobs, which is where the
  // payload signature blob will be placed.
  uint64_t next_blob_offset = 0;
  for (const auto& part : part_vec_) {
    for (const auto& aop : part.aops) {
      if (!aop.op.has_data_offset())
        continue;
      if (aop.op.data_offset() != next_blob_offset) {
        LOG(FATAL) << "bad blob offset! " << aop.op.data_offset()
                   << " != " << next_blob_offset;
      }
      next_blob_offset += aop.op.data_length();
    }
  }

  // Copy the operations and partition info from the part_vec_ to the manifest.
  manifest_.clear_partitions();
  for (const auto& part : part_vec_) {
    PartitionUpdate* partition = manifest_.add_partitions();
    partition->set_partition_name(part.name);
    if (!part.version.empty()) {
      partition->set_version(part.version);
    }
    if (part.cow_size > 0) {
      partition->set_estimate_cow_size(part.cow_size);
    }
    if (part.postinstall.run) {
      partition->set_run_postinstall(true);
      if (!part.postinstall.path.empty())
        partition->set_postinstall_path(part.postinstall.path);
      if (!part.postinstall.filesystem_type.empty())
        partition->set_filesystem_type(part.postinstall.filesystem_type);
      partition->set_postinstall_optional(part.postinstall.optional);
    }
    if (!part.verity.IsEmpty()) {
      if (part.verity.hash_tree_extent.num_blocks() != 0) {
        *partition->mutable_hash_tree_data_extent() =
            part.verity.hash_tree_data_extent;
        *partition->mutable_hash_tree_extent() = part.verity.hash_tree_extent;
        partition->set_hash_tree_algorithm(part.verity.hash_tree_algorithm);
        if (!part.verity.hash_tree_salt.empty())
          partition->set_hash_tree_salt(part.verity.hash_tree_salt.data(),
                                        part.verity.hash_tree_salt.size());
      }
      if (part.verity.fec_extent.num_blocks() != 0) {
        *partition->mutable_fec_data_extent() = part.verity.fec_data_extent;
        *partition->mutable_fec_extent() = part.verity.fec_extent;
        partition->set_fec_roots(part.verity.fec_roots);
      }
    }
    for (const AnnotatedOperation& aop : part.aops) {
      *partition->add_operations() = aop.op;
    }
    for (const auto& merge_op : part.cow_merge_sequence) {
      *partition->add_merge_operations() = merge_op;
    }

    if (part.old_info.has_size() || part.old_info.has_hash())
      *(partition->mutable_old_partition_info()) = part.old_info;
    if (part.new_info.has_size() || part.new_info.has_hash())
      *(partition->mutable_new_partition_info()) = part.new_info;
  }

  // Signatures appear at the end of the blobs. Note the offset in the
  // |manifest_|.
  uint64_t signature_blob_length = 0;
  if (!private_key_path.empty()) {
    TEST_AND_RETURN_FALSE(PayloadSigner::SignatureBlobLength(
        {private_key_path}, &signature_blob_length));
    PayloadSigner::AddSignatureToManifest(
        next_blob_offset, signature_blob_length, &manifest_);
  }

  // Serialize protobuf
  string serialized_manifest;
  TEST_AND_RETURN_FALSE(manifest_.SerializeToString(&serialized_manifest));

  uint64_t metadata_size =
      sizeof(kDeltaMagic) + 2 * sizeof(uint64_t) + serialized_manifest.size();

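  // The payload file written below is laid out as:
  //   kDeltaMagic
  //   major payload version    (uint64_t, big-endian)
  //   manifest size            (uint64_t, big-endian)
  //   metadata signature size  (uint32_t, big-endian)
  //   serialized manifest
  //   metadata signature       (only when a private key is given)
  //   data blobs
  //   payload signature        (only when a private key is given)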
  LOG(INFO) << "Writing final delta file header...";
  DirectFileWriter writer;
  TEST_AND_RETURN_FALSE_ERRNO(writer.Open(payload_file.c_str(),
                                          O_WRONLY | O_CREAT | O_TRUNC,
                                          0644) == 0);
  ScopedFileWriterCloser writer_closer(&writer);

  // Write header
  TEST_AND_RETURN_FALSE_ERRNO(writer.Write(kDeltaMagic, sizeof(kDeltaMagic)));

  // Write major version number
  TEST_AND_RETURN_FALSE(WriteUint64AsBigEndian(&writer, major_version_));

  // Write protobuf length
  TEST_AND_RETURN_FALSE(
      WriteUint64AsBigEndian(&writer, serialized_manifest.size()));

  // Metadata signature has the same size as payload signature, because they
  // are both the same kind of signature for the same kind of hash.
  uint32_t metadata_signature_size = htobe32(signature_blob_length);
  TEST_AND_RETURN_FALSE_ERRNO(
      writer.Write(&metadata_signature_size, sizeof(metadata_signature_size)));
  metadata_size += sizeof(metadata_signature_size);
  // Convert back to host byte order; the value is used below when computing
  // the payload signature's offset.
  metadata_signature_size = signature_blob_length;

  // Write protobuf
  LOG(INFO) << "Writing final delta file protobuf... "
            << serialized_manifest.size();
  TEST_AND_RETURN_FALSE_ERRNO(
      writer.Write(serialized_manifest.data(), serialized_manifest.size()));

  // Write metadata signature blob.
  if (!private_key_path.empty()) {
    brillo::Blob metadata_hash;
    TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile(
        payload_file, metadata_size, &metadata_hash));
    string metadata_signature;
    TEST_AND_RETURN_FALSE(PayloadSigner::SignHashWithKeys(
        metadata_hash, {private_key_path}, &metadata_signature));
    TEST_AND_RETURN_FALSE_ERRNO(
        writer.Write(metadata_signature.data(), metadata_signature.size()));
  }

  // Append the data blobs.
  LOG(INFO) << "Writing final delta file data blobs...";
  int blobs_fd = open(ordered_blobs_file.path().c_str(), O_RDONLY, 0);
  ScopedFdCloser blobs_fd_closer(&blobs_fd);
  TEST_AND_RETURN_FALSE(blobs_fd >= 0);
  for (;;) {
    vector<char> buf(1024 * 1024);
    ssize_t rc = read(blobs_fd, buf.data(), buf.size());
    if (0 == rc) {
      // EOF
      break;
    }
    TEST_AND_RETURN_FALSE_ERRNO(rc > 0);
    TEST_AND_RETURN_FALSE_ERRNO(writer.Write(buf.data(), rc));
  }

  // Write payload signature blob. It goes at |signatures_offset| bytes into
  // the blob area, i.e. right after all the data blobs.
  if (!private_key_path.empty()) {
    LOG(INFO) << "Signing the update...";
    string signature;
    TEST_AND_RETURN_FALSE(PayloadSigner::SignPayload(
        payload_file,
        {private_key_path},
        metadata_size,
        metadata_signature_size,
        metadata_size + metadata_signature_size + manifest_.signatures_offset(),
        &signature));
    TEST_AND_RETURN_FALSE_ERRNO(
        writer.Write(signature.data(), signature.size()));
  }

  ReportPayloadUsage(metadata_size);
  *metadata_size_out = metadata_size;
  return true;
}

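// Rewrites the blobs referenced by |part_vec_| from |data_blobs_path| into
// |new_data_blobs_path| in the order the operations appear, updating each
// operation's data_offset to its new location and recording the blob's hash
// via AddOperationHash().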
bool PayloadFile::ReorderDataBlobs(const string& data_blobs_path,
                                   const string& new_data_blobs_path) {
  int in_fd = open(data_blobs_path.c_str(), O_RDONLY, 0);
  TEST_AND_RETURN_FALSE_ERRNO(in_fd >= 0);
  ScopedFdCloser in_fd_closer(&in_fd);

  DirectFileWriter writer;
  int rc = writer.Open(
      new_data_blobs_path.c_str(), O_WRONLY | O_TRUNC | O_CREAT, 0644);
  if (rc != 0) {
    PLOG(ERROR) << "Error creating " << new_data_blobs_path;
    return false;
  }
  ScopedFileWriterCloser writer_closer(&writer);
  uint64_t out_file_size = 0;

  for (auto& part : part_vec_) {
    for (AnnotatedOperation& aop : part.aops) {
      if (!aop.op.has_data_offset())
        continue;
      CHECK(aop.op.has_data_length());
      brillo::Blob buf(aop.op.data_length());
      ssize_t rc = pread(in_fd, buf.data(), buf.size(), aop.op.data_offset());
      TEST_AND_RETURN_FALSE(rc == static_cast<ssize_t>(buf.size()));

      // Add the hash of the data blobs for this operation
      TEST_AND_RETURN_FALSE(AddOperationHash(&aop.op, buf));

      aop.op.set_data_offset(out_file_size);
      TEST_AND_RETURN_FALSE_ERRNO(writer.Write(buf.data(), buf.size()));
      out_file_size += buf.size();
    }
  }
  return true;
}

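// Computes the SHA-256 hash of |buf| and stores it in |op|'s
// data_sha256_hash field.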
bool PayloadFile::AddOperationHash(InstallOperation* op,
                                   const brillo::Blob& buf) {
  brillo::Blob hash;
  TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfData(buf, &hash));
  op->set_data_sha256_hash(hash.data(), hash.size());
  return true;
}

void PayloadFile::ReportPayloadUsage(uint64_t metadata_size) const {
  std::map<DeltaObject, int> object_counts;
  off_t total_size = 0;
  int total_op = 0;

  for (const auto& part : part_vec_) {
    string part_prefix = "<" + part.name + ">:";
    for (const AnnotatedOperation& aop : part.aops) {
      DeltaObject delta(
          part_prefix + aop.name, aop.op.type(), aop.op.data_length());
      object_counts[delta]++;
      total_size += aop.op.data_length();
    }
    total_op += part.aops.size();
  }

  object_counts[DeltaObject("<manifest-metadata>", -1, metadata_size)] = 1;
  total_size += metadata_size;

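  // Report columns: share of the total payload size, size in bytes, operation
  // type ("-" for the metadata row), object name, and count.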
  constexpr char kFormatString[] = "%6.2f%% %10jd %-13s %s %d\n";
  for (const auto& object_count : object_counts) {
    const DeltaObject& object = object_count.first;
    // Use printf() instead of LOG(INFO) because the timestamp makes it
    // difficult to compare two reports.
    printf(kFormatString,
           object.size * 100.0 / total_size,
           object.size,
           (object.type >= 0
                ? InstallOperationTypeName(
                      static_cast<InstallOperation::Type>(object.type))
                : "-"),
           object.name.c_str(),
           object_count.second);
  }
  printf(kFormatString, 100.0, total_size, "", "<total>", total_op);
  fflush(stdout);
}

}  // namespace chromeos_update_engine