//
// Copyright (C) 2015 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "update_engine/payload_generator/inplace_generator.h"

#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include <base/stl_util.h>

#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_generator/cycle_breaker.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/delta_diff_utils.h"
#include "update_engine/payload_generator/extent_ranges.h"
#include "update_engine/payload_generator/graph_types.h"
#include "update_engine/payload_generator/graph_utils.h"
#include "update_engine/payload_generator/topological_sort.h"
#include "update_engine/update_metadata.pb.h"

using std::make_pair;
using std::map;
using std::pair;
using std::set;
using std::string;
using std::vector;

namespace chromeos_update_engine {

using Block = InplaceGenerator::Block;

namespace {

// The only PayloadVersion supported by this implementation.
const PayloadVersion kInPlacePayloadVersion{kChromeOSMajorPayloadVersion,
                                            kInPlaceMinorPayloadVersion};

// This class allocates non-existent temp blocks, starting from
// kTempBlockStart. Other code is responsible for converting these
// temp blocks into real blocks, as the client can't read or write to
// these blocks.
class DummyExtentAllocator {
 public:
  vector<Extent> Allocate(const uint64_t block_count) {
    vector<Extent> ret(1);
    ret[0].set_start_block(next_block_);
    ret[0].set_num_blocks(block_count);
    next_block_ += block_count;
    return ret;
  }

 private:
  uint64_t next_block_{kTempBlockStart};
};

// Takes a vector of blocks and returns an equivalent vector of Extent
// objects.
vector<Extent> CompressExtents(const vector<uint64_t>& blocks) {
  vector<Extent> new_extents;
  for (uint64_t block : blocks) {
    AppendBlockToExtents(&new_extents, block);
  }
  return new_extents;
}

// Helper class to compare two operations by the start block of the first
// Extent in their destination extents, given the index of the operations in
// the graph.
class IndexedInstallOperationsDstComparator {
 public:
  explicit IndexedInstallOperationsDstComparator(Graph* graph)
      : graph_(graph) {}

  // Compares the operations stored at vertices |a| and |b| of graph_.
  bool operator()(size_t a, size_t b) const {
    return diff_utils::CompareAopsByDestination((*graph_)[a].aop,
                                                (*graph_)[b].aop);
  }

 private:
  const Graph* const graph_;
};

}  // namespace

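// CHECK-fails if any vertex in |graph| holds an operation without a type set.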
void InplaceGenerator::CheckGraph(const Graph& graph) {
  for (const Vertex& v : graph) {
    CHECK(v.aop.op.has_type());
  }
}

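// Rewrites |vertex| so that every read of a block listed in |remove_extents|
// becomes a read of the corresponding block in |replace_extents|. Both the
// operation's src_extents and the write-before extents on its outgoing edges
// are remapped.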
void InplaceGenerator::SubstituteBlocks(Vertex* vertex,
                                        const vector<Extent>& remove_extents,
                                        const vector<Extent>& replace_extents) {
  // First, expand out the blocks that op reads from
  vector<uint64_t> read_blocks = ExpandExtents(vertex->aop.op.src_extents());
  {
    // Expand remove_extents and replace_extents
    vector<uint64_t> remove_extents_expanded = ExpandExtents(remove_extents);
    vector<uint64_t> replace_extents_expanded = ExpandExtents(replace_extents);
    CHECK_EQ(remove_extents_expanded.size(), replace_extents_expanded.size());
    map<uint64_t, uint64_t> conversion;
    for (vector<uint64_t>::size_type i = 0; i < replace_extents_expanded.size();
         i++) {
      conversion[remove_extents_expanded[i]] = replace_extents_expanded[i];
    }
    ApplyMap(&read_blocks, conversion);
    for (auto& edge_prop_pair : vertex->out_edges) {
      vector<uint64_t> write_before_deps_expanded =
          ExpandExtents(edge_prop_pair.second.write_extents);
      ApplyMap(&write_before_deps_expanded, conversion);
      edge_prop_pair.second.write_extents =
          CompressExtents(write_before_deps_expanded);
    }
  }
  // Convert read_blocks back to extents
  vertex->aop.op.clear_src_extents();
  vector<Extent> new_extents = CompressExtents(read_blocks);
  StoreExtents(new_extents, vertex->aop.op.mutable_src_extents());
}

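// For every edge in |edges|, inserts a new MOVE vertex that copies the edge's
// blocks to scratch (temp) blocks, points the old destination at the scratch
// copy, and records the cut in |out_cuts|, so the original read-before
// dependency between the two vertices is removed.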
bool InplaceGenerator::CutEdges(Graph* graph,
                                const set<Edge>& edges,
                                vector<CutEdgeVertexes>* out_cuts) {
  DummyExtentAllocator scratch_allocator;
  vector<CutEdgeVertexes> cuts;
  cuts.reserve(edges.size());

  uint64_t scratch_blocks_used = 0;
  for (const Edge& edge : edges) {
    cuts.resize(cuts.size() + 1);
    vector<Extent> old_extents =
        (*graph)[edge.first].out_edges[edge.second].extents;
    // Choose some scratch space
    scratch_blocks_used += graph_utils::EdgeWeight(*graph, edge);
    cuts.back().tmp_extents =
        scratch_allocator.Allocate(graph_utils::EdgeWeight(*graph, edge));
    // create vertex to copy original->scratch
    cuts.back().new_vertex = graph->size();
    graph->emplace_back();
    cuts.back().old_src = edge.first;
    cuts.back().old_dst = edge.second;

    EdgeProperties& cut_edge_properties =
        (*graph)[edge.first].out_edges.find(edge.second)->second;

    // This should never happen, as we should only be cutting edges between
    // real file nodes, and write-before relationships are created from
    // a real file node to a temp copy node:
    CHECK(cut_edge_properties.write_extents.empty())
        << "Can't cut edge that has write-before relationship.";

    // make node depend on the copy operation
    (*graph)[edge.first].out_edges.insert(
        make_pair(graph->size() - 1, cut_edge_properties));

    // Set src/dst extents and other proto variables for copy operation
    graph->back().aop.op.set_type(InstallOperation::MOVE);
    StoreExtents(cut_edge_properties.extents,
                 graph->back().aop.op.mutable_src_extents());
    StoreExtents(cuts.back().tmp_extents,
                 graph->back().aop.op.mutable_dst_extents());
    graph->back().aop.op.set_src_length(graph_utils::EdgeWeight(*graph, edge) *
                                        kBlockSize);
    graph->back().aop.op.set_dst_length(graph->back().aop.op.src_length());

    // make the dest node read from the scratch space
    SubstituteBlocks(&((*graph)[edge.second]),
                     (*graph)[edge.first].out_edges[edge.second].extents,
                     cuts.back().tmp_extents);

    // delete the old edge
    CHECK_EQ(static_cast<Graph::size_type>(1),
             (*graph)[edge.first].out_edges.erase(edge.second));

    // Add an edge from dst to copy operation
    EdgeProperties write_before_edge_properties;
    write_before_edge_properties.write_extents = cuts.back().tmp_extents;
    (*graph)[edge.second].out_edges.insert(
        make_pair(graph->size() - 1, write_before_edge_properties));
  }
  out_cuts->swap(cuts);
  return true;
}

// Creates all the edges for the graph. Writers of a block point to
// readers of the same block. This is because for an edge A->B, B
// must complete before A executes.
void InplaceGenerator::CreateEdges(Graph* graph, const vector<Block>& blocks) {
  for (vector<Block>::size_type i = 0; i < blocks.size(); i++) {
    // Blocks with both a reader and writer get an edge
    if (blocks[i].reader == Vertex::kInvalidIndex ||
        blocks[i].writer == Vertex::kInvalidIndex)
      continue;
    // Don't have a node depend on itself
    if (blocks[i].reader == blocks[i].writer)
      continue;
    // See if there's already an edge we can add onto
    Vertex::EdgeMap::iterator edge_it =
        (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
    if (edge_it == (*graph)[blocks[i].writer].out_edges.end()) {
      // No existing edge. Create one
      (*graph)[blocks[i].writer].out_edges.insert(
          make_pair(blocks[i].reader, EdgeProperties()));
      edge_it = (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
      CHECK(edge_it != (*graph)[blocks[i].writer].out_edges.end());
    }
    AppendBlockToExtents(&edge_it->second.extents, i);
  }
}

namespace {

class SortCutsByTopoOrderLess {
 public:
  explicit SortCutsByTopoOrderLess(
      const vector<vector<Vertex::Index>::size_type>& table)
      : table_(table) {}
  bool operator()(const CutEdgeVertexes& a, const CutEdgeVertexes& b) {
    return table_[a.old_dst] < table_[b.old_dst];
  }

 private:
  const vector<vector<Vertex::Index>::size_type>& table_;
};

}  // namespace

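// Builds the inverse of |op_indexes|: for every vertex listed in the
// topological order, |reverse_op_indexes| maps the vertex index back to its
// position in that order.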
void InplaceGenerator::GenerateReverseTopoOrderMap(
    const vector<Vertex::Index>& op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes) {
  vector<vector<Vertex::Index>::size_type> table(op_indexes.size());
  for (vector<Vertex::Index>::size_type i = 0, e = op_indexes.size(); i != e;
       ++i) {
    Vertex::Index node = op_indexes[i];
    if (table.size() < (node + 1)) {
      table.resize(node + 1);
    }
    table[node] = i;
  }
  reverse_op_indexes->swap(table);
}

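// Sorts |cuts| so that cuts whose old destination vertex appears earlier in
// the topological order |op_indexes| come first.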
void InplaceGenerator::SortCutsByTopoOrder(
    const vector<Vertex::Index>& op_indexes, vector<CutEdgeVertexes>* cuts) {
  // first, make a reverse lookup table.
  vector<vector<Vertex::Index>::size_type> table;
  GenerateReverseTopoOrderMap(op_indexes, &table);
  SortCutsByTopoOrderLess less(table);
  sort(cuts->begin(), cuts->end(), less);
}

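// Moves all REPLACE/REPLACE_BZ (full) operations to the end of |op_indexes|,
// sorted by their destination extents, while preserving the relative order of
// the remaining operations.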
void InplaceGenerator::MoveAndSortFullOpsToBack(
    Graph* graph, vector<Vertex::Index>* op_indexes) {
  vector<Vertex::Index> ret;
  vector<Vertex::Index> full_ops;
  ret.reserve(op_indexes->size());
  for (auto op_index : *op_indexes) {
    InstallOperation::Type type = (*graph)[op_index].aop.op.type();
    if (type == InstallOperation::REPLACE ||
        type == InstallOperation::REPLACE_BZ) {
      full_ops.push_back(op_index);
    } else {
      ret.push_back(op_index);
    }
  }
  LOG(INFO) << "Stats: " << full_ops.size() << " full ops out of "
            << (full_ops.size() + ret.size()) << " total ops.";
  // Sort full ops according to their dst_extents.
  sort(full_ops.begin(),
       full_ops.end(),
       IndexedInstallOperationsDstComparator(graph));
  ret.insert(ret.end(), full_ops.begin(), full_ops.end());
  op_indexes->swap(ret);
}

namespace {

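// Returns true (and logs the offending extent) if any extent in |extents|
// reaches the temp block range starting at kTempBlockStart.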
template <typename T>
bool TempBlocksExistInExtents(const T& extents) {
  for (const auto& extent : extents) {
    uint64_t start = extent.start_block();
    uint64_t num = extent.num_blocks();
    if (start >= kTempBlockStart || (start + num) >= kTempBlockStart) {
      LOG(ERROR) << "temp block!";
      LOG(ERROR) << "start: " << start << ", num: " << num;
      LOG(ERROR) << "kTempBlockStart: " << kTempBlockStart;
      LOG(ERROR) << "returning true";
      return true;
    }
    // check for wrap-around, which would be a bug:
    CHECK(start <= (start + num));
  }
  return false;
}

// Converts the cuts, which must all have the same |old_dst| member,
// to full. It does this by converting the |old_dst| to REPLACE or
// REPLACE_BZ, dropping all incoming edges to |old_dst|, and marking
// all temp nodes invalid.
bool ConvertCutsToFull(
    Graph* graph,
    const string& new_part,
    BlobFileWriter* blob_file,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());
  set<Vertex::Index> deleted_nodes;
  for (const CutEdgeVertexes& cut : cuts) {
    TEST_AND_RETURN_FALSE(
        InplaceGenerator::ConvertCutToFullOp(graph, cut, new_part, blob_file));
    deleted_nodes.insert(cut.new_vertex);
  }
  deleted_nodes.insert(cuts[0].old_dst);

  vector<Vertex::Index> new_op_indexes;
  new_op_indexes.reserve(op_indexes->size());
  for (Vertex::Index vertex_index : *op_indexes) {
    if (base::ContainsKey(deleted_nodes, vertex_index))
      continue;
    new_op_indexes.push_back(vertex_index);
  }
  new_op_indexes.push_back(cuts[0].old_dst);
  op_indexes->swap(new_op_indexes);
  InplaceGenerator::GenerateReverseTopoOrderMap(*op_indexes,
                                                reverse_op_indexes);
  return true;
}

// Tries to assign temp blocks for a collection of cuts, all of which share
// the same old_dst member. If temp blocks can't be found, old_dst will be
// converted to a REPLACE or REPLACE_BZ operation. Returns true on success,
// which can happen even if blocks are converted to full. Returns false
// on exceptional error cases.
bool AssignBlockForAdjoiningCuts(
    Graph* graph,
    const string& new_part,
    BlobFileWriter* blob_file,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());
  const Vertex::Index old_dst = cuts[0].old_dst;
  // Calculate # of blocks needed
  uint64_t blocks_needed = 0;
  vector<uint64_t> cuts_blocks_needed(cuts.size());
  for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
    uint64_t cut_blocks_needed = 0;
    for (const Extent& extent : cuts[i].tmp_extents) {
      cut_blocks_needed += extent.num_blocks();
    }
    blocks_needed += cut_blocks_needed;
    cuts_blocks_needed[i] = cut_blocks_needed;
  }

  // Find enough blocks
  ExtentRanges scratch_ranges;
  // Each node that's supplying temp blocks and the corresponding blocks:
  typedef vector<pair<Vertex::Index, ExtentRanges>> SupplierVector;
  SupplierVector block_suppliers;
  uint64_t scratch_blocks_found = 0;
  for (vector<Vertex::Index>::size_type i = (*reverse_op_indexes)[old_dst] + 1,
                                        e = op_indexes->size();
       i < e;
       ++i) {
    Vertex::Index test_node = (*op_indexes)[i];
    if (!(*graph)[test_node].valid)
      continue;
    // See if this node has sufficient blocks
    ExtentRanges ranges;
    ranges.AddRepeatedExtents((*graph)[test_node].aop.op.dst_extents());
    ranges.SubtractExtent(
        ExtentForRange(kTempBlockStart, kSparseHole - kTempBlockStart));
    ranges.SubtractRepeatedExtents((*graph)[test_node].aop.op.src_extents());
    // For now, for simplicity, subtract out all blocks in read-before
    // dependencies.
    for (Vertex::EdgeMap::const_iterator
             edge_i = (*graph)[test_node].out_edges.begin(),
             edge_e = (*graph)[test_node].out_edges.end();
         edge_i != edge_e;
         ++edge_i) {
      ranges.SubtractExtents(edge_i->second.extents);
    }

    // Prevent using the block 0 as scratch space due to crbug.com/480751.
    if (ranges.ContainsBlock(0)) {
      LOG(INFO) << "Removing block 0 from the selected scratch range in vertex "
                << i;
      ranges.SubtractBlock(0);
    }

    if (ranges.blocks() == 0)
      continue;

    if (ranges.blocks() + scratch_blocks_found > blocks_needed) {
      // trim down ranges
      vector<Extent> new_ranges =
          ranges.GetExtentsForBlockCount(blocks_needed - scratch_blocks_found);
      ranges = ExtentRanges();
      ranges.AddExtents(new_ranges);
    }
    scratch_ranges.AddRanges(ranges);
    block_suppliers.push_back(make_pair(test_node, ranges));
    scratch_blocks_found += ranges.blocks();
    if (scratch_ranges.blocks() >= blocks_needed)
      break;
  }
  if (scratch_ranges.blocks() < blocks_needed) {
    LOG(INFO) << "Unable to find sufficient scratch";
    TEST_AND_RETURN_FALSE(ConvertCutsToFull(
        graph, new_part, blob_file, op_indexes, reverse_op_indexes, cuts));
    return true;
  }
  // Use the scratch we found
  TEST_AND_RETURN_FALSE(scratch_ranges.blocks() == scratch_blocks_found);

  // Make all the suppliers depend on this node
  for (const auto& index_range_pair : block_suppliers) {
    graph_utils::AddReadBeforeDepExtents(
        &(*graph)[index_range_pair.first],
        old_dst,
        index_range_pair.second.GetExtentsForBlockCount(
            index_range_pair.second.blocks()));
  }

  // Replace temp blocks in each cut
  for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
    const CutEdgeVertexes& cut = cuts[i];
    vector<Extent> real_extents =
        scratch_ranges.GetExtentsForBlockCount(cuts_blocks_needed[i]);
    scratch_ranges.SubtractExtents(real_extents);

    // Fix the old dest node w/ the real blocks
    InplaceGenerator::SubstituteBlocks(
        &(*graph)[old_dst], cut.tmp_extents, real_extents);

    // Fix the new node w/ the real blocks. Since the new node is just a
    // copy operation, we can replace all the dest extents w/ the real
    // blocks.
    InstallOperation* op = &(*graph)[cut.new_vertex].aop.op;
    op->clear_dst_extents();
    StoreExtents(real_extents, op->mutable_dst_extents());
  }
  return true;
}

}  // namespace

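// Walks |cuts| in reverse topological order, grouping cuts that share the
// same old destination vertex and assigning real scratch blocks to each
// group via AssignBlockForAdjoiningCuts().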
bool InplaceGenerator::AssignTempBlocks(
    Graph* graph,
    const string& new_part,
    BlobFileWriter* blob_file,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());

  // group of cuts w/ the same old_dst:
  vector<CutEdgeVertexes> cuts_group;

  for (vector<CutEdgeVertexes>::size_type i = cuts.size() - 1, e = 0; true;
       --i) {
    LOG(INFO) << "Fixing temp blocks in cut " << i
              << ": old dst: " << cuts[i].old_dst
              << " new vertex: " << cuts[i].new_vertex
              << " path: " << (*graph)[cuts[i].old_dst].aop.name;

    if (cuts_group.empty() || (cuts_group[0].old_dst == cuts[i].old_dst)) {
      cuts_group.push_back(cuts[i]);
    } else {
      CHECK(!cuts_group.empty());
      TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph,
                                                        new_part,
                                                        blob_file,
                                                        op_indexes,
                                                        reverse_op_indexes,
                                                        cuts_group));
      cuts_group.clear();
      cuts_group.push_back(cuts[i]);
    }

    if (i == e) {
      // break out of for() loop
      break;
    }
  }
  CHECK(!cuts_group.empty());
  TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(
      graph, new_part, blob_file, op_indexes, reverse_op_indexes, cuts_group));
  return true;
}

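// Returns true iff no valid vertex in |graph| still references temp blocks,
// either in its operation's extents or in its outgoing edges.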
bool InplaceGenerator::NoTempBlocksRemain(const Graph& graph) {
  size_t idx = 0;
  for (Graph::const_iterator it = graph.begin(), e = graph.end(); it != e;
       ++it, ++idx) {
    if (!it->valid)
      continue;
    const InstallOperation& op = it->aop.op;
    if (TempBlocksExistInExtents(op.dst_extents()) ||
        TempBlocksExistInExtents(op.src_extents())) {
      LOG(INFO) << "bad extents in node " << idx;
      LOG(INFO) << "so yeah";
      return false;
    }

    // Check out-edges:
    for (const auto& edge_prop_pair : it->out_edges) {
      if (TempBlocksExistInExtents(edge_prop_pair.second.extents) ||
          TempBlocksExistInExtents(edge_prop_pair.second.write_extents)) {
        LOG(INFO) << "bad out edge in node " << idx;
        LOG(INFO) << "so yeah";
        return false;
      }
    }
  }
  return true;
}

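// Replaces the destination vertex of |cut| with a full (REPLACE/REPLACE_BZ)
// operation read from |new_part|, drops its incoming edges, and marks the
// temporary copy vertex created for the cut as invalid.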
bool InplaceGenerator::ConvertCutToFullOp(Graph* graph,
                                          const CutEdgeVertexes& cut,
                                          const string& new_part,
                                          BlobFileWriter* blob_file) {
  // Drop all incoming edges, keep all outgoing edges

  // Keep all outgoing edges
  if ((*graph)[cut.old_dst].aop.op.type() != InstallOperation::REPLACE_BZ &&
      (*graph)[cut.old_dst].aop.op.type() != InstallOperation::REPLACE) {
    Vertex::EdgeMap out_edges = (*graph)[cut.old_dst].out_edges;
    graph_utils::DropWriteBeforeDeps(&out_edges);

    // Replace the operation with a REPLACE or REPLACE_BZ to generate the same
    // |new_extents| list of blocks and update the graph.
    vector<AnnotatedOperation> new_aop;
    vector<Extent> new_extents;
    ExtentsToVector((*graph)[cut.old_dst].aop.op.dst_extents(), &new_extents);
    TEST_AND_RETURN_FALSE(diff_utils::DeltaReadFile(
        &new_aop,
        "",  // old_part
        new_part,
        vector<Extent>(),  // old_extents
        new_extents,
        {},  // old_deflates
        {},  // new_deflates
        (*graph)[cut.old_dst].aop.name,
        -1,  // chunk_blocks, forces to have a single operation.
        kInPlacePayloadVersion,
        blob_file));
    TEST_AND_RETURN_FALSE(new_aop.size() == 1);
    TEST_AND_RETURN_FALSE(AddInstallOpToGraph(
        graph, cut.old_dst, nullptr, new_aop.front().op, new_aop.front().name));

    (*graph)[cut.old_dst].out_edges = out_edges;

    // Right now we don't have doubly-linked edges, so we have to scan
    // the whole graph.
    graph_utils::DropIncomingEdgesTo(graph, cut.old_dst);
  }

  // Delete temp node
  (*graph)[cut.old_src].out_edges.erase(cut.new_vertex);
  CHECK((*graph)[cut.old_dst].out_edges.find(cut.new_vertex) ==
        (*graph)[cut.old_dst].out_edges.end());
  (*graph)[cut.new_vertex].valid = false;
  LOG(INFO) << "marked node invalid: " << cut.new_vertex;
  return true;
}

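// Turns the dependency graph into an executable order: breaks cycles, cuts
// the offending edges into temp-block copies, topologically sorts the graph,
// moves full operations to the back, assigns real blocks to all temp blocks,
// and finally removes the scratch vertex (if one was added).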
bool InplaceGenerator::ConvertGraphToDag(Graph* graph,
                                         const string& new_part,
                                         BlobFileWriter* blob_file,
                                         vector<Vertex::Index>* final_order,
                                         Vertex::Index scratch_vertex) {
  CycleBreaker cycle_breaker;
  LOG(INFO) << "Finding cycles...";
  set<Edge> cut_edges;
  cycle_breaker.BreakCycles(*graph, &cut_edges);
  LOG(INFO) << "done finding cycles";
  CheckGraph(*graph);

  // Calculate number of scratch blocks needed

  LOG(INFO) << "Cutting cycles...";
  vector<CutEdgeVertexes> cuts;
  TEST_AND_RETURN_FALSE(CutEdges(graph, cut_edges, &cuts));
  LOG(INFO) << "done cutting cycles";
  LOG(INFO) << "There are " << cuts.size() << " cuts.";
  CheckGraph(*graph);

  LOG(INFO) << "Creating initial topological order...";
  TopologicalSort(*graph, final_order);
  LOG(INFO) << "done with initial topo order";
  CheckGraph(*graph);

  LOG(INFO) << "Moving full ops to the back";
  MoveAndSortFullOpsToBack(graph, final_order);
  LOG(INFO) << "done moving full ops to back";

  vector<vector<Vertex::Index>::size_type> inverse_final_order;
  GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);

  SortCutsByTopoOrder(*final_order, &cuts);

  if (!cuts.empty())
    TEST_AND_RETURN_FALSE(AssignTempBlocks(
        graph, new_part, blob_file, final_order, &inverse_final_order, cuts));
  LOG(INFO) << "Making sure all temp blocks have been allocated";

  // Remove the scratch node, if any
  if (scratch_vertex != Vertex::kInvalidIndex) {
    final_order->erase(final_order->begin() +
                       inverse_final_order[scratch_vertex]);
    (*graph)[scratch_vertex].valid = false;
    GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);
  }

  graph_utils::DumpGraph(*graph);
  CHECK(NoTempBlocksRemain(*graph));
  LOG(INFO) << "done making sure all temp blocks are allocated";
  return true;
}

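// Fills |vertex| with a no-op REPLACE_BZ that owns the unused blocks
// [start_block, start_block + num_blocks), so they can serve as scratch
// space.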
void InplaceGenerator::CreateScratchNode(uint64_t start_block,
                                         uint64_t num_blocks,
                                         Vertex* vertex) {
  vertex->aop.name = "<scratch>";
  vertex->aop.op.set_type(InstallOperation::REPLACE_BZ);
  vertex->aop.op.set_data_offset(0);
  vertex->aop.op.set_data_length(0);
  Extent* extent = vertex->aop.op.add_dst_extents();
  extent->set_start_block(start_block);
  extent->set_num_blocks(num_blocks);
}

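// Records |vertex| as the reader of every block in |operation|'s src_extents
// and as the writer of every block in its dst_extents. LOG(FATAL)s if a block
// already has a reader/writer registered.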
bool InplaceGenerator::AddInstallOpToBlocksVector(
    const InstallOperation& operation,
    const Graph& graph,
    Vertex::Index vertex,
    vector<Block>* blocks) {
  // See if this is already present.
  TEST_AND_RETURN_FALSE(operation.dst_extents_size() > 0);

  enum BlockField { READER = 0, WRITER, BLOCK_FIELD_COUNT };
  for (int field = READER; field < BLOCK_FIELD_COUNT; field++) {
    const char* past_participle = (field == READER) ? "read" : "written";
    const google::protobuf::RepeatedPtrField<Extent>& extents =
        (field == READER) ? operation.src_extents() : operation.dst_extents();
    Vertex::Index Block::*access_type =
        (field == READER) ? &Block::reader : &Block::writer;

    for (const Extent& extent : extents) {
      for (uint64_t block = extent.start_block();
           block < (extent.start_block() + extent.num_blocks());
           block++) {
        if ((*blocks)[block].*access_type != Vertex::kInvalidIndex) {
          LOG(FATAL) << "Block " << block << " is already " << past_participle
                     << " by " << (*blocks)[block].*access_type << "("
                     << graph[(*blocks)[block].*access_type].aop.name
                     << ") and also " << vertex << "(" << graph[vertex].aop.name
                     << ")";
        }
        (*blocks)[block].*access_type = vertex;
      }
    }
  }
  return true;
}

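// Stores |operation| in the graph, either at |existing_vertex| or in a newly
// appended vertex, and (when |blocks| is non-null) registers the blocks it
// reads and writes via AddInstallOpToBlocksVector().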
bool InplaceGenerator::AddInstallOpToGraph(Graph* graph,
                                           Vertex::Index existing_vertex,
                                           vector<Block>* blocks,
                                           const InstallOperation& operation,
                                           const string& op_name) {
  Vertex::Index vertex = existing_vertex;
  if (vertex == Vertex::kInvalidIndex) {
    graph->emplace_back();
    vertex = graph->size() - 1;
  }
  (*graph)[vertex].aop.op = operation;
  CHECK((*graph)[vertex].aop.op.has_type());
  (*graph)[vertex].aop.name = op_name;

  if (blocks)
    TEST_AND_RETURN_FALSE(InplaceGenerator::AddInstallOpToBlocksVector(
        (*graph)[vertex].aop.op, *graph, vertex, blocks));
  return true;
}

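// Replaces every element of |collection| that appears as a key in |the_map|
// with the corresponding mapped value; other elements are left untouched.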
void InplaceGenerator::ApplyMap(vector<uint64_t>* collection,
                                const map<uint64_t, uint64_t>& the_map) {
  for (uint64_t& elem : *collection) {
    const auto& map_it = the_map.find(elem);
    if (map_it != the_map.end())
      elem = map_it->second;
  }
}

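// Builds the block-dependency graph for |aops|, adds an optional scratch
// vertex for any unused space at the end of the partition, converts the graph
// to a DAG, and rewrites |aops| in the resulting execution order.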
bool InplaceGenerator::ResolveReadAfterWriteDependencies(
    const PartitionConfig& old_part,
    const PartitionConfig& new_part,
    uint64_t partition_size,
    size_t block_size,
    BlobFileWriter* blob_file,
    vector<AnnotatedOperation>* aops) {
  // Convert the operations to the graph.
  Graph graph;
  CheckGraph(graph);
  vector<Block> blocks(std::max(old_part.size, new_part.size) / block_size);
  for (const auto& aop : *aops) {
    AddInstallOpToGraph(
        &graph, Vertex::kInvalidIndex, &blocks, aop.op, aop.name);
  }
  CheckGraph(graph);

  // Final scratch block (if there's space)
  Vertex::Index scratch_vertex = Vertex::kInvalidIndex;
  if (blocks.size() < (partition_size / block_size)) {
    scratch_vertex = graph.size();
    graph.emplace_back();
    size_t scratch_blocks = (partition_size / block_size) - blocks.size();
    LOG(INFO) << "Added " << scratch_blocks << " scratch space blocks.";
    CreateScratchNode(blocks.size(), scratch_blocks, &graph.back());
  }
  CheckGraph(graph);

  LOG(INFO) << "Creating edges...";
  CreateEdges(&graph, blocks);
  LOG(INFO) << "Done creating edges";
  CheckGraph(graph);

  vector<Vertex::Index> final_order;
  TEST_AND_RETURN_FALSE(ConvertGraphToDag(
      &graph, new_part.path, blob_file, &final_order, scratch_vertex));

  // Copy operations over to the |aops| vector in the final_order generated by
  // the topological sort.
  aops->clear();
  aops->reserve(final_order.size());
  for (const Vertex::Index vertex_index : final_order) {
    const Vertex& vertex = graph[vertex_index];
    aops->push_back(vertex.aop);
  }
  return true;
}

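// Entry point for the in-place payload generator: delta-compresses |new_part|
// against |old_part| and then reorders the resulting operations so the update
// can be applied in place on the target partition.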
bool InplaceGenerator::GenerateOperations(const PayloadGenerationConfig& config,
                                          const PartitionConfig& old_part,
                                          const PartitionConfig& new_part,
                                          BlobFileWriter* blob_file,
                                          vector<AnnotatedOperation>* aops) {
  TEST_AND_RETURN_FALSE(old_part.name == new_part.name);
  TEST_AND_RETURN_FALSE(config.version.major == kInPlacePayloadVersion.major);
  TEST_AND_RETURN_FALSE(config.version.minor == kInPlacePayloadVersion.minor);

  ssize_t hard_chunk_blocks =
      (config.hard_chunk_size == -1
           ? -1
           : config.hard_chunk_size / config.block_size);
  size_t soft_chunk_blocks = config.soft_chunk_size / config.block_size;
  uint64_t partition_size = new_part.size;
  if (new_part.name == kPartitionNameRoot)
    partition_size = config.rootfs_partition_size;

  LOG(INFO) << "Delta compressing " << new_part.name << " partition...";
  TEST_AND_RETURN_FALSE(diff_utils::DeltaReadPartition(aops,
                                                       old_part,
                                                       new_part,
                                                       hard_chunk_blocks,
                                                       soft_chunk_blocks,
                                                       config.version,
                                                       blob_file));
  LOG(INFO) << "Done reading " << new_part.name;

  TEST_AND_RETURN_FALSE(ResolveReadAfterWriteDependencies(
      old_part, new_part, partition_size, config.block_size, blob_file, aops));
  LOG(INFO) << "Done reordering " << new_part.name;
  return true;
}

};  // namespace chromeos_update_engine