//
// Copyright (C) 2015 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "update_engine/payload_generator/inplace_generator.h"

#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_generator/cycle_breaker.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/delta_diff_utils.h"
#include "update_engine/payload_generator/extent_ranges.h"
#include "update_engine/payload_generator/graph_types.h"
#include "update_engine/payload_generator/graph_utils.h"
#include "update_engine/payload_generator/topological_sort.h"
#include "update_engine/update_metadata.pb.h"

using std::make_pair;
using std::map;
using std::pair;
using std::set;
using std::string;
using std::vector;

namespace chromeos_update_engine {

using Block = InplaceGenerator::Block;

namespace {

// The only PayloadVersion supported by this implementation.
const PayloadVersion kInPlacePayloadVersion{kChromeOSMajorPayloadVersion,
                                            kInPlaceMinorPayloadVersion};

// This class allocates non-existent temp blocks, starting from
// kTempBlockStart. Other code is responsible for converting these
// temp blocks into real blocks, as the client can't read or write to
// these blocks.
class DummyExtentAllocator {
 public:
  vector<Extent> Allocate(const uint64_t block_count) {
    vector<Extent> ret(1);
    ret[0].set_start_block(next_block_);
    ret[0].set_num_blocks(block_count);
    next_block_ += block_count;
    return ret;
  }

 private:
  uint64_t next_block_{kTempBlockStart};
};
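
// For example, two successive calls Allocate(2) and Allocate(3) on the
// allocator above return the temp ranges [kTempBlockStart, kTempBlockStart + 2)
// and [kTempBlockStart + 2, kTempBlockStart + 5), respectively.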

// Takes a vector of blocks and returns an equivalent vector of Extent
// objects.
vector<Extent> CompressExtents(const vector<uint64_t>& blocks) {
  vector<Extent> new_extents;
  for (uint64_t block : blocks) {
    AppendBlockToExtents(&new_extents, block);
  }
  return new_extents;
}
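
// For example, CompressExtents({10, 11, 12, 20}) yields the two Extents
// {start_block: 10, num_blocks: 3} and {start_block: 20, num_blocks: 1},
// since AppendBlockToExtents() extends the last Extent when blocks are
// contiguous.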

// Helper class to compare two operations by the start block of the first
// Extent in their destination extents, given the index of the operations in
// the graph.
class IndexedInstallOperationsDstComparator {
 public:
  explicit IndexedInstallOperationsDstComparator(Graph* graph)
      : graph_(graph) {}

  // Compares the operations in vertices |a| and |b| of |graph_|.
  bool operator()(size_t a, size_t b) const {
    return diff_utils::CompareAopsByDestination((*graph_)[a].aop,
                                                (*graph_)[b].aop);
  }

 private:
  const Graph* const graph_;
};

}  // namespace

void InplaceGenerator::CheckGraph(const Graph& graph) {
  for (const Vertex& v : graph) {
    CHECK(v.aop.op.has_type());
  }
}

void InplaceGenerator::SubstituteBlocks(
    Vertex* vertex,
    const vector<Extent>& remove_extents,
    const vector<Extent>& replace_extents) {
  // First, expand out the blocks that the op reads from.
  vector<uint64_t> read_blocks = ExpandExtents(vertex->aop.op.src_extents());
  {
    // Expand remove_extents and replace_extents.
    vector<uint64_t> remove_extents_expanded = ExpandExtents(remove_extents);
    vector<uint64_t> replace_extents_expanded = ExpandExtents(replace_extents);
    CHECK_EQ(remove_extents_expanded.size(), replace_extents_expanded.size());
    map<uint64_t, uint64_t> conversion;
    for (vector<uint64_t>::size_type i = 0;
         i < replace_extents_expanded.size(); i++) {
      conversion[remove_extents_expanded[i]] = replace_extents_expanded[i];
    }
    ApplyMap(&read_blocks, conversion);
    for (auto& edge_prop_pair : vertex->out_edges) {
      vector<uint64_t> write_before_deps_expanded =
          ExpandExtents(edge_prop_pair.second.write_extents);
      ApplyMap(&write_before_deps_expanded, conversion);
      edge_prop_pair.second.write_extents =
          CompressExtents(write_before_deps_expanded);
    }
  }
  // Convert read_blocks back to extents.
  vertex->aop.op.clear_src_extents();
  vector<Extent> new_extents = CompressExtents(read_blocks);
  StoreExtents(new_extents, vertex->aop.op.mutable_src_extents());
}
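
// As an illustration: if |vertex| reads blocks {5, 6, 7} and SubstituteBlocks
// is called with remove_extents covering {5, 6} and replace_extents covering
// {100, 101}, the vertex's src_extents are rewritten to read {100, 101, 7};
// write-before extents on its out-edges are remapped the same way.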

bool InplaceGenerator::CutEdges(Graph* graph,
                                const set<Edge>& edges,
                                vector<CutEdgeVertexes>* out_cuts) {
  DummyExtentAllocator scratch_allocator;
  vector<CutEdgeVertexes> cuts;
  cuts.reserve(edges.size());

  uint64_t scratch_blocks_used = 0;
  for (const Edge& edge : edges) {
    cuts.resize(cuts.size() + 1);
    vector<Extent> old_extents =
        (*graph)[edge.first].out_edges[edge.second].extents;
    // Choose some scratch space.
    scratch_blocks_used += graph_utils::EdgeWeight(*graph, edge);
    cuts.back().tmp_extents =
        scratch_allocator.Allocate(graph_utils::EdgeWeight(*graph, edge));
    // Create a vertex to copy original->scratch.
    cuts.back().new_vertex = graph->size();
    graph->emplace_back();
    cuts.back().old_src = edge.first;
    cuts.back().old_dst = edge.second;

    EdgeProperties& cut_edge_properties =
        (*graph)[edge.first].out_edges.find(edge.second)->second;

    // This should never happen, as we should only be cutting edges between
    // real file nodes, and write-before relationships are created from
    // a real file node to a temp copy node:
    CHECK(cut_edge_properties.write_extents.empty())
        << "Can't cut edge that has write-before relationship.";

    // Make the node depend on the copy operation.
    (*graph)[edge.first].out_edges.insert(
        make_pair(graph->size() - 1, cut_edge_properties));

    // Set src/dst extents and other proto variables for the copy operation.
    graph->back().aop.op.set_type(InstallOperation::MOVE);
    StoreExtents(cut_edge_properties.extents,
                 graph->back().aop.op.mutable_src_extents());
    StoreExtents(cuts.back().tmp_extents,
                 graph->back().aop.op.mutable_dst_extents());
    graph->back().aop.op.set_src_length(
        graph_utils::EdgeWeight(*graph, edge) * kBlockSize);
    graph->back().aop.op.set_dst_length(graph->back().aop.op.src_length());

    // Make the dest node read from the scratch space.
    SubstituteBlocks(&((*graph)[edge.second]),
                     (*graph)[edge.first].out_edges[edge.second].extents,
                     cuts.back().tmp_extents);

    // Delete the old edge.
    CHECK_EQ(static_cast<Graph::size_type>(1),
             (*graph)[edge.first].out_edges.erase(edge.second));

    // Add an edge from dst to the copy operation.
    EdgeProperties write_before_edge_properties;
    write_before_edge_properties.write_extents = cuts.back().tmp_extents;
    (*graph)[edge.second].out_edges.insert(
        make_pair(graph->size() - 1, write_before_edge_properties));
  }
  out_cuts->swap(cuts);
  return true;
}
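
// To summarize CutEdges(): cutting an edge A->B (A overwrites blocks that B
// still needs to read) inserts a MOVE node C that copies the contested blocks
// to temp space. B is rewritten to read the temp blocks, the new edge A->C
// makes the copy complete before A overwrites the originals, and the
// write-before edge B->C makes the copy land before B reads it.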

// Creates all the edges for the graph. Writers of a block point to
// readers of the same block. This is because for an edge A->B, B
// must complete before A executes.
void InplaceGenerator::CreateEdges(Graph* graph, const vector<Block>& blocks) {
  for (vector<Block>::size_type i = 0; i < blocks.size(); i++) {
    // Blocks with both a reader and writer get an edge.
    if (blocks[i].reader == Vertex::kInvalidIndex ||
        blocks[i].writer == Vertex::kInvalidIndex)
      continue;
    // Don't have a node depend on itself.
    if (blocks[i].reader == blocks[i].writer)
      continue;
    // See if there's already an edge we can add onto.
    Vertex::EdgeMap::iterator edge_it =
        (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
    if (edge_it == (*graph)[blocks[i].writer].out_edges.end()) {
      // No existing edge. Create one.
      (*graph)[blocks[i].writer].out_edges.insert(
          make_pair(blocks[i].reader, EdgeProperties()));
      edge_it = (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
      CHECK(edge_it != (*graph)[blocks[i].writer].out_edges.end());
    }
    AppendBlockToExtents(&edge_it->second.extents, i);
  }
}
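
// For example, if vertex 2 writes block 7 and vertex 5 reads block 7,
// CreateEdges() adds (or extends) edge 2->5 with block 7 in its extents,
// meaning vertex 5 must run before vertex 2.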

namespace {

class SortCutsByTopoOrderLess {
 public:
  explicit SortCutsByTopoOrderLess(
      const vector<vector<Vertex::Index>::size_type>& table)
      : table_(table) {}

  bool operator()(const CutEdgeVertexes& a, const CutEdgeVertexes& b) {
    return table_[a.old_dst] < table_[b.old_dst];
  }

 private:
  const vector<vector<Vertex::Index>::size_type>& table_;
};

}  // namespace

void InplaceGenerator::GenerateReverseTopoOrderMap(
    const vector<Vertex::Index>& op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes) {
  vector<vector<Vertex::Index>::size_type> table(op_indexes.size());
  for (vector<Vertex::Index>::size_type i = 0, e = op_indexes.size(); i != e;
       ++i) {
    Vertex::Index node = op_indexes[i];
    if (table.size() < (node + 1)) {
      table.resize(node + 1);
    }
    table[node] = i;
  }
  reverse_op_indexes->swap(table);
}
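
// For example, op_indexes {3, 0, 2, 1} (vertex 3 runs first) produces the
// table {1, 3, 2, 0}: table[node] is the position of |node| in the
// topological order.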

void InplaceGenerator::SortCutsByTopoOrder(
    const vector<Vertex::Index>& op_indexes, vector<CutEdgeVertexes>* cuts) {
  // First, make a reverse lookup table.
  vector<vector<Vertex::Index>::size_type> table;
  GenerateReverseTopoOrderMap(op_indexes, &table);
  SortCutsByTopoOrderLess less(table);
  sort(cuts->begin(), cuts->end(), less);
}

void InplaceGenerator::MoveAndSortFullOpsToBack(
    Graph* graph, vector<Vertex::Index>* op_indexes) {
  vector<Vertex::Index> ret;
  vector<Vertex::Index> full_ops;
  ret.reserve(op_indexes->size());
  for (auto op_index : *op_indexes) {
    InstallOperation_Type type = (*graph)[op_index].aop.op.type();
    if (type == InstallOperation::REPLACE ||
        type == InstallOperation::REPLACE_BZ) {
      full_ops.push_back(op_index);
    } else {
      ret.push_back(op_index);
    }
  }
  LOG(INFO) << "Stats: " << full_ops.size() << " full ops out of "
            << (full_ops.size() + ret.size()) << " total ops.";
  // Sort full ops according to their dst_extents.
  sort(full_ops.begin(), full_ops.end(),
       IndexedInstallOperationsDstComparator(graph));
  ret.insert(ret.end(), full_ops.begin(), full_ops.end());
  op_indexes->swap(ret);
}
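
// REPLACE and REPLACE_BZ operations read nothing from the old partition, so
// deferring them is always safe; it also leaves their dst_extents near the
// end of the order, where AssignBlockForAdjoiningCuts() looks for scratch
// blocks.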

namespace {

template <typename T>
bool TempBlocksExistInExtents(const T& extents) {
  for (const auto& extent : extents) {
    uint64_t start = extent.start_block();
    uint64_t num = extent.num_blocks();
    if (start >= kTempBlockStart || (start + num) >= kTempBlockStart) {
      LOG(ERROR) << "temp block!";
      LOG(ERROR) << "start: " << start << ", num: " << num;
      LOG(ERROR) << "kTempBlockStart: " << kTempBlockStart;
      LOG(ERROR) << "returning true";
      return true;
    }
    // Check for wrap-around, which would be a bug:
    CHECK(start <= (start + num));
  }
  return false;
}

// Converts the cuts, which must all have the same |old_dst| member,
// to full. It does this by converting the |old_dst| to REPLACE or
// REPLACE_BZ, dropping all incoming edges to |old_dst|, and marking
// all temp nodes invalid.
bool ConvertCutsToFull(
    Graph* graph,
    const string& new_part,
    BlobFileWriter* blob_file,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());
  set<Vertex::Index> deleted_nodes;
  for (const CutEdgeVertexes& cut : cuts) {
    TEST_AND_RETURN_FALSE(
        InplaceGenerator::ConvertCutToFullOp(graph, cut, new_part, blob_file));
    deleted_nodes.insert(cut.new_vertex);
  }
  deleted_nodes.insert(cuts[0].old_dst);

  vector<Vertex::Index> new_op_indexes;
  new_op_indexes.reserve(op_indexes->size());
  for (Vertex::Index vertex_index : *op_indexes) {
    if (utils::SetContainsKey(deleted_nodes, vertex_index))
      continue;
    new_op_indexes.push_back(vertex_index);
  }
  new_op_indexes.push_back(cuts[0].old_dst);
  op_indexes->swap(new_op_indexes);
  InplaceGenerator::GenerateReverseTopoOrderMap(*op_indexes,
                                                reverse_op_indexes);
  return true;
}

// Tries to assign temp blocks for a collection of cuts, all of which share
// the same old_dst member. If temp blocks can't be found, old_dst will be
// converted to a REPLACE or REPLACE_BZ operation. Returns true on success,
// which can happen even if blocks are converted to full. Returns false
// on exceptional error cases.
bool AssignBlockForAdjoiningCuts(
    Graph* graph,
    const string& new_part,
    BlobFileWriter* blob_file,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());
  const Vertex::Index old_dst = cuts[0].old_dst;
  // Calculate the number of blocks needed.
  uint64_t blocks_needed = 0;
  vector<uint64_t> cuts_blocks_needed(cuts.size());
  for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
    uint64_t cut_blocks_needed = 0;
    for (const Extent& extent : cuts[i].tmp_extents) {
      cut_blocks_needed += extent.num_blocks();
    }
    blocks_needed += cut_blocks_needed;
    cuts_blocks_needed[i] = cut_blocks_needed;
  }

  // Find enough blocks.
  ExtentRanges scratch_ranges;
  // Each node that's supplying temp blocks and the corresponding blocks:
  typedef vector<pair<Vertex::Index, ExtentRanges>> SupplierVector;
  SupplierVector block_suppliers;
  uint64_t scratch_blocks_found = 0;
  for (vector<Vertex::Index>::size_type i = (*reverse_op_indexes)[old_dst] + 1,
                                        e = op_indexes->size();
       i < e; ++i) {
    Vertex::Index test_node = (*op_indexes)[i];
    if (!(*graph)[test_node].valid)
      continue;
    // See if this node has sufficient blocks.
    ExtentRanges ranges;
    ranges.AddRepeatedExtents((*graph)[test_node].aop.op.dst_extents());
    ranges.SubtractExtent(
        ExtentForRange(kTempBlockStart, kSparseHole - kTempBlockStart));
    ranges.SubtractRepeatedExtents((*graph)[test_node].aop.op.src_extents());
    // For now, for simplicity, subtract out all blocks in read-before
    // dependencies.
    for (Vertex::EdgeMap::const_iterator edge_i =
             (*graph)[test_node].out_edges.begin(),
         edge_e = (*graph)[test_node].out_edges.end();
         edge_i != edge_e; ++edge_i) {
      ranges.SubtractExtents(edge_i->second.extents);
    }

    // Prevent using block 0 as scratch space due to crbug.com/480751.
    if (ranges.ContainsBlock(0)) {
      LOG(INFO) << "Removing block 0 from the selected scratch range in vertex "
                << i;
      ranges.SubtractBlock(0);
    }

    if (ranges.blocks() == 0)
      continue;

    if (ranges.blocks() + scratch_blocks_found > blocks_needed) {
      // Trim down the ranges.
      vector<Extent> new_ranges =
          ranges.GetExtentsForBlockCount(blocks_needed - scratch_blocks_found);
      ranges = ExtentRanges();
      ranges.AddExtents(new_ranges);
    }
    scratch_ranges.AddRanges(ranges);
    block_suppliers.push_back(make_pair(test_node, ranges));
    scratch_blocks_found += ranges.blocks();
    if (scratch_ranges.blocks() >= blocks_needed)
      break;
  }
  if (scratch_ranges.blocks() < blocks_needed) {
    LOG(INFO) << "Unable to find sufficient scratch";
    TEST_AND_RETURN_FALSE(ConvertCutsToFull(
        graph, new_part, blob_file, op_indexes, reverse_op_indexes, cuts));
    return true;
  }
  // Use the scratch we found.
  TEST_AND_RETURN_FALSE(scratch_ranges.blocks() == scratch_blocks_found);

  // Make all the suppliers depend on this node.
  for (const auto& index_range_pair : block_suppliers) {
    graph_utils::AddReadBeforeDepExtents(
        &(*graph)[index_range_pair.first], old_dst,
        index_range_pair.second.GetExtentsForBlockCount(
            index_range_pair.second.blocks()));
  }

  // Replace temp blocks in each cut.
  for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
    const CutEdgeVertexes& cut = cuts[i];
    vector<Extent> real_extents =
        scratch_ranges.GetExtentsForBlockCount(cuts_blocks_needed[i]);
    scratch_ranges.SubtractExtents(real_extents);

    // Fix the old dest node w/ the real blocks.
    InplaceGenerator::SubstituteBlocks(&(*graph)[old_dst], cut.tmp_extents,
                                       real_extents);

    // Fix the new node w/ the real blocks. Since the new node is just a
    // copy operation, we can replace all the dest extents w/ the real
    // blocks.
    InstallOperation* op = &(*graph)[cut.new_vertex].aop.op;
    op->clear_dst_extents();
    StoreExtents(real_extents, op->mutable_dst_extents());
  }
  return true;
}
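
// In short: scratch space is borrowed from the dst_extents of operations that
// run after |old_dst| in the topological order (blocks they will write but
// have not yet written), and each supplier gains a read-before dependency on
// |old_dst| so it cannot overwrite the borrowed blocks until |old_dst| has
// read them.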

}  // namespace

bool InplaceGenerator::AssignTempBlocks(
    Graph* graph,
    const string& new_part,
    BlobFileWriter* blob_file,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());

  // Group of cuts w/ the same old_dst:
  vector<CutEdgeVertexes> cuts_group;

  for (vector<CutEdgeVertexes>::size_type i = cuts.size() - 1, e = 0; true;
       --i) {
    LOG(INFO) << "Fixing temp blocks in cut " << i
              << ": old dst: " << cuts[i].old_dst
              << " new vertex: " << cuts[i].new_vertex
              << " path: " << (*graph)[cuts[i].old_dst].aop.name;

    if (cuts_group.empty() || (cuts_group[0].old_dst == cuts[i].old_dst)) {
      cuts_group.push_back(cuts[i]);
    } else {
      CHECK(!cuts_group.empty());
      TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph,
                                                        new_part,
                                                        blob_file,
                                                        op_indexes,
                                                        reverse_op_indexes,
                                                        cuts_group));
      cuts_group.clear();
      cuts_group.push_back(cuts[i]);
    }

    if (i == e) {
      // Break out of the for() loop; |i| is unsigned, so the loop guard can't
      // simply be i >= 0.
      break;
    }
  }
  CHECK(!cuts_group.empty());
  TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph,
                                                    new_part,
                                                    blob_file,
                                                    op_indexes,
                                                    reverse_op_indexes,
                                                    cuts_group));
  return true;
}
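
// Note: |cuts| arrives sorted by the topological order of |old_dst| (see
// SortCutsByTopoOrder()), so cuts sharing an |old_dst| are contiguous and the
// reverse iteration above processes one complete group at a time.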

bool InplaceGenerator::NoTempBlocksRemain(const Graph& graph) {
  size_t idx = 0;
  for (Graph::const_iterator it = graph.begin(), e = graph.end(); it != e;
       ++it, ++idx) {
    if (!it->valid)
      continue;
    const InstallOperation& op = it->aop.op;
    if (TempBlocksExistInExtents(op.dst_extents()) ||
        TempBlocksExistInExtents(op.src_extents())) {
      LOG(INFO) << "bad extents in node " << idx;
      LOG(INFO) << "so yeah";
      return false;
    }

    // Check out-edges:
    for (const auto& edge_prop_pair : it->out_edges) {
      if (TempBlocksExistInExtents(edge_prop_pair.second.extents) ||
          TempBlocksExistInExtents(edge_prop_pair.second.write_extents)) {
        LOG(INFO) << "bad out edge in node " << idx;
        LOG(INFO) << "so yeah";
        return false;
      }
    }
  }
  return true;
}

bool InplaceGenerator::ConvertCutToFullOp(Graph* graph,
                                          const CutEdgeVertexes& cut,
                                          const string& new_part,
                                          BlobFileWriter* blob_file) {
  // Drop all incoming edges to |cut.old_dst|; keep all its outgoing edges.
  if ((*graph)[cut.old_dst].aop.op.type() != InstallOperation::REPLACE_BZ &&
      (*graph)[cut.old_dst].aop.op.type() != InstallOperation::REPLACE) {
    Vertex::EdgeMap out_edges = (*graph)[cut.old_dst].out_edges;
    graph_utils::DropWriteBeforeDeps(&out_edges);

    // Replace the operation with a REPLACE or REPLACE_BZ that generates the
    // same |new_extents| list of blocks, and update the graph.
    vector<AnnotatedOperation> new_aop;
    vector<Extent> new_extents;
    ExtentsToVector((*graph)[cut.old_dst].aop.op.dst_extents(), &new_extents);
    TEST_AND_RETURN_FALSE(diff_utils::DeltaReadFile(
        &new_aop,
        "",  // old_part
        new_part,
        vector<Extent>(),  // old_extents
        new_extents,
        {},  // old_deflates
        {},  // new_deflates
        (*graph)[cut.old_dst].aop.name,
        -1,  // chunk_blocks, forces it to be a single operation.
        kInPlacePayloadVersion,
        blob_file));
    TEST_AND_RETURN_FALSE(new_aop.size() == 1);
    TEST_AND_RETURN_FALSE(AddInstallOpToGraph(
        graph, cut.old_dst, nullptr, new_aop.front().op, new_aop.front().name));

    (*graph)[cut.old_dst].out_edges = out_edges;

    // Right now we don't have doubly-linked edges, so we have to scan
    // the whole graph.
    graph_utils::DropIncomingEdgesTo(graph, cut.old_dst);
  }

  // Delete the temp node.
  (*graph)[cut.old_src].out_edges.erase(cut.new_vertex);
  CHECK((*graph)[cut.old_dst].out_edges.find(cut.new_vertex) ==
        (*graph)[cut.old_dst].out_edges.end());
  (*graph)[cut.new_vertex].valid = false;
  LOG(INFO) << "marked node invalid: " << cut.new_vertex;
  return true;
}

bool InplaceGenerator::ConvertGraphToDag(Graph* graph,
                                         const string& new_part,
                                         BlobFileWriter* blob_file,
                                         vector<Vertex::Index>* final_order,
                                         Vertex::Index scratch_vertex) {
  CycleBreaker cycle_breaker;
  LOG(INFO) << "Finding cycles...";
  set<Edge> cut_edges;
  cycle_breaker.BreakCycles(*graph, &cut_edges);
  LOG(INFO) << "done finding cycles";
  CheckGraph(*graph);

  // Calculate the number of scratch blocks needed.

  LOG(INFO) << "Cutting cycles...";
  vector<CutEdgeVertexes> cuts;
  TEST_AND_RETURN_FALSE(CutEdges(graph, cut_edges, &cuts));
  LOG(INFO) << "done cutting cycles";
  LOG(INFO) << "There are " << cuts.size() << " cuts.";
  CheckGraph(*graph);

  LOG(INFO) << "Creating initial topological order...";
  TopologicalSort(*graph, final_order);
  LOG(INFO) << "done with initial topo order";
  CheckGraph(*graph);

  LOG(INFO) << "Moving full ops to the back";
  MoveAndSortFullOpsToBack(graph, final_order);
  LOG(INFO) << "done moving full ops to back";

  vector<vector<Vertex::Index>::size_type> inverse_final_order;
  GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);

  SortCutsByTopoOrder(*final_order, &cuts);

  if (!cuts.empty())
    TEST_AND_RETURN_FALSE(AssignTempBlocks(graph,
                                           new_part,
                                           blob_file,
                                           final_order,
                                           &inverse_final_order,
                                           cuts));
  LOG(INFO) << "Making sure all temp blocks have been allocated";

  // Remove the scratch node, if any.
  if (scratch_vertex != Vertex::kInvalidIndex) {
    final_order->erase(final_order->begin() +
                       inverse_final_order[scratch_vertex]);
    (*graph)[scratch_vertex].valid = false;
    GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);
  }

  graph_utils::DumpGraph(*graph);
  CHECK(NoTempBlocksRemain(*graph));
  LOG(INFO) << "done making sure all temp blocks are allocated";
  return true;
}

void InplaceGenerator::CreateScratchNode(uint64_t start_block,
                                         uint64_t num_blocks,
                                         Vertex* vertex) {
  vertex->aop.name = "<scratch>";
  vertex->aop.op.set_type(InstallOperation::REPLACE_BZ);
  vertex->aop.op.set_data_offset(0);
  vertex->aop.op.set_data_length(0);
  Extent* extent = vertex->aop.op.add_dst_extents();
  extent->set_start_block(start_block);
  extent->set_num_blocks(num_blocks);
}

bool InplaceGenerator::AddInstallOpToBlocksVector(
    const InstallOperation& operation,
    const Graph& graph,
    Vertex::Index vertex,
    vector<Block>* blocks) {
  // See if this is already present.
  TEST_AND_RETURN_FALSE(operation.dst_extents_size() > 0);

  enum BlockField { READER = 0, WRITER, BLOCK_FIELD_COUNT };
  for (int field = READER; field < BLOCK_FIELD_COUNT; field++) {
    const char* past_participle = (field == READER) ? "read" : "written";
    const google::protobuf::RepeatedPtrField<Extent>& extents =
        (field == READER) ? operation.src_extents() : operation.dst_extents();
    // Pointer-to-member selecting which Block field (reader or writer) this
    // pass of the loop updates.
    Vertex::Index Block::*access_type =
        (field == READER) ? &Block::reader : &Block::writer;

    for (const Extent& extent : extents) {
      for (uint64_t block = extent.start_block();
           block < (extent.start_block() + extent.num_blocks()); block++) {
        if ((*blocks)[block].*access_type != Vertex::kInvalidIndex) {
          LOG(FATAL) << "Block " << block << " is already "
                     << past_participle << " by "
                     << (*blocks)[block].*access_type << "("
                     << graph[(*blocks)[block].*access_type].aop.name
                     << ") and also " << vertex << "("
                     << graph[vertex].aop.name << ")";
        }
        (*blocks)[block].*access_type = vertex;
      }
    }
  }
  return true;
}
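
// For example, an operation with src_extents {start: 0, num: 2} and
// dst_extents {start: 10, num: 1} marks blocks 0 and 1 as read by |vertex|
// and block 10 as written by it; a block with two readers or two writers is
// a fatal error.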

bool InplaceGenerator::AddInstallOpToGraph(Graph* graph,
                                           Vertex::Index existing_vertex,
                                           vector<Block>* blocks,
                                           const InstallOperation& operation,
                                           const string& op_name) {
  Vertex::Index vertex = existing_vertex;
  if (vertex == Vertex::kInvalidIndex) {
    graph->emplace_back();
    vertex = graph->size() - 1;
  }
  (*graph)[vertex].aop.op = operation;
  CHECK((*graph)[vertex].aop.op.has_type());
  (*graph)[vertex].aop.name = op_name;

  if (blocks)
    TEST_AND_RETURN_FALSE(InplaceGenerator::AddInstallOpToBlocksVector(
        (*graph)[vertex].aop.op, *graph, vertex, blocks));
  return true;
}

void InplaceGenerator::ApplyMap(vector<uint64_t>* collection,
                                const map<uint64_t, uint64_t>& the_map) {
  for (uint64_t& elem : *collection) {
    const auto& map_it = the_map.find(elem);
    if (map_it != the_map.end())
      elem = map_it->second;
  }
}
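
// For example, ApplyMap(&v, {{5, 10}}) with v == {3, 5, 7} leaves v as
// {3, 10, 7}; elements not present in the map are unchanged.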

bool InplaceGenerator::ResolveReadAfterWriteDependencies(
    const PartitionConfig& old_part,
    const PartitionConfig& new_part,
    uint64_t partition_size,
    size_t block_size,
    BlobFileWriter* blob_file,
    vector<AnnotatedOperation>* aops) {
  // Convert the operations to the graph.
  Graph graph;
  CheckGraph(graph);
  vector<Block> blocks(std::max(old_part.size, new_part.size) / block_size);
  for (const auto& aop : *aops) {
    AddInstallOpToGraph(
        &graph, Vertex::kInvalidIndex, &blocks, aop.op, aop.name);
  }
  CheckGraph(graph);

  // Final scratch block (if there's space).
  Vertex::Index scratch_vertex = Vertex::kInvalidIndex;
  if (blocks.size() < (partition_size / block_size)) {
    scratch_vertex = graph.size();
    graph.emplace_back();
    size_t scratch_blocks = (partition_size / block_size) - blocks.size();
    LOG(INFO) << "Added " << scratch_blocks << " scratch space blocks.";
    CreateScratchNode(blocks.size(), scratch_blocks, &graph.back());
  }
  CheckGraph(graph);

  LOG(INFO) << "Creating edges...";
  CreateEdges(&graph, blocks);
  LOG(INFO) << "Done creating edges";
  CheckGraph(graph);

  vector<Vertex::Index> final_order;
  TEST_AND_RETURN_FALSE(ConvertGraphToDag(
      &graph, new_part.path, blob_file, &final_order, scratch_vertex));

  // Copy operations over to the |aops| vector in the final_order generated by
  // the topological sort.
  aops->clear();
  aops->reserve(final_order.size());
  for (const Vertex::Index vertex_index : final_order) {
    const Vertex& vertex = graph[vertex_index];
    aops->push_back(vertex.aop);
  }
  return true;
}

bool InplaceGenerator::GenerateOperations(const PayloadGenerationConfig& config,
                                          const PartitionConfig& old_part,
                                          const PartitionConfig& new_part,
                                          BlobFileWriter* blob_file,
                                          vector<AnnotatedOperation>* aops) {
  TEST_AND_RETURN_FALSE(old_part.name == new_part.name);
  TEST_AND_RETURN_FALSE(config.version.major == kInPlacePayloadVersion.major);
  TEST_AND_RETURN_FALSE(config.version.minor == kInPlacePayloadVersion.minor);

  ssize_t hard_chunk_blocks =
      (config.hard_chunk_size == -1
           ? -1
           : config.hard_chunk_size / config.block_size);
  size_t soft_chunk_blocks = config.soft_chunk_size / config.block_size;
  uint64_t partition_size = new_part.size;
  if (new_part.name == kLegacyPartitionNameRoot)
    partition_size = config.rootfs_partition_size;

  LOG(INFO) << "Delta compressing " << new_part.name << " partition...";
  TEST_AND_RETURN_FALSE(diff_utils::DeltaReadPartition(aops,
                                                       old_part,
                                                       new_part,
                                                       hard_chunk_blocks,
                                                       soft_chunk_blocks,
                                                       config.version,
                                                       blob_file));
  LOG(INFO) << "Done reading " << new_part.name;

  TEST_AND_RETURN_FALSE(ResolveReadAfterWriteDependencies(
      old_part, new_part, partition_size, config.block_size, blob_file, aops));
  LOG(INFO) << "Done reordering " << new_part.name;
  return true;
}

}  // namespace chromeos_update_engine