/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "perfetto/ext/trace_processor/importers/memory_tracker/graph_processor.h"

#include <algorithm>
#include <cstdint>
#include <list>
#include <map>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <vector>

namespace perfetto {
namespace trace_processor {

using Edge = GlobalNodeGraph::Edge;
using Node = GlobalNodeGraph::Node;
using Process = GlobalNodeGraph::Process;

namespace {

const char kSharedMemoryRootNode[] = "shared_memory";
const char kSizeEntryName[] = "size";
const char kEffectiveSizeEntryName[] = "effective_size";

Node::Entry::ScalarUnits EntryUnitsFromString(const std::string& units) {
  if (units == RawMemoryGraphNode::kUnitsBytes) {
    return Node::Entry::ScalarUnits::kBytes;
  } else if (units == RawMemoryGraphNode::kUnitsObjects) {
    return Node::Entry::ScalarUnits::kObjects;
  } else {
    // Invalid units so we just return a value of the correct type.
    return Node::Entry::ScalarUnits::kObjects;
  }
}

std::optional<uint64_t> GetSizeEntryOfNode(Node* node) {
  auto size_it = node->entries()->find(kSizeEntryName);
  if (size_it == node->entries()->end())
    return std::nullopt;

  PERFETTO_DCHECK(size_it->second.type == Node::Entry::Type::kUInt64);
  PERFETTO_DCHECK(size_it->second.units == Node::Entry::ScalarUnits::kBytes);
  return std::optional<uint64_t>(size_it->second.value_uint64);
}

}  // namespace

// static
std::unique_ptr<GlobalNodeGraph> GraphProcessor::CreateMemoryGraph(
    const GraphProcessor::RawMemoryNodeMap& process_nodes) {
  auto global_graph = std::unique_ptr<GlobalNodeGraph>(new GlobalNodeGraph());

  // First pass: collect allocator nodes into a graph and populate them with
  // entries.
  for (const auto& pid_to_node : process_nodes) {
    // There can be null entries in the map; simply filter these out.
    if (!pid_to_node.second)
      continue;

    auto* graph = global_graph->CreateGraphForProcess(pid_to_node.first);
    CollectAllocatorNodes(*pid_to_node.second, global_graph.get(), graph);
  }

  // Second pass: generate the graph of edges between the nodes.
  for (const auto& pid_to_node : process_nodes) {
    // There can be null entries in the map; simply filter these out.
    if (!pid_to_node.second)
      continue;

    AddEdges(*pid_to_node.second, global_graph.get());
  }

  return global_graph;
}

// static
void GraphProcessor::RemoveWeakNodesFromGraph(GlobalNodeGraph* global_graph) {
  auto* global_root = global_graph->shared_memory_graph()->root();

  // Third pass: recursively mark nodes as weak if they were not explicitly
  // created (i.e. have no associated raw node) and all their children are
  // weak.
  MarkImplicitWeakParentsRecursively(global_root);
  for (const auto& pid_to_process : global_graph->process_node_graphs()) {
    MarkImplicitWeakParentsRecursively(pid_to_process.second->root());
  }

  // Fourth pass: recursively mark nodes as weak if they own a node which is
  // weak or if their parent is weak.
  {
    std::set<const Node*> visited;
    MarkWeakOwnersAndChildrenRecursively(global_root, &visited);
    for (const auto& pid_to_process : global_graph->process_node_graphs()) {
      MarkWeakOwnersAndChildrenRecursively(pid_to_process.second->root(),
                                           &visited);
    }
  }

  // Fifth pass: remove all nodes which are weak (including their descendants)
  // and clean up owned-by edges to match.
  RemoveWeakNodesRecursively(global_root);
  for (const auto& pid_to_process : global_graph->process_node_graphs()) {
    RemoveWeakNodesRecursively(pid_to_process.second->root());
  }
}

// static
void GraphProcessor::AddOverheadsAndPropagateEntries(
    GlobalNodeGraph* global_graph) {
  // Sixth pass: account for tracing overhead in system memory allocators.
  for (auto& pid_to_process : global_graph->process_node_graphs()) {
    Process* process = pid_to_process.second.get();
    if (process->FindNode("winheap")) {
      AssignTracingOverhead("winheap", global_graph,
                            pid_to_process.second.get());
    } else if (process->FindNode("malloc")) {
      AssignTracingOverhead("malloc", global_graph,
                            pid_to_process.second.get());
    }
  }

  // Seventh pass: aggregate non-size integer entries into parents and
  // propagate string and int entries for the shared graph.
  auto* global_root = global_graph->shared_memory_graph()->root();
  AggregateNumericsRecursively(global_root);
  PropagateNumericsAndDiagnosticsRecursively(global_root);
  for (auto& pid_to_process : global_graph->process_node_graphs()) {
    AggregateNumericsRecursively(pid_to_process.second->root());
  }
}

// static
void GraphProcessor::CalculateSizesForGraph(GlobalNodeGraph* global_graph) {
  // Eighth pass: calculate the size field for nodes by considering the sizes
  // of their children and owners.
  {
    auto it = global_graph->VisitInDepthFirstPostOrder();
    while (Node* node = it.next()) {
      CalculateSizeForNode(node);
    }
  }

  // Ninth pass: calculate not-owned and not-owning sub-sizes of all nodes.
  {
    auto it = global_graph->VisitInDepthFirstPostOrder();
    while (Node* node = it.next()) {
      CalculateNodeSubSizes(node);
    }
  }

  // Tenth pass: calculate owned and owning coefficients of owned and owner
  // nodes.
  {
    auto it = global_graph->VisitInDepthFirstPostOrder();
    while (Node* node = it.next()) {
      CalculateNodeOwnershipCoefficient(node);
    }
  }

  // Eleventh pass: calculate cumulative owned and owning coefficients of all
  // nodes.
  {
    auto it = global_graph->VisitInDepthFirstPreOrder();
    while (Node* node = it.next()) {
      CalculateNodeCumulativeOwnershipCoefficient(node);
    }
  }

  // Twelfth pass: calculate the effective sizes of all nodes.
  {
    auto it = global_graph->VisitInDepthFirstPostOrder();
    while (Node* node = it.next()) {
      CalculateNodeEffectiveSize(node);
    }
  }
}

// static
std::map<base::PlatformProcessId, uint64_t>
GraphProcessor::ComputeSharedFootprintFromGraph(
    const GlobalNodeGraph& global_graph) {
  // Go through all nodes associated with global nodes and find out whether
  // they are owned by shared memory nodes.
  Node* global_root =
      global_graph.shared_memory_graph()->root()->GetChild("global");

  // If there are no global nodes then just return an empty map with no data.
  if (!global_root)
    return std::map<base::PlatformProcessId, uint64_t>();

  struct GlobalNodeOwners {
    std::list<Edge*> edges;
    int max_priority = 0;
  };

  std::map<Node*, GlobalNodeOwners> global_node_to_shared_owners;
  for (const auto& path_to_child : *global_root->children()) {
    // The path of this node is something like "global/foo".
    Node* global_node = path_to_child.second;

    // If there's no size to attribute, there's no point in propagating
    // anything.
    if (global_node->entries()->count(kSizeEntryName) == 0)
      continue;

    for (auto* edge : *global_node->owned_by_edges()) {
      // Check whether the source node's path starts with "shared_memory/",
      // which indicates shared memory.
      Node* source_root = edge->source()->node_graph()->root();
      const Node* current = edge->source();
      PERFETTO_DCHECK(current != source_root);

      // Traverse up until we hit the point where |current| holds a node which
      // is the child of |source_root|.
      while (current->parent() != source_root)
        current = current->parent();

      // If the source is indeed a shared memory node, add the edge to the map.
      if (source_root->GetChild(kSharedMemoryRootNode) == current) {
        GlobalNodeOwners* owners = &global_node_to_shared_owners[global_node];
        owners->edges.push_back(edge);
        owners->max_priority = std::max(owners->max_priority, edge->priority());
      }
    }
  }

  // Go through the map and leave only the edges which have the maximum
  // priority.
  for (auto& global_to_shared_edges : global_node_to_shared_owners) {
    int max_priority = global_to_shared_edges.second.max_priority;
    global_to_shared_edges.second.edges.remove_if(
        [max_priority](Edge* edge) { return edge->priority() < max_priority; });
  }

  // Compute the footprints by distributing the memory of the nodes
  // among the processes which have edges left.
  std::map<base::PlatformProcessId, uint64_t> pid_to_shared_footprint;
  for (const auto& global_to_shared_edges : global_node_to_shared_owners) {
    Node* node = global_to_shared_edges.first;
    const auto& edges = global_to_shared_edges.second.edges;

    const Node::Entry& size_entry =
        node->entries()->find(kSizeEntryName)->second;
    PERFETTO_DCHECK(size_entry.type == Node::Entry::kUInt64);

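    // Split the node's size evenly among the remaining owning edges; e.g. a
    // 4096-byte global node with owning edges from two processes contributes
    // 2048 bytes to each process's shared footprint.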
    uint64_t size_per_process = size_entry.value_uint64 / edges.size();
    for (auto* edge : edges) {
      base::PlatformProcessId pid = edge->source()->node_graph()->pid();
      pid_to_shared_footprint[pid] += size_per_process;
    }
  }

  return pid_to_shared_footprint;
}

// static
void GraphProcessor::CollectAllocatorNodes(const RawProcessMemoryNode& source,
                                           GlobalNodeGraph* global_graph,
                                           Process* process_graph) {
  // Turn each raw node into a node in the graph, placed in the appropriate
  // process graph or the shared (global) graph.
  for (const auto& path_to_node : source.allocator_nodes()) {
    const std::string& path = path_to_node.first;
    const RawMemoryGraphNode& raw_node = *path_to_node.second;

    // All global nodes (i.e. those starting with global/) should be redirected
    // to the shared graph.
    bool is_global = base::StartsWith(path, "global/");
    Process* process =
        is_global ? global_graph->shared_memory_graph() : process_graph;

    Node* node;
    auto node_iterator = global_graph->nodes_by_id().find(raw_node.id());
    if (node_iterator == global_graph->nodes_by_id().end()) {
      // Storing whether the node is weak here will allow for later
      // computations on whether or not the node should be removed.
      bool is_weak = raw_node.flags() & RawMemoryGraphNode::Flags::kWeak;
      node = process->CreateNode(raw_node.id(), path, is_weak);
    } else {
      node = node_iterator->second;

      PERFETTO_DCHECK(node == process->FindNode(path));

      PERFETTO_DCHECK(is_global);
    }

    // Copy any entries not already present into the node.
    for (auto& entry : raw_node.entries()) {
      switch (entry.entry_type) {
        case RawMemoryGraphNode::MemoryNodeEntry::EntryType::kUint64:
          node->AddEntry(entry.name, EntryUnitsFromString(entry.units),
                         entry.value_uint64);
          break;
        case RawMemoryGraphNode::MemoryNodeEntry::EntryType::kString:
          node->AddEntry(entry.name, entry.value_string);
          break;
      }
    }
  }
}

// static
void GraphProcessor::AddEdges(const RawProcessMemoryNode& source,
                              GlobalNodeGraph* global_graph) {
  const auto& nodes_by_id = global_graph->nodes_by_id();
  for (const auto& id_to_edge : source.allocator_nodes_edges()) {
    auto& edge = id_to_edge.second;

    // Find the source and target nodes in the global map by id.
    auto source_it = nodes_by_id.find(edge->source);
    auto target_it = nodes_by_id.find(edge->target);

    if (source_it == nodes_by_id.end()) {
      // If the source is missing then simply pretend the edge never existed,
      // leading to the memory being allocated to the target (if it exists).
      continue;
    } else if (target_it == nodes_by_id.end()) {
      // If the target is lost but the source is present, then also ignore
      // this edge for now.
      // TODO(lalitm): see crbug.com/770712 for the permanent fix for this
      // issue.
      continue;
    } else {
      // Add an edge indicating the source node owns the memory of the
      // target node with the given importance of the edge.
      global_graph->AddNodeOwnershipEdge(source_it->second, target_it->second,
                                         edge->importance);
    }
  }
}

// static
void GraphProcessor::MarkImplicitWeakParentsRecursively(Node* node) {
  // Ensure that we aren't in a bad state where we have an implicit node
  // which doesn't have any children (the root node is exempt from this).
  PERFETTO_DCHECK(node->is_explicit() || !node->children()->empty() ||
                  !node->parent());

  // Check that at this stage, any node which is weak is only so because
  // it was explicitly created as such.
  PERFETTO_DCHECK(!node->is_weak() || node->is_explicit());

  // If a node is already weak then all children will be marked weak at a
  // later stage.
  if (node->is_weak())
    return;

  // Recurse into each child and find out if all the children of this node are
  // weak.
  bool all_children_weak = true;
  for (const auto& path_to_child : *node->children()) {
    MarkImplicitWeakParentsRecursively(path_to_child.second);
    all_children_weak = all_children_weak && path_to_child.second->is_weak();
  }

  // If all the children are weak and the parent is only an implicit one, then
  // we consider the parent weak as well and will later remove it.
  node->set_weak(!node->is_explicit() && all_children_weak);
}

// static
void GraphProcessor::MarkWeakOwnersAndChildrenRecursively(
    Node* node,
    std::set<const Node*>* visited) {
  // If we've already visited this node then nothing to do.
  if (visited->count(node) != 0)
    return;

  // If we haven't visited the node which this node owns then wait for that.
  if (node->owns_edge() && visited->count(node->owns_edge()->target()) == 0)
    return;

  // If we haven't visited the node's parent then wait for that.
  if (node->parent() && visited->count(node->parent()) == 0)
    return;

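  // (A node skipped by the early returns above is not lost: once its owned
  // node or parent has been visited, the recursive calls below will reach
  // this node again.)
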
  // If either the node we own or our parent is weak, then mark this node
  // as weak.
  if ((node->owns_edge() && node->owns_edge()->target()->is_weak()) ||
      (node->parent() && node->parent()->is_weak())) {
    node->set_weak(true);
  }
  visited->insert(node);

  // Recurse into each owner node to mark any other nodes.
  for (auto* owned_by_edge : *node->owned_by_edges()) {
    MarkWeakOwnersAndChildrenRecursively(owned_by_edge->source(), visited);
  }

  // Recurse into each child to mark any weak children and their descendants.
  for (const auto& path_to_child : *node->children()) {
    MarkWeakOwnersAndChildrenRecursively(path_to_child.second, visited);
  }
}

// static
void GraphProcessor::RemoveWeakNodesRecursively(Node* node) {
  auto* children = node->children();
  for (auto child_it = children->begin(); child_it != children->end();) {
    Node* child = child_it->second;

    // If the node is weak, remove it. This automatically makes all
    // descendants unreachable from the parents. If this node is owned
    // by another, it will have been marked earlier in
    // |MarkWeakOwnersAndChildrenRecursively| and so will be removed
    // by this method at some point.
    if (child->is_weak()) {
      child_it = children->erase(child_it);
      continue;
    }

    // We should never be in a situation where we're about to
    // keep a node which owns a weak node (which will be/has been
    // removed).
    PERFETTO_DCHECK(!child->owns_edge() ||
                    !child->owns_edge()->target()->is_weak());

    // Descend and remove all weak child nodes.
    RemoveWeakNodesRecursively(child);

    // Remove all edges with owner nodes which are weak.
    std::vector<Edge*>* owned_by_edges = child->owned_by_edges();
    auto new_end =
        std::remove_if(owned_by_edges->begin(), owned_by_edges->end(),
                       [](Edge* edge) { return edge->source()->is_weak(); });
    owned_by_edges->erase(new_end, owned_by_edges->end());

    ++child_it;
  }
}

// static
void GraphProcessor::AssignTracingOverhead(const std::string& allocator,
                                           GlobalNodeGraph* global_graph,
                                           Process* process) {
  // This method should only be called if the allocator node exists.
  PERFETTO_DCHECK(process->FindNode(allocator));

  // Check that the tracing node exists and isn't already owning another node.
  Node* tracing_node = process->FindNode("tracing");
  if (!tracing_node)
    return;

  // This should be the first edge associated with the tracing node.
  PERFETTO_DCHECK(!tracing_node->owns_edge());

  // Create the node under the allocator to which tracing overhead can be
  // assigned.
  std::string child_name = allocator + "/allocated_objects/tracing_overhead";
  Node* child_node = process->CreateNode(MemoryAllocatorNodeId(), child_name,
                                         false /* weak */);

  // Assign the overhead of tracing to the tracing node.
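  // The ownership edge attributes that slice of the allocator's memory to
  // tracing, so the same bytes are not counted twice.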
  global_graph->AddNodeOwnershipEdge(tracing_node, child_node,
                                     0 /* importance */);
}

// static
Node::Entry GraphProcessor::AggregateNumericWithNameForNode(
    Node* node,
    const std::string& name) {
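  // Sum the named uint64 entry across this node's children, e.g. adding up
  // each child's count-of-objects entry to produce the parent's aggregate.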
  bool first = true;
  Node::Entry::ScalarUnits units = Node::Entry::ScalarUnits::kObjects;
  uint64_t aggregated = 0;
  for (auto& path_to_child : *node->children()) {
    auto* entries = path_to_child.second->entries();

    // Retrieve the entry with the given column name.
    auto name_to_entry_it = entries->find(name);
    if (name_to_entry_it == entries->end())
      continue;

    // Extract the entry from the iterator.
    const Node::Entry& entry = name_to_entry_it->second;

    // Ensure that the entry is numeric.
    PERFETTO_DCHECK(entry.type == Node::Entry::Type::kUInt64);

    // Check that the units of every child's entry with the given name are the
    // same (i.e. we don't get a number for one child and a size for another
    // child). We do this by having a DCHECK that the units match the first
    // child's units.
    PERFETTO_DCHECK(first || units == entry.units);
    units = entry.units;
    aggregated += entry.value_uint64;
    first = false;
  }
  return Node::Entry(units, aggregated);
}

// static
void GraphProcessor::AggregateNumericsRecursively(Node* node) {
  std::set<std::string> numeric_names;

  for (const auto& path_to_child : *node->children()) {
    AggregateNumericsRecursively(path_to_child.second);
    for (const auto& name_to_entry : *path_to_child.second->entries()) {
      const std::string& name = name_to_entry.first;
      if (name_to_entry.second.type == Node::Entry::Type::kUInt64 &&
          name != kSizeEntryName && name != kEffectiveSizeEntryName) {
        numeric_names.insert(name);
      }
    }
  }

  for (auto& name : numeric_names) {
    node->entries()->emplace(name, AggregateNumericWithNameForNode(node, name));
  }
}

// static
void GraphProcessor::PropagateNumericsAndDiagnosticsRecursively(Node* node) {
  for (const auto& name_to_entry : *node->entries()) {
    for (auto* edge : *node->owned_by_edges()) {
      edge->source()->entries()->insert(name_to_entry);
    }
  }
  for (const auto& path_to_child : *node->children()) {
    PropagateNumericsAndDiagnosticsRecursively(path_to_child.second);
  }
}

// static
std::optional<uint64_t> GraphProcessor::AggregateSizeForDescendantNode(
    Node* root,
    Node* descendant) {
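  // If this node owns another node which is itself a descendant of |root|,
  // count it as zero: the size is already accounted for through the owned
  // node, so counting both would double count it.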
  Edge* owns_edge = descendant->owns_edge();
  if (owns_edge && owns_edge->target()->IsDescendentOf(*root))
    return std::make_optional(0UL);

  if (descendant->children()->empty())
    return GetSizeEntryOfNode(descendant).value_or(0ul);

  std::optional<uint64_t> size;
  for (const auto& path_to_child : *descendant->children()) {
    auto c_size = AggregateSizeForDescendantNode(root, path_to_child.second);
    if (size) {
      *size += c_size.value_or(0);
    } else {
      size = std::move(c_size);
    }
  }
  return size;
}

// Assumes that this function has been called on all children and owner nodes.
// static
void GraphProcessor::CalculateSizeForNode(Node* node) {
  // Get the size at the root node if it exists.
  std::optional<uint64_t> node_size = GetSizeEntryOfNode(node);

  // Aggregate the size of all the child nodes.
  std::optional<uint64_t> aggregated_size;
  for (const auto& path_to_child : *node->children()) {
    auto c_size = AggregateSizeForDescendantNode(node, path_to_child.second);
    if (aggregated_size) {
      *aggregated_size += c_size.value_or(0ul);
    } else {
      aggregated_size = std::move(c_size);
    }
  }

  // Check that if both the aggregated and node sizes exist, the node size is
  // at least as big as the aggregated size.
  // TODO(lalitm): the following condition is triggered very often even though
  // it is a warning in JS code. Find a way to add the warning to display in UI
  // or to fix all instances where this is violated and then enable this check.
  // PERFETTO_DCHECK(!node_size || !aggregated_size || *node_size >=
  // *aggregated_size);

  // Calculate the maximal size of an owner node.
  std::optional<uint64_t> max_owner_size;
  for (auto* edge : *node->owned_by_edges()) {
    auto o_size = GetSizeEntryOfNode(edge->source());
    if (max_owner_size) {
      *max_owner_size = std::max(o_size.value_or(0ul), *max_owner_size);
    } else {
      max_owner_size = std::move(o_size);
    }
  }

  // Check that if both the owner and node sizes exist, the node size is at
  // least as big as the owner size.
  // TODO(lalitm): the following condition is triggered very often even though
  // it is a warning in JS code. Find a way to add the warning to display in UI
  // or to fix all instances where this is violated and then enable this check.
  // PERFETTO_DCHECK(!node_size || !max_owner_size || *node_size >=
  // *max_owner_size);

  // Clear out any existing size entry which may exist.
  node->entries()->erase(kSizeEntryName);

  // If no inference about size can be made then simply return.
  if (!node_size && !aggregated_size && !max_owner_size)
    return;

  // Update the node with the new size entry.
  uint64_t aggregated_size_value = aggregated_size.value_or(0ul);
  uint64_t process_size =
      std::max({node_size.value_or(0ul), aggregated_size_value,
                max_owner_size.value_or(0ul)});
  node->AddEntry(kSizeEntryName, Node::Entry::ScalarUnits::kBytes,
                 process_size);

  // If this is an intermediate node then add a ghost node which stores
  // all sizes not accounted for by the children.
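  // For example, a node of size 100 whose children only account for 70 gains
  // a 30-byte "<unspecified>" child.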
  uint64_t unaccounted = process_size - aggregated_size_value;
  if (unaccounted > 0 && !node->children()->empty()) {
    Node* unspecified = node->CreateChild("<unspecified>");
    unspecified->AddEntry(kSizeEntryName, Node::Entry::ScalarUnits::kBytes,
                          unaccounted);
  }
}

// Assumes that this function has been called on all children and owner nodes.
// static
void GraphProcessor::CalculateNodeSubSizes(Node* node) {
  // Completely skip nodes with undefined size.
  std::optional<uint64_t> size_opt = GetSizeEntryOfNode(node);
  if (!size_opt)
    return;

  // If the node is a leaf node, then both sub-sizes are equal to the size.
  if (node->children()->empty()) {
    node->add_not_owning_sub_size(*size_opt);
    node->add_not_owned_sub_size(*size_opt);
    return;
  }

  // Calculate this node's not-owning sub-size by summing up the not-owning
  // sub-sizes of children which do not own another node.
  for (const auto& path_to_child : *node->children()) {
    if (path_to_child.second->owns_edge())
      continue;
    node->add_not_owning_sub_size(path_to_child.second->not_owning_sub_size());
  }

  // Calculate this node's not-owned sub-size.
  for (const auto& path_to_child : *node->children()) {
    Node* child = path_to_child.second;

    // If the child node is not owned, then add its not-owned sub-size.
    if (child->owned_by_edges()->empty()) {
      node->add_not_owned_sub_size(child->not_owned_sub_size());
      continue;
    }

    // If the child node is owned, then add the difference between its size
    // and the largest owner.
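    // For example, a 100-byte child whose largest owner is 60 bytes
    // contributes 40 bytes to this node's not-owned sub-size.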
    uint64_t largest_owner_size = 0;
    for (Edge* edge : *child->owned_by_edges()) {
      uint64_t source_size = GetSizeEntryOfNode(edge->source()).value_or(0);
      largest_owner_size = std::max(largest_owner_size, source_size);
    }
    uint64_t child_size = GetSizeEntryOfNode(child).value_or(0);
    node->add_not_owned_sub_size(child_size - largest_owner_size);
  }
}

// static
void GraphProcessor::CalculateNodeOwnershipCoefficient(Node* node) {
  // Completely skip nodes with undefined size.
  std::optional<uint64_t> size_opt = GetSizeEntryOfNode(node);
  if (!size_opt)
    return;

  // We only need to consider owned nodes.
  if (node->owned_by_edges()->empty())
    return;

  // Sort the owners in decreasing order of ownership priority and
  // increasing order of not-owning sub-size (in case of equal priority).
  std::vector<Edge*> owners = *node->owned_by_edges();
  std::sort(owners.begin(), owners.end(), [](Edge* a, Edge* b) {
    if (a->priority() == b->priority()) {
      return a->source()->not_owning_sub_size() <
             b->source()->not_owning_sub_size();
    }
    return b->priority() < a->priority();
  });

  // Loop over the list of owners and distribute the owned node's not-owned
  // sub-size among them according to their ownership priority and
  // not-owning sub-size.
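  // Worked example: two owners at equal priority with not-owning sub-sizes 6
  // and 8. The first 6 units are attributed half to each (3 + 3); the next 2
  // units go to the second owner alone. The owning coefficients are then
  // 3 / 6 = 0.5 for the first owner and (3 + 2) / 8 = 0.625 for the second.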
  uint64_t already_attributed_sub_size = 0;
  for (auto current_it = owners.begin(); current_it != owners.end();) {
    // Find the position of the first owner with lower priority.
    int current_priority = (*current_it)->priority();
    auto next_it =
        std::find_if(current_it, owners.end(), [current_priority](Edge* edge) {
          return edge->priority() < current_priority;
        });

    // Compute the number of nodes which have the same priority as current.
    size_t difference = static_cast<size_t>(std::distance(current_it, next_it));

    // Visit the owners with the same priority in increasing order of
    // not-owning sub-size, split the owned memory among them appropriately,
    // and calculate their owning coefficients.
    double attributed_not_owning_sub_size = 0;
    for (; current_it != next_it; current_it++) {
      uint64_t not_owning_sub_size =
          (*current_it)->source()->not_owning_sub_size();
      if (not_owning_sub_size > already_attributed_sub_size) {
        attributed_not_owning_sub_size +=
            static_cast<double>(not_owning_sub_size -
                                already_attributed_sub_size) /
            static_cast<double>(difference);
        already_attributed_sub_size = not_owning_sub_size;
      }

      if (not_owning_sub_size != 0) {
        double coeff = attributed_not_owning_sub_size /
                       static_cast<double>(not_owning_sub_size);
        (*current_it)->source()->set_owning_coefficient(coeff);
      }
      difference--;
    }

    // At the end of this loop, we should move to a node with a lower priority.
    PERFETTO_DCHECK(current_it == next_it);
  }

  // Attribute the remainder of the owned node's not-owned sub-size to
  // the node itself and calculate its owned coefficient.
  uint64_t not_owned_sub_size = node->not_owned_sub_size();
  if (not_owned_sub_size != 0) {
    double remainder_sub_size =
        static_cast<double>(not_owned_sub_size - already_attributed_sub_size);
    node->set_owned_coefficient(remainder_sub_size /
                                static_cast<double>(not_owned_sub_size));
  }
}

// static
void GraphProcessor::CalculateNodeCumulativeOwnershipCoefficient(Node* node) {
  // Completely skip nodes with undefined size.
  std::optional<uint64_t> size_opt = GetSizeEntryOfNode(node);
  if (!size_opt)
    return;

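  // The cumulative owned coefficient is the product of this node's owned
  // coefficient and those of all its ancestors; the cumulative owning
  // coefficient is propagated through the owns-edge when one exists and
  // through the parent otherwise.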
  double cumulative_owned_coefficient = node->owned_coefficient();
  if (node->parent()) {
    cumulative_owned_coefficient *=
        node->parent()->cumulative_owned_coefficient();
  }
  node->set_cumulative_owned_coefficient(cumulative_owned_coefficient);

  if (node->owns_edge()) {
    node->set_cumulative_owning_coefficient(
        node->owning_coefficient() *
        node->owns_edge()->target()->cumulative_owning_coefficient());
  } else if (node->parent()) {
    node->set_cumulative_owning_coefficient(
        node->parent()->cumulative_owning_coefficient());
  } else {
    node->set_cumulative_owning_coefficient(1);
  }
}

// static
void GraphProcessor::CalculateNodeEffectiveSize(Node* node) {
  // Completely skip nodes with undefined size. As a result, each node will
  // have a defined effective size if and only if it has a defined size.
  std::optional<uint64_t> size_opt = GetSizeEntryOfNode(node);
  if (!size_opt) {
    node->entries()->erase(kEffectiveSizeEntryName);
    return;
  }

  uint64_t effective_size = 0;
  if (node->children()->empty()) {
    // Leaf node.
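    // Scale the raw size by both cumulative coefficients; e.g. a 1000-byte
    // leaf with cumulative owning coefficient 0.5 and cumulative owned
    // coefficient 0.8 gets an effective size of 400 bytes.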
    effective_size = static_cast<uint64_t>(
        static_cast<double>(*size_opt) * node->cumulative_owning_coefficient() *
        node->cumulative_owned_coefficient());
  } else {
    // Non-leaf node.
    for (const auto& path_to_child : *node->children()) {
      Node* child = path_to_child.second;
      if (!GetSizeEntryOfNode(child))
        continue;
      effective_size +=
          child->entries()->find(kEffectiveSizeEntryName)->second.value_uint64;
    }
  }
  node->AddEntry(kEffectiveSizeEntryName, Node::Entry::ScalarUnits::kBytes,
                 effective_size);
}

}  // namespace trace_processor
}  // namespace perfetto