// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/basic-block-instrumentor.h"

#include <sstream>

#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/schedule.h"
#include "src/objects/objects-inl.h"

namespace v8 {
namespace internal {
namespace compiler {

// Find the first place in an already-scheduled block where new nodes can be
// inserted without upsetting the register allocator.
static NodeVector::iterator FindInsertionPoint(BasicBlock* block) {
  NodeVector::iterator i = block->begin();
  for (; i != block->end(); ++i) {
    const Operator* op = (*i)->op();
    if (OperatorProperties::IsBasicBlockBegin(op)) continue;
    switch (op->opcode()) {
      case IrOpcode::kParameter:
      case IrOpcode::kPhi:
      case IrOpcode::kEffectPhi:
        continue;
    }
    break;
  }
  return i;
}

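// Emits a pointer-width integer constant: a 64-bit constant on 64-bit
// targets, otherwise a 32-bit constant.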
static const Operator* IntPtrConstant(CommonOperatorBuilder* common,
                                      intptr_t value) {
  return kSystemPointerSize == 8
             ? common->Int64Constant(value)
             : common->Int32Constant(static_cast<int32_t>(value));
}

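// Embeds a raw host pointer into the graph as a pointer-width integer
// constant.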
// TODO(dcarney): need to mark code as non-serializable.
static const Operator* PointerConstant(CommonOperatorBuilder* common,
                                       const void* ptr) {
  intptr_t ptr_as_int = reinterpret_cast<intptr_t>(ptr);
  return IntPtrConstant(common, ptr_as_int);
}

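// Instruments an already-scheduled graph for basic block profiling: every
// block except the exit block gets a load/increment/store sequence on its
// counter inserted at its start. Returns the BasicBlockProfilerData that
// describes the counters (for builtins, the counter reference is patched in
// later via the constants table).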
BasicBlockProfilerData* BasicBlockInstrumentor::Instrument(
    OptimizedCompilationInfo* info, Graph* graph, Schedule* schedule,
    Isolate* isolate) {
  // Basic block profiling disables concurrent compilation, so handle deref is
  // fine.
  AllowHandleDereference allow_handle_dereference;
  // Skip the exit block in profiles, since the register allocator can't handle
  // it and entry into it means falling off the end of the function anyway.
  size_t n_blocks = schedule->RpoBlockCount() - 1;
  BasicBlockProfilerData* data = BasicBlockProfiler::Get()->NewData(n_blocks);
  // Set the function name.
  data->SetFunctionName(info->GetDebugName());
  // Capture the schedule string before instrumentation.
  if (FLAG_turbo_profiling_verbose) {
    std::ostringstream os;
    os << *schedule;
    data->SetSchedule(os);
  }
  // Check whether we should write counts to a JS heap object or to the
  // BasicBlockProfilerData directly. The JS heap object is only used for
  // builtins.
  bool on_heap_counters = isolate && isolate->IsGeneratingEmbeddedBuiltins();
  // Add the increment instructions to the start of every block.
  CommonOperatorBuilder common(graph->zone());
  MachineOperatorBuilder machine(graph->zone());
  Node* counters_array = nullptr;
  if (on_heap_counters) {
    // Allocation is disallowed here, so rather than referring to an actual
    // counters array, create a reference to a special marker object. This
    // object will get fixed up later in the constants table (see
    // PatchBasicBlockCountersReference). An important and subtle point: we
    // cannot use the root handle basic_block_counters_marker_handle() and must
    // create a new separate handle. Otherwise
    // TurboAssemblerBase::IndirectLoadConstant would helpfully emit a
    // root-relative load rather than putting this value in the constants table
    // where we expect it to be for patching.
    counters_array = graph->NewNode(common.HeapConstant(Handle<HeapObject>::New(
        ReadOnlyRoots(isolate).basic_block_counters_marker(), isolate)));
  } else {
    counters_array = graph->NewNode(PointerConstant(&common, data->counts()));
  }
  Node* one = graph->NewNode(common.Int32Constant(1));
  BasicBlockVector* blocks = schedule->rpo_order();
  size_t block_number = 0;
  for (BasicBlockVector::iterator it = blocks->begin(); block_number < n_blocks;
       ++it, ++block_number) {
    BasicBlock* block = (*it);
    // Iteration is already in reverse post-order.
    DCHECK_EQ(block->rpo_number(), block_number);
    data->SetBlockId(block_number, block->id().ToInt());
    // It is unnecessary to wire effect and control deps for load and store
    // since this happens after scheduling.
    // Construct increment operation.
    int offset_to_counter_value = static_cast<int>(block_number) * kInt32Size;
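    // On-heap counters live in a ByteArray, so the raw data starts
    // kHeaderSize bytes past the (tagged) object pointer; fold that
    // adjustment into the offset.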
    if (on_heap_counters) {
      offset_to_counter_value += ByteArray::kHeaderSize - kHeapObjectTag;
    }
    Node* offset_to_counter =
        graph->NewNode(IntPtrConstant(&common, offset_to_counter_value));
    Node* load =
        graph->NewNode(machine.Load(MachineType::Uint32()), counters_array,
                       offset_to_counter, graph->start(), graph->start());
    Node* inc = graph->NewNode(machine.Int32Add(), load, one);
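    // Store the incremented value back. The counter is a raw word32, so no
    // write barrier is needed even when the counters live on the heap.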
    Node* store = graph->NewNode(
        machine.Store(StoreRepresentation(MachineRepresentation::kWord32,
                                          kNoWriteBarrier)),
        counters_array, offset_to_counter, inc, graph->start(), graph->start());
    // Insert the new nodes.
    static const int kArraySize = 6;
    Node* to_insert[kArraySize] = {counters_array, one, offset_to_counter,
                                   load, inc, store};
    // The first two Nodes are constant across all blocks.
    int insertion_start = block_number == 0 ? 0 : 2;
    NodeVector::iterator insertion_point = FindInsertionPoint(block);
    block->InsertNodes(insertion_point, &to_insert[insertion_start],
                       &to_insert[kArraySize]);
    // Tell the scheduler about the new nodes.
    for (int i = insertion_start; i < kArraySize; ++i) {
      schedule->SetBlockForNode(block, to_insert[i]);
    }
  }
  return data;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8