// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/basic-block-instrumentor.h"

#include <sstream>

#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/schedule.h"
#include "src/objects/objects-inl.h"

namespace v8 {
namespace internal {
namespace compiler {

// Finds the first point in an already-scheduled block at which new nodes can
// be inserted without upsetting the register allocator.
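// For example, in a block scheduled as [Parameter, Phi, EffectPhi, Int32Add,
// ...], the iterator returned here points at the Int32Add: everything before
// it must stay at the front of the block.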
static NodeVector::iterator FindInsertionPoint(BasicBlock* block) {
  NodeVector::iterator i = block->begin();
  for (; i != block->end(); ++i) {
    const Operator* op = (*i)->op();
    if (OperatorProperties::IsBasicBlockBegin(op)) continue;
    switch (op->opcode()) {
      case IrOpcode::kParameter:
      case IrOpcode::kPhi:
      case IrOpcode::kEffectPhi:
        continue;
    }
    break;
  }
  return i;
}

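// Emits a pointer-width integer constant: an Int64Constant on 64-bit targets
// (kSystemPointerSize == 8) and an Int32Constant on 32-bit targets.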
static const Operator* IntPtrConstant(CommonOperatorBuilder* common,
                                      intptr_t value) {
  return kSystemPointerSize == 8
             ? common->Int64Constant(value)
             : common->Int32Constant(static_cast<int32_t>(value));
}

// TODO(dcarney): need to mark code as non-serializable.
static const Operator* PointerConstant(CommonOperatorBuilder* common,
                                       const void* ptr) {
  intptr_t ptr_as_int = reinterpret_cast<intptr_t>(ptr);
  return IntPtrConstant(common, ptr_as_int);
}

BasicBlockProfilerData* BasicBlockInstrumentor::Instrument(
    OptimizedCompilationInfo* info, Graph* graph, Schedule* schedule,
    Isolate* isolate) {
  // Basic block profiling disables concurrent compilation, so handle deref is
  // fine.
  AllowHandleDereference allow_handle_dereference;
  // Skip the exit block in profiles, since the register allocator can't handle
  // it and entry into it means falling off the end of the function anyway.
  size_t n_blocks = schedule->RpoBlockCount() - 1;
  BasicBlockProfilerData* data = BasicBlockProfiler::Get()->NewData(n_blocks);
  // Set the function name.
  data->SetFunctionName(info->GetDebugName());
  // Capture the schedule string before instrumentation.
  if (FLAG_turbo_profiling_verbose) {
    std::ostringstream os;
    os << *schedule;
    data->SetSchedule(os);
  }
  // Check whether we should write counts to a JS heap object or to the
  // BasicBlockProfilerData directly. The JS heap object is only used for
  // builtins.
  bool on_heap_counters = isolate && isolate->IsGeneratingEmbeddedBuiltins();
  // Add the increment instructions to the start of every block.
  CommonOperatorBuilder common(graph->zone());
  MachineOperatorBuilder machine(graph->zone());
  Node* counters_array = nullptr;
  if (on_heap_counters) {
    // Allocation is disallowed here, so rather than referring to an actual
    // counters array, create a reference to a special marker object. This
    // object will get fixed up later in the constants table (see
    // PatchBasicBlockCountersReference). An important and subtle point: we
    // cannot use the root handle basic_block_counters_marker_handle() and must
    // create a new separate handle. Otherwise
    // TurboAssemblerBase::IndirectLoadConstant would helpfully emit a
    // root-relative load rather than putting this value in the constants table
    // where we expect it to be for patching.
    counters_array = graph->NewNode(common.HeapConstant(Handle<HeapObject>::New(
        ReadOnlyRoots(isolate).basic_block_counters_marker(), isolate)));
  } else {
    counters_array = graph->NewNode(PointerConstant(&common, data->counts()));
  }
  Node* zero = graph->NewNode(common.Int32Constant(0));
  Node* one = graph->NewNode(common.Int32Constant(1));
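  // counters_array, zero, and one are shared by every instrumented block;
  // they are scheduled into the first block only (see insertion_start below).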
  BasicBlockVector* blocks = schedule->rpo_order();
  size_t block_number = 0;
  for (BasicBlockVector::iterator it = blocks->begin(); block_number < n_blocks;
       ++it, ++block_number) {
    BasicBlock* block = (*it);
    // Iteration is already in reverse post-order.
    DCHECK_EQ(block->rpo_number(), block_number);
    data->SetBlockId(block_number, block->id().ToInt());
    // It is unnecessary to wire effect and control deps for load and store
    // since this happens after scheduling.
    // Construct increment operation.
    int offset_to_counter_value = static_cast<int>(block_number) * kInt32Size;
    if (on_heap_counters) {
      offset_to_counter_value += ByteArray::kHeaderSize - kHeapObjectTag;
    }
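    // Each counter occupies a 32-bit slot. On-heap, the counters live in a
    // ByteArray, so the offset additionally skips the array header and
    // compensates for the tag bits carried by tagged heap pointers.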
    Node* offset_to_counter =
        graph->NewNode(IntPtrConstant(&common, offset_to_counter_value));
    Node* load =
        graph->NewNode(machine.Load(MachineType::Uint32()), counters_array,
                       offset_to_counter, graph->start(), graph->start());
    Node* inc = graph->NewNode(machine.Int32Add(), load, one);

    // Branchless saturation, because we've already run the scheduler, so
    // introducing extra control flow here would be surprising.
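    // Uint32LessThan(inc, load) is 1 exactly when the add wrapped around.
    // Negating it (0 - 1) turns that into an all-ones mask, so the Word32Or
    // pins a wrapped counter at UINT32_MAX; otherwise the mask is 0 and the
    // incremented value passes through unchanged.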
    Node* overflow = graph->NewNode(machine.Uint32LessThan(), inc, load);
    Node* overflow_mask = graph->NewNode(machine.Int32Sub(), zero, overflow);
    Node* saturated_inc =
        graph->NewNode(machine.Word32Or(), inc, overflow_mask);

    Node* store =
        graph->NewNode(machine.Store(StoreRepresentation(
                           MachineRepresentation::kWord32, kNoWriteBarrier)),
                       counters_array, offset_to_counter, saturated_inc,
                       graph->start(), graph->start());
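    // No write barrier is needed: the stored value is a raw 32-bit integer,
    // not a tagged pointer, so the GC never needs to trace it.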
    // Insert the new nodes.
    static const int kArraySize = 10;
    Node* to_insert[kArraySize] = {
        counters_array, zero, one,      offset_to_counter,
        load,           inc,  overflow, overflow_mask,
        saturated_inc,  store};
    // The first three Nodes are constant across all blocks.
    int insertion_start = block_number == 0 ? 0 : 3;
    NodeVector::iterator insertion_point = FindInsertionPoint(block);
    block->InsertNodes(insertion_point, &to_insert[insertion_start],
                       &to_insert[kArraySize]);
    // Tell the scheduler about the new nodes.
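    // This keeps schedule->block(node) consistent, which later phases (e.g.
    // instruction selection) rely on.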
    for (int i = insertion_start; i < kArraySize; ++i) {
      schedule->SetBlockForNode(block, to_insert[i]);
    }
  }
  return data;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8