// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/interpreter/bytecode-array-writer.h"

#include "src/api-inl.h"
#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-node.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/log.h"
#include "src/objects-inl.h"

namespace v8 {
namespace internal {
namespace interpreter {

STATIC_CONST_MEMBER_DEFINITION const size_t
    BytecodeArrayWriter::kMaxSizeOfPackedBytecode;

BytecodeArrayWriter::BytecodeArrayWriter(
    Zone* zone, ConstantArrayBuilder* constant_array_builder,
    SourcePositionTableBuilder::RecordingMode source_position_mode)
    : bytecodes_(zone),
      unbound_jumps_(0),
      source_position_table_builder_(source_position_mode),
      constant_array_builder_(constant_array_builder),
      last_bytecode_(Bytecode::kIllegal),
      last_bytecode_offset_(0),
      last_bytecode_had_source_info_(false),
      elide_noneffectful_bytecodes_(FLAG_ignition_elide_noneffectful_bytecodes),
      exit_seen_in_block_(false) {
  bytecodes_.reserve(512);  // Derived via experimentation.
}

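// Finalizes the accumulated bytecodes, constant pool, handler table and
// source position table into an on-heap BytecodeArray.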
Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
    Isolate* isolate, int register_count, int parameter_count,
    Handle<ByteArray> handler_table) {
  DCHECK_EQ(0, unbound_jumps_);

  int bytecode_size = static_cast<int>(bytecodes()->size());
  int frame_size = register_count * kPointerSize;
  Handle<FixedArray> constant_pool =
      constant_array_builder()->ToFixedArray(isolate);
  Handle<ByteArray> source_position_table =
      source_position_table_builder()->ToSourcePositionTable(isolate);
  Handle<BytecodeArray> bytecode_array = isolate->factory()->NewBytecodeArray(
      bytecode_size, &bytecodes()->front(), frame_size, parameter_count,
      constant_pool);
  bytecode_array->set_handler_table(*handler_table);
  bytecode_array->set_source_position_table(*source_position_table);
  LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(
                              bytecode_array->GetFirstBytecodeAddress(),
                              *source_position_table));
  return bytecode_array;
}

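// Appends a non-jump bytecode. Bytecodes following an unconditional exit from
// the current basic block are dropped, and redundant accumulator loads may be
// elided.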
void BytecodeArrayWriter::Write(BytecodeNode* node) {
  DCHECK(!Bytecodes::IsJump(node->bytecode()));

  if (exit_seen_in_block_) return;  // Don't emit dead code.
  UpdateExitSeenInBlock(node->bytecode());
  MaybeElideLastBytecode(node->bytecode(), node->source_info().is_valid());

  UpdateSourcePositionTable(node);
  EmitBytecode(node);
}

void BytecodeArrayWriter::WriteJump(BytecodeNode* node, BytecodeLabel* label) {
  DCHECK(Bytecodes::IsJump(node->bytecode()));

  // TODO(rmcilroy): For forward jumps we could also mark the label as dead,
  // thereby avoiding emitting dead code when we bind the label.
  if (exit_seen_in_block_) return;  // Don't emit dead code.
  UpdateExitSeenInBlock(node->bytecode());
  MaybeElideLastBytecode(node->bytecode(), node->source_info().is_valid());

  UpdateSourcePositionTable(node);
  EmitJump(node, label);
}

void BytecodeArrayWriter::WriteSwitch(BytecodeNode* node,
                                      BytecodeJumpTable* jump_table) {
  DCHECK(Bytecodes::IsSwitch(node->bytecode()));

  // TODO(rmcilroy): For jump tables we could also mark the table as dead,
  // thereby avoiding emitting dead code when we bind the entries.
  if (exit_seen_in_block_) return;  // Don't emit dead code.
  UpdateExitSeenInBlock(node->bytecode());
  MaybeElideLastBytecode(node->bytecode(), node->source_info().is_valid());

  UpdateSourcePositionTable(node);
  EmitSwitch(node, jump_table);
}

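// Binds |label| to the current bytecode offset, patching the forward jump that
// referenced it, if any.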
void BytecodeArrayWriter::BindLabel(BytecodeLabel* label) {
  size_t current_offset = bytecodes()->size();
  if (label->is_forward_target()) {
    // An earlier jump instruction refers to this label. Update its location.
    PatchJump(current_offset, label->offset());
    // Now treat as if the label will only be back referred to.
  }
  label->bind_to(current_offset);
  InvalidateLastBytecode();
  exit_seen_in_block_ = false;  // Starting a new basic block.
}

void BytecodeArrayWriter::BindLabel(const BytecodeLabel& target,
                                    BytecodeLabel* label) {
  DCHECK(!label->is_bound());
  DCHECK(target.is_bound());
  if (label->is_forward_target()) {
    // An earlier jump instruction refers to this label. Update its location.
    PatchJump(target.offset(), label->offset());
    // Now treat as if the label will only be back referred to.
  }
  label->bind_to(target.offset());
  InvalidateLastBytecode();
  // exit_seen_in_block_ was reset when target was bound, so shouldn't be
  // changed here.
}

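// Binds one case of a switch's jump table: the offset of the current bytecode
// relative to the switch bytecode is stored as a Smi in the table's constant
// pool entry.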
void BytecodeArrayWriter::BindJumpTableEntry(BytecodeJumpTable* jump_table,
                                             int case_value) {
  DCHECK(!jump_table->is_bound(case_value));

  size_t current_offset = bytecodes()->size();
  size_t relative_jump = current_offset - jump_table->switch_bytecode_offset();

  constant_array_builder()->SetJumpTableSmi(
      jump_table->ConstantPoolEntryFor(case_value),
      Smi::FromInt(static_cast<int>(relative_jump)));
  jump_table->mark_bound(case_value);

  InvalidateLastBytecode();
  exit_seen_in_block_ = false;  // Starting a new basic block.
}

void BytecodeArrayWriter::UpdateSourcePositionTable(
    const BytecodeNode* const node) {
  int bytecode_offset = static_cast<int>(bytecodes()->size());
  const BytecodeSourceInfo& source_info = node->source_info();
  if (source_info.is_valid()) {
    source_position_table_builder()->AddPosition(
        bytecode_offset, SourcePosition(source_info.source_position()),
        source_info.is_statement());
  }
}

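// Marks the current basic block as exited when |bytecode| unconditionally
// leaves it (return, throw, re-throw, abort, unconditional jump or generator
// suspend), so that subsequent bytecodes in the block are dropped as dead code.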
void BytecodeArrayWriter::UpdateExitSeenInBlock(Bytecode bytecode) {
  switch (bytecode) {
    case Bytecode::kReturn:
    case Bytecode::kThrow:
    case Bytecode::kReThrow:
    case Bytecode::kAbort:
    case Bytecode::kJump:
    case Bytecode::kJumpConstant:
    case Bytecode::kSuspendGenerator:
      exit_seen_in_block_ = true;
      break;
    default:
      break;
  }
}

void BytecodeArrayWriter::MaybeElideLastBytecode(Bytecode next_bytecode,
                                                 bool has_source_info) {
  if (!elide_noneffectful_bytecodes_) return;

  // If the last bytecode loaded the accumulator without any external effect,
  // and the next bytecode clobbers this load without reading the accumulator,
  // then the previous bytecode can be elided as it has no effect.
  if (Bytecodes::IsAccumulatorLoadWithoutEffects(last_bytecode_) &&
      Bytecodes::GetAccumulatorUse(next_bytecode) == AccumulatorUse::kWrite &&
      (!last_bytecode_had_source_info_ || !has_source_info)) {
    DCHECK_GT(bytecodes()->size(), last_bytecode_offset_);
    bytecodes()->resize(last_bytecode_offset_);
    // If the last bytecode had source info we will transfer the source info
    // to this bytecode.
    has_source_info |= last_bytecode_had_source_info_;
  }
  last_bytecode_ = next_bytecode;
  last_bytecode_had_source_info_ = has_source_info;
  last_bytecode_offset_ = bytecodes()->size();
}

void BytecodeArrayWriter::InvalidateLastBytecode() {
  last_bytecode_ = Bytecode::kIllegal;
}

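// Appends the bytecode and its operands to the bytecode stream, emitting a
// scaling prefix bytecode (Wide/ExtraWide) first when the node's operands use
// a wider-than-single operand scale.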
void BytecodeArrayWriter::EmitBytecode(const BytecodeNode* const node) {
  DCHECK_NE(node->bytecode(), Bytecode::kIllegal);

  Bytecode bytecode = node->bytecode();
  OperandScale operand_scale = node->operand_scale();

  if (operand_scale != OperandScale::kSingle) {
    Bytecode prefix = Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
    bytecodes()->push_back(Bytecodes::ToByte(prefix));
  }
  bytecodes()->push_back(Bytecodes::ToByte(bytecode));

  const uint32_t* const operands = node->operands();
  const int operand_count = node->operand_count();
  const OperandSize* operand_sizes =
      Bytecodes::GetOperandSizes(bytecode, operand_scale);
  for (int i = 0; i < operand_count; ++i) {
    switch (operand_sizes[i]) {
      case OperandSize::kNone:
        UNREACHABLE();
        break;
      case OperandSize::kByte:
        bytecodes()->push_back(static_cast<uint8_t>(operands[i]));
        break;
      case OperandSize::kShort: {
        uint16_t operand = static_cast<uint16_t>(operands[i]);
        const uint8_t* raw_operand = reinterpret_cast<const uint8_t*>(&operand);
        bytecodes()->push_back(raw_operand[0]);
        bytecodes()->push_back(raw_operand[1]);
        break;
      }
      case OperandSize::kQuad: {
        const uint8_t* raw_operand =
            reinterpret_cast<const uint8_t*>(&operands[i]);
        bytecodes()->push_back(raw_operand[0]);
        bytecodes()->push_back(raw_operand[1]);
        bytecodes()->push_back(raw_operand[2]);
        bytecodes()->push_back(raw_operand[3]);
        break;
      }
    }
  }
}

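// Maps an immediate-operand jump bytecode to its constant pool variant, used
// when a patched forward jump offset does not fit in the reserved operand.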
// static
Bytecode GetJumpWithConstantOperand(Bytecode jump_bytecode) {
  switch (jump_bytecode) {
    case Bytecode::kJump:
      return Bytecode::kJumpConstant;
    case Bytecode::kJumpIfTrue:
      return Bytecode::kJumpIfTrueConstant;
    case Bytecode::kJumpIfFalse:
      return Bytecode::kJumpIfFalseConstant;
    case Bytecode::kJumpIfToBooleanTrue:
      return Bytecode::kJumpIfToBooleanTrueConstant;
    case Bytecode::kJumpIfToBooleanFalse:
      return Bytecode::kJumpIfToBooleanFalseConstant;
    case Bytecode::kJumpIfNull:
      return Bytecode::kJumpIfNullConstant;
    case Bytecode::kJumpIfNotNull:
      return Bytecode::kJumpIfNotNullConstant;
    case Bytecode::kJumpIfUndefined:
      return Bytecode::kJumpIfUndefinedConstant;
    case Bytecode::kJumpIfNotUndefined:
      return Bytecode::kJumpIfNotUndefinedConstant;
    case Bytecode::kJumpIfJSReceiver:
      return Bytecode::kJumpIfJSReceiverConstant;
    default:
      UNREACHABLE();
  }
}

void BytecodeArrayWriter::PatchJumpWith8BitOperand(size_t jump_location,
                                                   int delta) {
  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
  DCHECK(Bytecodes::IsForwardJump(jump_bytecode));
  DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
  DCHECK_EQ(Bytecodes::GetOperandType(jump_bytecode, 0), OperandType::kUImm);
  DCHECK_GT(delta, 0);
  size_t operand_location = jump_location + 1;
  DCHECK_EQ(bytecodes()->at(operand_location), k8BitJumpPlaceholder);
  if (Bytecodes::ScaleForUnsignedOperand(delta) == OperandScale::kSingle) {
    // The jump fits within the range of a UImm8 operand, so cancel
    // the reservation and jump directly.
    constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
    bytecodes()->at(operand_location) = static_cast<uint8_t>(delta);
  } else {
    // The jump does not fit within the range of a UImm8 operand, so
    // commit the reservation, putting the offset into the constant pool,
    // and update the jump instruction and operand.
    size_t entry = constant_array_builder()->CommitReservedEntry(
        OperandSize::kByte, Smi::FromInt(delta));
    DCHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<uint32_t>(entry)),
              OperandSize::kByte);
    jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
    bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
    bytecodes()->at(operand_location) = static_cast<uint8_t>(entry);
  }
}

void BytecodeArrayWriter::PatchJumpWith16BitOperand(size_t jump_location,
                                                    int delta) {
  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
  DCHECK(Bytecodes::IsForwardJump(jump_bytecode));
  DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
  DCHECK_EQ(Bytecodes::GetOperandType(jump_bytecode, 0), OperandType::kUImm);
  DCHECK_GT(delta, 0);
  size_t operand_location = jump_location + 1;
  uint8_t operand_bytes[2];
  if (Bytecodes::ScaleForUnsignedOperand(delta) <= OperandScale::kDouble) {
    // The jump fits within the range of an Imm16 operand, so cancel
    // the reservation and jump directly.
    constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
    WriteUnalignedUInt16(reinterpret_cast<Address>(operand_bytes),
                         static_cast<uint16_t>(delta));
  } else {
    // The jump does not fit within the range of an Imm16 operand, so
    // commit the reservation, putting the offset into the constant pool,
    // and update the jump instruction and operand.
    size_t entry = constant_array_builder()->CommitReservedEntry(
        OperandSize::kShort, Smi::FromInt(delta));
    jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
    bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
    WriteUnalignedUInt16(reinterpret_cast<Address>(operand_bytes),
                         static_cast<uint16_t>(entry));
  }
  DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
         bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder);
  bytecodes()->at(operand_location++) = operand_bytes[0];
  bytecodes()->at(operand_location) = operand_bytes[1];
}

void BytecodeArrayWriter::PatchJumpWith32BitOperand(size_t jump_location,
                                                    int delta) {
  DCHECK(Bytecodes::IsJumpImmediate(
      Bytecodes::FromByte(bytecodes()->at(jump_location))));
  constant_array_builder()->DiscardReservedEntry(OperandSize::kQuad);
  uint8_t operand_bytes[4];
  WriteUnalignedUInt32(reinterpret_cast<Address>(operand_bytes),
                       static_cast<uint32_t>(delta));
  size_t operand_location = jump_location + 1;
  DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
         bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder &&
         bytecodes()->at(operand_location + 2) == k8BitJumpPlaceholder &&
         bytecodes()->at(operand_location + 3) == k8BitJumpPlaceholder);
  bytecodes()->at(operand_location++) = operand_bytes[0];
  bytecodes()->at(operand_location++) = operand_bytes[1];
  bytecodes()->at(operand_location++) = operand_bytes[2];
  bytecodes()->at(operand_location) = operand_bytes[3];
}

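// Patches a forward jump once its target is known: computes the delta from the
// jump bytecode to |jump_target|, adjusts for a Wide/ExtraWide prefix if one
// was emitted, and dispatches on the operand scale reserved for the jump.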
void BytecodeArrayWriter::PatchJump(size_t jump_target, size_t jump_location) {
  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
  int delta = static_cast<int>(jump_target - jump_location);
  int prefix_offset = 0;
  OperandScale operand_scale = OperandScale::kSingle;
  if (Bytecodes::IsPrefixScalingBytecode(jump_bytecode)) {
    // If a prefix scaling bytecode is emitted the target offset is one
    // less than in the case of no prefix scaling bytecode.
    delta -= 1;
    prefix_offset = 1;
    operand_scale = Bytecodes::PrefixBytecodeToOperandScale(jump_bytecode);
    jump_bytecode =
        Bytecodes::FromByte(bytecodes()->at(jump_location + prefix_offset));
  }

  DCHECK(Bytecodes::IsJump(jump_bytecode));
  switch (operand_scale) {
    case OperandScale::kSingle:
      PatchJumpWith8BitOperand(jump_location, delta);
      break;
    case OperandScale::kDouble:
      PatchJumpWith16BitOperand(jump_location + prefix_offset, delta);
      break;
    case OperandScale::kQuadruple:
      PatchJumpWith32BitOperand(jump_location + prefix_offset, delta);
      break;
    default:
      UNREACHABLE();
  }
  unbound_jumps_--;
}

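// Emits a jump bytecode. A jump to an already-bound label is a backward jump
// and gets its relative offset immediately; a jump to an unbound label is a
// forward jump and gets a placeholder operand backed by a constant pool
// reservation, to be patched by PatchJump when the label is bound.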
void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
  DCHECK(Bytecodes::IsJump(node->bytecode()));
  DCHECK_EQ(0u, node->operand(0));

  size_t current_offset = bytecodes()->size();

  if (label->is_bound()) {
    CHECK_GE(current_offset, label->offset());
    CHECK_LE(current_offset, static_cast<size_t>(kMaxUInt32));
    // Label has been bound already, so this is a backwards jump.
    uint32_t delta = static_cast<uint32_t>(current_offset - label->offset());
    OperandScale operand_scale = Bytecodes::ScaleForUnsignedOperand(delta);
    if (operand_scale > OperandScale::kSingle) {
      // Adjust for the scaling byte prefix of the wide jump offset.
      delta += 1;
    }
    DCHECK_EQ(Bytecode::kJumpLoop, node->bytecode());
    node->update_operand0(delta);
  } else {
    // The label has not yet been bound, so this is a forward reference that
    // will be patched when the label is bound. We create a reservation in the
    // constant pool so that the jump can be patched later. The reservation
    // means the maximum size of the operand for the constant is known and the
    // jump can be emitted into the bytecode stream with space for the operand.
    unbound_jumps_++;
    label->set_referrer(current_offset);
    OperandSize reserved_operand_size =
        constant_array_builder()->CreateReservedEntry();
    DCHECK_NE(Bytecode::kJumpLoop, node->bytecode());
    switch (reserved_operand_size) {
      case OperandSize::kNone:
        UNREACHABLE();
        break;
      case OperandSize::kByte:
        node->update_operand0(k8BitJumpPlaceholder);
        break;
      case OperandSize::kShort:
        node->update_operand0(k16BitJumpPlaceholder);
        break;
      case OperandSize::kQuad:
        node->update_operand0(k32BitJumpPlaceholder);
        break;
    }
  }
  EmitBytecode(node);
}

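// Emits a switch bytecode and records the offset of the switch bytecode itself
// (skipping over a scaling prefix if one will be emitted) in the jump table,
// so that case targets can later be stored as offsets relative to it.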
void BytecodeArrayWriter::EmitSwitch(BytecodeNode* node,
                                     BytecodeJumpTable* jump_table) {
  DCHECK(Bytecodes::IsSwitch(node->bytecode()));

  size_t current_offset = bytecodes()->size();
  if (node->operand_scale() > OperandScale::kSingle) {
    // Adjust for scaling byte prefix.
    current_offset += 1;
  }
  jump_table->set_switch_bytecode_offset(current_offset);

  EmitBytecode(node);
}

}  // namespace interpreter
}  // namespace internal
}  // namespace v8