// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the license above has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#ifndef V8_CODEGEN_ASSEMBLER_H_
#define V8_CODEGEN_ASSEMBLER_H_

#include <forward_list>
#include <memory>
#include <unordered_map>

#include "src/base/memory.h"
#include "src/codegen/code-comments.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/external-reference.h"
#include "src/codegen/reglist.h"
#include "src/codegen/reloc-info.h"
#include "src/common/globals.h"
#include "src/deoptimizer/deoptimize-reason.h"
#include "src/flags/flags.h"
#include "src/handles/handles.h"
#include "src/objects/objects.h"

namespace v8 {

// Forward declarations.
class ApiFunction;

namespace internal {

using base::Memory;
using base::ReadUnalignedValue;
using base::WriteUnalignedValue;

// Forward declarations.
class EmbeddedData;
class InstructionStream;
class Isolate;
class SCTableReference;
class SourcePosition;
class StatsCounter;
class StringConstantBase;

// -----------------------------------------------------------------------------
// Optimization for far-jump-like instructions that can be replaced by shorter
// ones.

class JumpOptimizationInfo {
 public:
  bool is_collecting() const { return stage_ == kCollection; }
  bool is_optimizing() const { return stage_ == kOptimization; }
  void set_optimizing() {
    DCHECK(is_optimizable());
    stage_ = kOptimization;
  }

  bool is_optimizable() const { return optimizable_; }
  void set_optimizable() {
    DCHECK(is_collecting());
    optimizable_ = true;
  }

  // Used to verify that the instruction sequence is the same in both stages.
  size_t hash_code() const { return hash_code_; }
  void set_hash_code(size_t hash_code) { hash_code_ = hash_code; }

  std::vector<uint32_t>& farjmp_bitmap() { return farjmp_bitmap_; }

 private:
  enum { kCollection, kOptimization } stage_ = kCollection;
  bool optimizable_ = false;
  std::vector<uint32_t> farjmp_bitmap_;
  size_t hash_code_ = 0u;
};
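
// Usage sketch (illustrative only; "masm" stands for any concrete Assembler
// subclass and EmitCode for caller-provided code generation, which must
// produce the same instruction sequence in both stages):
//
//   JumpOptimizationInfo jump_opt;
//   masm.set_jump_optimization_info(&jump_opt);
//   EmitCode(&masm);                    // First pass: collect jump info.
//   if (jump_opt.is_optimizable()) {
//     jump_opt.set_optimizing();
//     EmitCode(&masm);                  // Second pass: emit shorter jumps.
//   }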

class HeapObjectRequest {
 public:
  explicit HeapObjectRequest(double heap_number, int offset = -1);
  explicit HeapObjectRequest(const StringConstantBase* string, int offset = -1);

  enum Kind { kHeapNumber, kStringConstant };
  Kind kind() const { return kind_; }

  double heap_number() const {
    DCHECK_EQ(kind(), kHeapNumber);
    return value_.heap_number;
  }

  const StringConstantBase* string() const {
    DCHECK_EQ(kind(), kStringConstant);
    return value_.string;
  }

  // The code buffer offset at the time of the request.
  int offset() const {
    DCHECK_GE(offset_, 0);
    return offset_;
  }
  void set_offset(int offset) {
    DCHECK_LT(offset_, 0);
    offset_ = offset;
    DCHECK_GE(offset_, 0);
  }

 private:
  Kind kind_;

  union {
    double heap_number;
    const StringConstantBase* string;
  } value_;

  int offset_;
};
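
// Usage sketch (illustrative only): a platform assembler records a request
// while emitting code that will later be patched to reference the allocated
// object:
//
//   RequestHeapObject(HeapObjectRequest(3.1415, pc_offset()));
//   // After assembly, AllocateAndInstallRequestedHeapObjects() allocates
//   // the heap number and patches the code at the recorded offset.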

// -----------------------------------------------------------------------------
// Platform independent assembler base class.

enum class CodeObjectRequired { kNo, kYes };

struct V8_EXPORT_PRIVATE AssemblerOptions {
  // Recording reloc info for external references and off-heap targets is
  // needed whenever code is serialized, e.g. into the snapshot or as a Wasm
  // module. This flag allows this reloc info to be disabled for code that
  // will not survive process destruction.
  bool record_reloc_info_for_serialization = true;
  // Recording reloc info can be disabled wholesale. This is needed when the
  // assembler is used on existing code directly (e.g. JumpTableAssembler)
  // without any buffer to hold reloc information.
  bool disable_reloc_info_for_patching = false;
  // Enables access to external references by computing a delta from the root
  // array. Only valid if code will not survive the process.
  bool enable_root_array_delta_access = false;
  // Enables specific assembler sequences only used for the simulator.
  bool enable_simulator_code = false;
  // Enables use of isolate-independent constants, indirected through the
  // root array.
  // (macro assembler feature).
  bool isolate_independent_code = false;
  // Enables the use of isolate-independent builtins through an off-heap
  // trampoline. (macro assembler feature).
  bool inline_offheap_trampolines = true;
  // On some platforms, all code is within a given range in the process,
  // and the start of this range is configured here.
  Address code_range_start = 0;
  // Enable pc-relative calls/jumps on platforms that support it. When setting
  // this flag, the code range must be small enough to fit all offsets into
  // the instruction immediates.
  bool use_pc_relative_calls_and_jumps = false;
  // Enables the collection of information useful for the generation of unwind
  // info. This is useful on some platforms (e.g. Win64), where the unwind
  // info depends on the function prologue and epilogue.
  bool collect_win64_unwind_info = false;
  // Whether to emit code comments.
  bool emit_code_comments = FLAG_code_comments;

  static AssemblerOptions Default(Isolate* isolate);
  static AssemblerOptions DefaultForOffHeapTrampoline(Isolate* isolate);
};
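
// Usage sketch (illustrative only; assumes an Isolate* isolate in scope and a
// platform Assembler class): start from the isolate defaults and adjust
// individual flags before constructing an assembler:
//
//   AssemblerOptions options = AssemblerOptions::Default(isolate);
//   options.emit_code_comments = true;
//   Assembler masm(options,
//                  NewAssemblerBuffer(AssemblerBase::kDefaultBufferSize));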

class AssemblerBuffer {
 public:
  virtual ~AssemblerBuffer() = default;
  virtual byte* start() const = 0;
  virtual int size() const = 0;
  // Return a grown copy of this buffer. The contained data is uninitialized.
  // The data in {this} will still be read afterwards (until {this} is
  // destructed), but not written.
  virtual std::unique_ptr<AssemblerBuffer> Grow(int new_size)
      V8_WARN_UNUSED_RESULT = 0;
};

// Allocate an AssemblerBuffer which uses an existing buffer. This buffer cannot
// grow, so it must be large enough for all code emitted by the Assembler.
V8_EXPORT_PRIVATE
std::unique_ptr<AssemblerBuffer> ExternalAssemblerBuffer(void* buffer,
                                                         int size);

// Allocate a new growable AssemblerBuffer with a given initial size.
V8_EXPORT_PRIVATE
std::unique_ptr<AssemblerBuffer> NewAssemblerBuffer(int size);
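
// Usage sketch (illustrative only): use the external variant to assemble into
// caller-owned memory, and the growable variant otherwise:
//
//   byte stack_buffer[256];
//   auto fixed = ExternalAssemblerBuffer(stack_buffer, sizeof(stack_buffer));
//   auto growable = NewAssemblerBuffer(AssemblerBase::kDefaultBufferSize);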

class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
 public:
  AssemblerBase(const AssemblerOptions& options,
                std::unique_ptr<AssemblerBuffer>);
  virtual ~AssemblerBase();

  const AssemblerOptions& options() const { return options_; }

  bool emit_debug_code() const { return emit_debug_code_; }
  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }

  bool predictable_code_size() const { return predictable_code_size_; }
  void set_predictable_code_size(bool value) { predictable_code_size_ = value; }

  uint64_t enabled_cpu_features() const { return enabled_cpu_features_; }
  void set_enabled_cpu_features(uint64_t features) {
    enabled_cpu_features_ = features;
  }
  // Features are usually enabled by CpuFeatureScope, which also asserts that
  // the features are supported before they are enabled.
  // IMPORTANT: IsEnabled() should only be used by DCHECKs. For real feature
  // detection, use IsSupported().
  bool IsEnabled(CpuFeature f) {
    return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0;
  }
  void EnableCpuFeature(CpuFeature f) {
    enabled_cpu_features_ |= (static_cast<uint64_t>(1) << f);
  }

  bool is_constant_pool_available() const {
    if (FLAG_enable_embedded_constant_pool) {
      // We need to disable the constant pool here for embedded builtins
      // because the metadata section is not adjacent to instructions.
      return constant_pool_available_ && !options().isolate_independent_code;
    } else {
      // Embedded constant pool not supported on this architecture.
      UNREACHABLE();
    }
  }

  JumpOptimizationInfo* jump_optimization_info() {
    return jump_optimization_info_;
  }
  void set_jump_optimization_info(JumpOptimizationInfo* jump_opt) {
    jump_optimization_info_ = jump_opt;
  }

  void FinalizeJumpOptimizationInfo() {}

  // Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for
  // cross-snapshotting.
  static void QuietNaN(HeapObject nan) {}

  int pc_offset() const { return static_cast<int>(pc_ - buffer_start_); }

  int pc_offset_for_safepoint() {
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
    // MIPS needs its own implementation to avoid the trampoline's influence.
    UNREACHABLE();
#else
    return pc_offset();
#endif
  }

  byte* buffer_start() const { return buffer_->start(); }
  int buffer_size() const { return buffer_->size(); }
  int instruction_size() const { return pc_offset(); }

  // This function is called when code generation is aborted, so that
  // the assembler can clean up internal data structures.
  virtual void AbortedCodeGeneration() {}

  // Debugging
  void Print(Isolate* isolate);

  // Record an inline code comment that can be used by a disassembler.
  // Use --code-comments to enable.
  void RecordComment(const char* msg) {
    if (options().emit_code_comments) {
      code_comments_writer_.Add(pc_offset(), std::string(msg));
    }
  }
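
  // Example (illustrative only; "masm" is any concrete Assembler):
  //
  //   masm.RecordComment("[ inlined fast path");
  //   ... emit code ...
  //   masm.RecordComment("]");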

  // The minimum buffer size. Should be at least two times the platform-specific
  // {Assembler::kGap}.
  static constexpr int kMinimalBufferSize = 128;

  // The default buffer size used if we do not know the final size of the
  // generated code.
  static constexpr int kDefaultBufferSize = 4 * KB;

 protected:
  // Add 'target' to the {code_targets_} vector, if necessary, and return the
  // offset at which it is stored.
  int AddCodeTarget(Handle<Code> target);
  Handle<Code> GetCodeTarget(intptr_t code_target_index) const;

  // Add 'object' to the {embedded_objects_} vector and return the index at
  // which it is stored.
  using EmbeddedObjectIndex = size_t;
  EmbeddedObjectIndex AddEmbeddedObject(Handle<HeapObject> object);
  Handle<HeapObject> GetEmbeddedObject(EmbeddedObjectIndex index) const;

  // The buffer into which code and relocation info are generated.
  std::unique_ptr<AssemblerBuffer> buffer_;
  // Cached from {buffer_->start()}, for faster access.
  byte* buffer_start_;
  std::forward_list<HeapObjectRequest> heap_object_requests_;
  // The program counter, which points into the buffer above and moves forward.
  // TODO(jkummerow): This should probably have type {Address}.
  byte* pc_;

  void set_constant_pool_available(bool available) {
    if (FLAG_enable_embedded_constant_pool) {
      constant_pool_available_ = available;
    } else {
      // Embedded constant pool not supported on this architecture.
      UNREACHABLE();
    }
  }

  // {RequestHeapObject} records the need for a future heap number allocation,
  // code stub generation or string allocation. After code assembly, each
  // platform's {Assembler::AllocateAndInstallRequestedHeapObjects} will
  // allocate these objects and place them where they are expected (determined
  // by the pc offset associated with each request).
  void RequestHeapObject(HeapObjectRequest request);

  bool ShouldRecordRelocInfo(RelocInfo::Mode rmode) const {
    DCHECK(!RelocInfo::IsNone(rmode));
    if (options().disable_reloc_info_for_patching) return false;
    if (RelocInfo::IsOnlyForSerializer(rmode) &&
        !options().record_reloc_info_for_serialization && !emit_debug_code()) {
      return false;
    }
    return true;
  }
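
  // Sketch of the intended call pattern (illustrative only; each platform
  // assembler implements its own RecordRelocInfo):
  //
  //   if (!ShouldRecordRelocInfo(rmode)) return;
  //   ... write a RelocInfo entry for the code at pc_offset() ...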

  CodeCommentsWriter code_comments_writer_;

 private:
  // Before we copy code into the code space, we sometimes cannot encode
  // call/jump code targets as we normally would, as the difference between the
  // instruction's location in the temporary buffer and the call target is not
  // guaranteed to fit in the instruction's offset field. We keep track of the
  // code handles we encounter in calls in this vector, and encode the index of
  // the code handle in the vector instead.
  std::vector<Handle<Code>> code_targets_;

  // If an assembler needs a small number to refer to a heap object handle
  // (for example, because only 32 bits are available on a 64-bit
  // architecture), the assembler adds the object into this vector using
  // AddEmbeddedObject, and may then refer to the heap object using the
  // handle's index in this vector.
  std::vector<Handle<HeapObject>> embedded_objects_;

  // Embedded objects are deduplicated based on handle location. This is a
  // compromise that is almost as effective as deduplication based on actual
  // heap object addresses while still maintaining GC safety.
  std::unordered_map<Handle<HeapObject>, EmbeddedObjectIndex,
                     Handle<HeapObject>::hash, Handle<HeapObject>::equal_to>
      embedded_objects_map_;

  const AssemblerOptions options_;
  uint64_t enabled_cpu_features_;
  bool emit_debug_code_;
  bool predictable_code_size_;

  // Indicates whether the constant pool can be accessed, which is only possible
  // if the pp register points to the current code object's constant pool.
  bool constant_pool_available_;

  JumpOptimizationInfo* jump_optimization_info_;

  // Constant pool.
  friend class FrameAndConstantPoolScope;
  friend class ConstantPoolUnavailableScope;
};

// Avoids emitting debug code during the lifetime of this scope object.
class DontEmitDebugCodeScope {
 public:
  explicit DontEmitDebugCodeScope(AssemblerBase* assembler)
      : assembler_(assembler), old_value_(assembler->emit_debug_code()) {
    assembler_->set_emit_debug_code(false);
  }
  ~DontEmitDebugCodeScope() { assembler_->set_emit_debug_code(old_value_); }

 private:
  AssemblerBase* assembler_;
  bool old_value_;
};
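
// Example (illustrative only; "masm" is any concrete Assembler):
//
//   {
//     DontEmitDebugCodeScope no_debug_code(&masm);
//     // Code emitted here has emit_debug_code() forced to false; the
//     // previous value is restored when the scope ends.
//   }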

// Enable a specified feature within a scope.
class V8_EXPORT_PRIVATE CpuFeatureScope {
 public:
  enum CheckPolicy {
    kCheckSupported,
    kDontCheckSupported,
  };

#ifdef DEBUG
  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
                  CheckPolicy check = kCheckSupported);
  ~CpuFeatureScope();

 private:
  AssemblerBase* assembler_;
  uint64_t old_enabled_;
#else
  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
                  CheckPolicy check = kCheckSupported) {}
  ~CpuFeatureScope() {  // NOLINT (modernize-use-equals-default)
    // Define a destructor to avoid unused variable warnings.
  }
#endif
};
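
// Example (illustrative only; AVX stands in for any platform CpuFeature):
//
//   {
//     CpuFeatureScope use_avx(&masm, AVX);
//     // Generate code containing AVX instructions; the previously enabled
//     // feature set is restored when the scope ends.
//   }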

}  // namespace internal
}  // namespace v8
#endif  // V8_CODEGEN_ASSEMBLER_H_