/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_UTILS_ASSEMBLER_H_
#define ART_COMPILER_UTILS_ASSEMBLER_H_

#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

#include <android-base/logging.h>

#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
#include "arm/constants_arm.h"
#include "base/arena_allocator.h"
#include "base/arena_object.h"
#include "base/array_ref.h"
#include "base/enums.h"
#include "base/macros.h"
#include "base/memory_region.h"
#include "dwarf/debug_frame_opcode_writer.h"
#include "label.h"
#include "managed_register.h"
#include "offsets.h"
#include "x86/constants_x86.h"
#include "x86_64/constants_x86_64.h"

namespace art {

class Assembler;
class AssemblerBuffer;

// Assembler fixups are positions in generated code that require processing
// after the code has been copied to executable memory. This includes building
// relocation information.
class AssemblerFixup {
 public:
  virtual void Process(const MemoryRegion& region, int position) = 0;
  virtual ~AssemblerFixup() {}

 private:
  AssemblerFixup* previous_;
  int position_;

  AssemblerFixup* previous() const { return previous_; }
  void set_previous(AssemblerFixup* previous_in) { previous_ = previous_in; }

  int position() const { return position_; }
  void set_position(int position_in) { position_ = position_in; }

  friend class AssemblerBuffer;
};
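
// For illustration only (a sketch; HypotheticalImmediateFixup and value_ are
// not part of this header): a concrete fixup overrides Process() to patch the
// finalized code once its final location is known.
//
//     class HypotheticalImmediateFixup final : public AssemblerFixup {
//      public:
//       explicit HypotheticalImmediateFixup(uint32_t value) : value_(value) {}
//       void Process(const MemoryRegion& region, int position) override {
//         // Rewrite the placeholder word recorded at `position`.
//         region.Store<uint32_t>(position, value_);
//       }
//      private:
//       const uint32_t value_;
//     };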

// Parent of all queued slow paths, emitted during finalization.
class SlowPath : public DeletableArenaObject<kArenaAllocAssembler> {
 public:
  SlowPath() : next_(nullptr) {}
  virtual ~SlowPath() {}

  Label* Continuation() { return &continuation_; }
  Label* Entry() { return &entry_; }
  // Generate code for the slow path.
  virtual void Emit(Assembler* sp_asm) = 0;

 protected:
  // Entry branched to by the fast path.
  Label entry_;
  // Optional continuation that is branched to at the end of the slow path.
  Label continuation_;
  // Next in the linked list of slow paths.
  SlowPath* next_;

 private:
  friend class AssemblerBuffer;
  DISALLOW_COPY_AND_ASSIGN(SlowPath);
};
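
// A minimal usage sketch (HypotheticalSlowPath and `allocator` are not part
// of this header): the fast path queues the slow path and branches to its
// entry; Assembler::FinalizeCode() later emits the queued code.
//
//     class HypotheticalSlowPath final : public SlowPath {
//      public:
//       void Emit(Assembler* sp_asm) override {
//         sp_asm->Bind(Entry());
//         // ... emit the out-of-line code ...
//         sp_asm->Jump(Continuation());
//       }
//     };
//
//     SlowPath* path = new (allocator) HypotheticalSlowPath();
//     assembler->GetBuffer()->EnqueueSlowPath(path);  // Emitted at finalization.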

class AssemblerBuffer {
 public:
  explicit AssemblerBuffer(ArenaAllocator* allocator);
  ~AssemblerBuffer();

  ArenaAllocator* GetAllocator() {
    return allocator_;
  }

  // Basic support for emitting, loading, and storing.
  template<typename T> void Emit(T value) {
    CHECK(HasEnsuredCapacity());
    *reinterpret_cast<T*>(cursor_) = value;
    cursor_ += sizeof(T);
  }

  template<typename T> T Load(size_t position) {
    CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
    return *reinterpret_cast<T*>(contents_ + position);
  }

  template<typename T> void Store(size_t position, T value) {
    CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
    *reinterpret_cast<T*>(contents_ + position) = value;
  }

  void Resize(size_t new_size) {
    if (new_size > Capacity()) {
      ExtendCapacity(new_size);
    }
    cursor_ = contents_ + new_size;
  }

  void Move(size_t newposition, size_t oldposition, size_t size) {
    // Move a chunk of the buffer from oldposition to newposition.
    DCHECK_LE(oldposition + size, Size());
    DCHECK_LE(newposition + size, Size());
    memmove(contents_ + newposition, contents_ + oldposition, size);
  }

  // Emit a fixup at the current location.
  void EmitFixup(AssemblerFixup* fixup) {
    fixup->set_previous(fixup_);
    fixup->set_position(Size());
    fixup_ = fixup;
  }

  void EnqueueSlowPath(SlowPath* slowpath) {
    if (slow_path_ == nullptr) {
      slow_path_ = slowpath;
    } else {
      SlowPath* cur = slow_path_;
      for ( ; cur->next_ != nullptr ; cur = cur->next_) {}
      cur->next_ = slowpath;
    }
  }

  void EmitSlowPaths(Assembler* sp_asm) {
    SlowPath* cur = slow_path_;
    SlowPath* next = nullptr;
    slow_path_ = nullptr;
    for ( ; cur != nullptr ; cur = next) {
      cur->Emit(sp_asm);
      next = cur->next_;
      delete cur;
    }
  }

  // Get the size of the emitted code.
  size_t Size() const {
    CHECK_GE(cursor_, contents_);
    return cursor_ - contents_;
  }

  uint8_t* contents() const { return contents_; }

  // Copy the assembled instructions into the specified memory block
  // and apply all fixups.
  void FinalizeInstructions(const MemoryRegion& region);

  // To emit an instruction to the assembler buffer, the EnsureCapacity helper
  // must be used to guarantee that the underlying data area is big enough to
  // hold the emitted instruction. Usage:
  //
  //     AssemblerBuffer buffer(allocator);  // `allocator` is an ArenaAllocator*.
  //     AssemblerBuffer::EnsureCapacity ensured(&buffer);
  //     ... emit bytes for single instruction ...
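  //
  // For example (EmitInt32 is a hypothetical helper, not part of this header):
  //
  //     void EmitInt32(AssemblerBuffer* buffer, int32_t value) {
  //       AssemblerBuffer::EnsureCapacity ensured(buffer);  // Reserve room.
  //       buffer->Emit<int32_t>(value);                     // Checked emit.
  //     }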

#ifndef NDEBUG

  class EnsureCapacity {
   public:
    explicit EnsureCapacity(AssemblerBuffer* buffer) {
      if (buffer->cursor() > buffer->limit()) {
        buffer->ExtendCapacity(buffer->Size() + kMinimumGap);
      }
      // In debug mode, we save the assembler buffer along with the gap
      // size before we start emitting to the buffer. This allows us to
      // check that any single generated instruction doesn't overflow the
      // limit implied by the minimum gap size.
      buffer_ = buffer;
      gap_ = ComputeGap();
      // Make sure that extending the capacity leaves a big enough gap
      // for any kind of instruction.
      CHECK_GE(gap_, kMinimumGap);
      // Mark the buffer as having ensured the capacity.
      CHECK(!buffer->HasEnsuredCapacity());  // Cannot nest.
      buffer->has_ensured_capacity_ = true;
    }

    ~EnsureCapacity() {
      // Unmark the buffer, so we cannot emit after this.
      buffer_->has_ensured_capacity_ = false;
      // Make sure the generated instruction doesn't take up more
      // space than the minimum gap.
      int delta = gap_ - ComputeGap();
      CHECK_LE(delta, kMinimumGap);
    }

   private:
    AssemblerBuffer* buffer_;
    int gap_;

    int ComputeGap() { return buffer_->Capacity() - buffer_->Size(); }
  };

  bool has_ensured_capacity_;
  bool HasEnsuredCapacity() const { return has_ensured_capacity_; }

#else

  class EnsureCapacity {
   public:
    explicit EnsureCapacity(AssemblerBuffer* buffer) {
      if (buffer->cursor() > buffer->limit()) {
        buffer->ExtendCapacity(buffer->Size() + kMinimumGap);
      }
    }
  };

  // When building the C++ tests, assertion code is enabled. To allow
  // asserting that the user of the assembler buffer has ensured the
  // capacity needed for emitting, we add a placeholder method in non-debug mode.
  bool HasEnsuredCapacity() const { return true; }

#endif

  // Returns the position in the instruction stream.
  int GetPosition() { return cursor_ - contents_; }

  size_t Capacity() const {
    CHECK_GE(limit_, contents_);
    return (limit_ - contents_) + kMinimumGap;
  }

  // Unconditionally increase the capacity.
  // The provided `min_capacity` must be higher than current `Capacity()`.
  void ExtendCapacity(size_t min_capacity);

 private:
  // The limit is set to kMinimumGap bytes before the end of the data area.
  // This leaves enough space for the longest possible instruction and allows
  // for a single, fast space check per instruction.
  static constexpr int kMinimumGap = 32;

  ArenaAllocator* const allocator_;
  uint8_t* contents_;
  uint8_t* cursor_;
  uint8_t* limit_;
  AssemblerFixup* fixup_;
#ifndef NDEBUG
  bool fixups_processed_;
#endif

  // Head of the linked list of slow paths.
  SlowPath* slow_path_;

  uint8_t* cursor() const { return cursor_; }
  uint8_t* limit() const { return limit_; }

  // Process the fixup chain, calling Process() on each queued fixup with its
  // recorded position in the given region.
  void ProcessFixups(const MemoryRegion& region);

  // Compute the limit based on the data area and the capacity. See the
  // description of kMinimumGap for the reasoning behind the value.
  static uint8_t* ComputeLimit(uint8_t* data, size_t capacity) {
    return data + capacity - kMinimumGap;
  }

  friend class AssemblerFixup;
};
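
// A minimal patching sketch (`allocator` and the values are illustrative,
// not part of this header): record a position, emit a placeholder, and
// store the real value once it is known.
//
//     AssemblerBuffer buffer(allocator);
//     size_t pos = buffer.Size();
//     {
//       AssemblerBuffer::EnsureCapacity ensured(&buffer);
//       buffer.Emit<int32_t>(0);          // Placeholder.
//     }
//     // ... later, when the value is known ...
//     buffer.Store<int32_t>(pos, value);  // Patch in place.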

// The purpose of this class is to ensure that we do not have to explicitly
// call the AdvancePC method (which is good for convenience and correctness).
class DebugFrameOpCodeWriterForAssembler final
    : public dwarf::DebugFrameOpCodeWriter<> {
 public:
  struct DelayedAdvancePC {
    uint32_t stream_pos;
    uint32_t pc;
  };

  // This method is called by the opcode writers.
  void ImplicitlyAdvancePC() final;

  explicit DebugFrameOpCodeWriterForAssembler(Assembler* buffer)
      : dwarf::DebugFrameOpCodeWriter<>(/* enabled= */ false),
        assembler_(buffer),
        delay_emitting_advance_pc_(false),
        delayed_advance_pcs_() {
  }

  ~DebugFrameOpCodeWriterForAssembler() {
    DCHECK(delayed_advance_pcs_.empty());
  }

  // Tell the writer to delay emitting advance PC info.
  // The assembler must explicitly process all the delayed advances.
  void DelayEmittingAdvancePCs() {
    delay_emitting_advance_pc_ = true;
  }

  // Override the last delayed PC. The new PC can be out of order.
  void OverrideDelayedPC(size_t pc) {
    DCHECK(delay_emitting_advance_pc_);
    if (enabled_) {
      DCHECK(!delayed_advance_pcs_.empty());
      delayed_advance_pcs_.back().pc = pc;
    }
  }

  // Return the number of delayed advance PC entries.
  size_t NumberOfDelayedAdvancePCs() const {
    return delayed_advance_pcs_.size();
  }

  // Release the CFI stream and advance PC infos so that the assembler can patch it.
  std::pair<std::vector<uint8_t>, std::vector<DelayedAdvancePC>>
  ReleaseStreamAndPrepareForDelayedAdvancePC() {
    DCHECK(delay_emitting_advance_pc_);
    delay_emitting_advance_pc_ = false;
    std::pair<std::vector<uint8_t>, std::vector<DelayedAdvancePC>> result;
    result.first.swap(opcodes_);
    result.second.swap(delayed_advance_pcs_);
    return result;
  }

  // Reserve space for the CFI stream.
  void ReserveCFIStream(size_t capacity) {
    opcodes_.reserve(capacity);
  }

  // Append raw data to the CFI stream.
  void AppendRawData(const std::vector<uint8_t>& raw_data, size_t first, size_t last) {
    DCHECK_LE(0u, first);
    DCHECK_LE(first, last);
    DCHECK_LE(last, raw_data.size());
    opcodes_.insert(opcodes_.end(), raw_data.begin() + first, raw_data.begin() + last);
  }

 private:
  Assembler* assembler_;
  bool delay_emitting_advance_pc_;
  std::vector<DelayedAdvancePC> delayed_advance_pcs_;
};
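
// A rough sketch of the delayed-advance protocol (the actual call sites and
// patching details live in the architecture-specific assemblers, not here):
//
//     cfi().DelayEmittingAdvancePCs();
//     // ... emit code; opcode writers call ImplicitlyAdvancePC() ...
//     auto streams = cfi().ReleaseStreamAndPrepareForDelayedAdvancePC();
//     // streams.first is the raw CFI opcode stream; streams.second lists the
//     // stream positions where the final PC advances must be patched in.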

class Assembler : public DeletableArenaObject<kArenaAllocAssembler> {
 public:
  // Finalize the code; emit slow paths, fix up branches, add literal pool, etc.
  virtual void FinalizeCode() { buffer_.EmitSlowPaths(this); }

  // Size of generated code.
  virtual size_t CodeSize() const { return buffer_.Size(); }
  virtual const uint8_t* CodeBufferBaseAddress() const { return buffer_.contents(); }
  // CodePosition() is a non-const method similar to CodeSize(), which is used to
  // record positions within the code buffer for the purpose of signal handling
  // (stack overflow checks and implicit null checks may trigger signals and the
  // signal handlers expect them right before the recorded positions).
  // On most architectures CodePosition() should be equivalent to CodeSize(), but
  // the MIPS assembler needs to be aware of this recording, so it doesn't put
  // the instructions that can trigger signals into branch delay slots. Handling
  // signals from instructions in delay slots is a bit problematic and should be
  // avoided.
  // TODO: Re-evaluate whether we still need this now that MIPS support has been removed.
  virtual size_t CodePosition() { return CodeSize(); }

  // Copy instructions out of assembly buffer into the given region of memory.
  virtual void FinalizeInstructions(const MemoryRegion& region) {
    buffer_.FinalizeInstructions(region);
  }

  // TODO: Implement with disassembler.
  virtual void Comment(const char* format ATTRIBUTE_UNUSED, ...) {}

  virtual void Bind(Label* label) = 0;
  virtual void Jump(Label* label) = 0;

  virtual ~Assembler() {}

  /**
   * @brief Buffer of DWARF's Call Frame Information opcodes.
   * @details It is used by debuggers and other tools to unwind the call stack.
   */
  DebugFrameOpCodeWriterForAssembler& cfi() { return cfi_; }

  ArenaAllocator* GetAllocator() {
    return buffer_.GetAllocator();
  }

  AssemblerBuffer* GetBuffer() {
    return &buffer_;
  }

 protected:
  explicit Assembler(ArenaAllocator* allocator) : buffer_(allocator), cfi_(this) {}

  AssemblerBuffer buffer_;

  DebugFrameOpCodeWriterForAssembler cfi_;
};
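
// Typical finalization sequence for a concrete assembler subclass (a sketch;
// `asm_` and AllocateCodeMemory() are hypothetical names):
//
//     asm_->FinalizeCode();                         // Emit slow paths, etc.
//     size_t size = asm_->CodeSize();
//     MemoryRegion code(AllocateCodeMemory(size), size);
//     asm_->FinalizeInstructions(code);             // Copy out, apply fixups.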

enum ScaleFactor {
  TIMES_1 = 0,
  TIMES_2 = 1,
  TIMES_4 = 2,
  TIMES_8 = 3
};
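
// The ScaleFactor enumerators above are log2 multipliers: an addressing mode
// using TIMES_4, for example, scales the index register by 1 << 2 == 4,
// matching the two-bit scale field of the x86/x86-64 SIB byte.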

}  // namespace art

#endif  // ART_COMPILER_UTILS_ASSEMBLER_H_