• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_IA32_MACRO_ASSEMBLER_IA32_H_
6 #define V8_IA32_MACRO_ASSEMBLER_IA32_H_
7 
8 #include "src/assembler.h"
9 #include "src/bailout-reason.h"
10 #include "src/globals.h"
11 #include "src/ia32/assembler-ia32.h"
12 #include "src/turbo-assembler.h"
13 
14 namespace v8 {
15 namespace internal {
16 
17 // Give alias names to registers for calling conventions.
18 constexpr Register kReturnRegister0 = eax;
19 constexpr Register kReturnRegister1 = edx;
20 constexpr Register kReturnRegister2 = edi;
21 constexpr Register kJSFunctionRegister = edi;
22 constexpr Register kContextRegister = esi;
23 constexpr Register kAllocateSizeRegister = edx;
24 constexpr Register kSpeculationPoisonRegister = ebx;
25 constexpr Register kInterpreterAccumulatorRegister = eax;
26 constexpr Register kInterpreterBytecodeOffsetRegister = edx;
27 constexpr Register kInterpreterBytecodeArrayRegister = edi;
28 constexpr Register kInterpreterDispatchTableRegister = esi;
29 
30 constexpr Register kJavaScriptCallArgCountRegister = eax;
31 constexpr Register kJavaScriptCallCodeStartRegister = ecx;
32 constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
33 constexpr Register kJavaScriptCallNewTargetRegister = edx;
34 
35 // The ExtraArg1Register not part of the real JS calling convention and is
36 // mostly there to simplify consistent interface descriptor definitions across
37 // platforms. Note that on ia32 it aliases kJavaScriptCallCodeStartRegister.
38 constexpr Register kJavaScriptCallExtraArg1Register = ecx;
39 
40 // The off-heap trampoline does not need a register on ia32 (it uses a
41 // pc-relative call instead).
42 constexpr Register kOffHeapTrampolineRegister = no_reg;
43 
44 constexpr Register kRuntimeCallFunctionRegister = edx;
45 constexpr Register kRuntimeCallArgCountRegister = eax;
46 constexpr Register kRuntimeCallArgvRegister = ecx;
47 constexpr Register kWasmInstanceRegister = esi;
48 
49 // TODO(v8:6666): Implement full support.
50 constexpr Register kRootRegister = ebx;
51 
52 // Convenience for platform-independent signatures.  We do not normally
53 // distinguish memory operands from other operands on ia32.
54 typedef Operand MemOperand;
55 
56 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
57 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
58 
59 class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
60  public:
TurboAssembler(Isolate * isolate,const AssemblerOptions & options,void * buffer,int buffer_size,CodeObjectRequired create_code_object)61   TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
62                  void* buffer, int buffer_size,
63                  CodeObjectRequired create_code_object)
64       : TurboAssemblerBase(isolate, options, buffer, buffer_size,
65                            create_code_object) {}
66 
67   void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
68                      Label* condition_met,
69                      Label::Distance condition_met_distance = Label::kFar);
70 
71   // Activation support.
72   void EnterFrame(StackFrame::Type type);
EnterFrame(StackFrame::Type type,bool load_constant_pool_pointer_reg)73   void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
74     // Out-of-line constant pool not implemented on ia32.
75     UNREACHABLE();
76   }
77   void LeaveFrame(StackFrame::Type type);
78 
79 // Allocate a stack frame of given size (i.e. decrement {esp} by the value
80 // stored in the given register).
81 #ifdef V8_OS_WIN
82   // On win32, take special care if the number of bytes is greater than 4096:
83   // Ensure that each page within the new stack frame is touched once in
84   // decreasing order. See
85   // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
86   // Use {bytes_scratch} as scratch register for this procedure.
87   void AllocateStackFrame(Register bytes_scratch);
88 #else
AllocateStackFrame(Register bytes)89   void AllocateStackFrame(Register bytes) { sub(esp, bytes); }
90 #endif
91 
92   // Print a message to stdout and abort execution.
93   void Abort(AbortReason reason);
94 
95   // Calls Abort(msg) if the condition cc is not satisfied.
96   // Use --debug_code to enable.
97   void Assert(Condition cc, AbortReason reason);
98 
99   // Like Assert(), but without condition.
100   // Use --debug_code to enable.
101   void AssertUnreachable(AbortReason reason);
102 
103   // Like Assert(), but always enabled.
104   void Check(Condition cc, AbortReason reason);
105 
106   // Check that the stack is aligned.
107   void CheckStackAlignment();
108 
InitializeRootRegister()109   void InitializeRootRegister() {
110     // For now, only check sentinel value for root register.
111     // TODO(jgruber,v8:6666): Implement root register.
112     if (FLAG_ia32_verify_root_register && FLAG_embedded_builtins) {
113       mov(kRootRegister, kRootRegisterSentinel);
114     }
115   }
116 
VerifyRootRegister()117   void VerifyRootRegister() {
118     if (FLAG_ia32_verify_root_register && FLAG_embedded_builtins) {
119       Label root_register_ok;
120       cmp(kRootRegister, kRootRegisterSentinel);
121       j(equal, &root_register_ok);
122       int3();
123       bind(&root_register_ok);
124     }
125   }
126 
127   // Move a constant into a destination using the most efficient encoding.
128   void Move(Register dst, const Immediate& src);
Move(Register dst,Smi * src)129   void Move(Register dst, Smi* src) { Move(dst, Immediate(src)); }
130   void Move(Register dst, Handle<HeapObject> src);
131   void Move(Register dst, Register src);
132   void Move(Operand dst, const Immediate& src);
133 
134   // Move an immediate into an XMM register.
135   void Move(XMMRegister dst, uint32_t src);
136   void Move(XMMRegister dst, uint64_t src);
Move(XMMRegister dst,float src)137   void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
Move(XMMRegister dst,double src)138   void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
139 
Call(Register reg)140   void Call(Register reg) { call(reg); }
Call(Label * target)141   void Call(Label* target) { call(target); }
142   void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
143 
144   void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
145 
146   void RetpolineCall(Register reg);
147   void RetpolineCall(Address destination, RelocInfo::Mode rmode);
148 
149   void RetpolineJump(Register reg);
150 
CallForDeoptimization(Address target,int deopt_id,RelocInfo::Mode rmode)151   void CallForDeoptimization(Address target, int deopt_id,
152                              RelocInfo::Mode rmode) {
153     USE(deopt_id);
154     call(target, rmode);
155   }
156 
157   inline bool AllowThisStubCall(CodeStub* stub);
158   void CallStubDelayed(CodeStub* stub);
159 
160   // Call a runtime routine. This expects {centry} to contain a fitting CEntry
161   // builtin for the target runtime function and uses an indirect call.
162   void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
163 
164   // Jump the register contains a smi.
165   inline void JumpIfSmi(Register value, Label* smi_label,
166                         Label::Distance distance = Label::kFar) {
167     test(value, Immediate(kSmiTagMask));
168     j(zero, smi_label, distance);
169   }
170   // Jump if the operand is a smi.
171   inline void JumpIfSmi(Operand value, Label* smi_label,
172                         Label::Distance distance = Label::kFar) {
173     test(value, Immediate(kSmiTagMask));
174     j(zero, smi_label, distance);
175   }
176 
JumpIfEqual(Register a,int32_t b,Label * dest)177   void JumpIfEqual(Register a, int32_t b, Label* dest) {
178     cmp(a, Immediate(b));
179     j(equal, dest);
180   }
181 
JumpIfLessThan(Register a,int32_t b,Label * dest)182   void JumpIfLessThan(Register a, int32_t b, Label* dest) {
183     cmp(a, Immediate(b));
184     j(less, dest);
185   }
186 
SmiUntag(Register reg)187   void SmiUntag(Register reg) { sar(reg, kSmiTagSize); }
188 
189   // Removes current frame and its arguments from the stack preserving the
190   // arguments and a return address pushed to the stack for the next call. Both
191   // |callee_args_count| and |caller_args_count_reg| do not include receiver.
192   // |callee_args_count| is not modified, |caller_args_count_reg| is trashed.
193   // |number_of_temp_values_after_return_address| specifies the number of words
194   // pushed to the stack after the return address. This is to allow "allocation"
195   // of scratch registers that this function requires by saving their values on
196   // the stack.
197   void PrepareForTailCall(const ParameterCount& callee_args_count,
198                           Register caller_args_count_reg, Register scratch0,
199                           Register scratch1,
200                           int number_of_temp_values_after_return_address);
201 
202   // Before calling a C-function from generated code, align arguments on stack.
203   // After aligning the frame, arguments must be stored in esp[0], esp[4],
204   // etc., not pushed. The argument count assumes all arguments are word sized.
205   // Some compilers/platforms require the stack to be aligned when calling
206   // C++ code.
207   // Needs a scratch register to do some arithmetic. This register will be
208   // trashed.
209   void PrepareCallCFunction(int num_arguments, Register scratch);
210 
211   // Calls a C function and cleans up the space for arguments allocated
212   // by PrepareCallCFunction. The called function is not allowed to trigger a
213   // garbage collection, since that might move the code and invalidate the
214   // return address (unless this is somehow accounted for by the called
215   // function).
216   void CallCFunction(ExternalReference function, int num_arguments);
217   void CallCFunction(Register function, int num_arguments);
218 
219   void ShlPair(Register high, Register low, uint8_t imm8);
220   void ShlPair_cl(Register high, Register low);
221   void ShrPair(Register high, Register low, uint8_t imm8);
222   void ShrPair_cl(Register high, Register low);
223   void SarPair(Register high, Register low, uint8_t imm8);
224   void SarPair_cl(Register high, Register low);
225 
226   // Generates function and stub prologue code.
227   void StubPrologue(StackFrame::Type type);
228   void Prologue();
229 
Lzcnt(Register dst,Register src)230   void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
231   void Lzcnt(Register dst, Operand src);
232 
Tzcnt(Register dst,Register src)233   void Tzcnt(Register dst, Register src) { Tzcnt(dst, Operand(src)); }
234   void Tzcnt(Register dst, Operand src);
235 
Popcnt(Register dst,Register src)236   void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
237   void Popcnt(Register dst, Operand src);
238 
239   void Ret();
240 
241   void LoadRoot(Register destination, Heap::RootListIndex index) override;
242 
243   // Indirect root-relative loads.
244   void LoadFromConstantsTable(Register destination,
245                               int constant_index) override;
246   void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
247   void LoadRootRelative(Register destination, int32_t offset) override;
248 
249   void LoadAddress(Register destination, ExternalReference source);
250 
251   // Wrapper functions to ensure external reference operands produce
252   // isolate-independent code if needed.
253   Operand StaticVariable(const ExternalReference& ext);
254   Operand StaticArray(Register index, ScaleFactor scale,
255                       const ExternalReference& ext);
256 
257   // Return and drop arguments from stack, where the number of arguments
258   // may be bigger than 2^16 - 1.  Requires a scratch register.
259   void Ret(int bytes_dropped, Register scratch);
260 
Pshufhw(XMMRegister dst,XMMRegister src,uint8_t shuffle)261   void Pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
262     Pshufhw(dst, Operand(src), shuffle);
263   }
264   void Pshufhw(XMMRegister dst, Operand src, uint8_t shuffle);
Pshuflw(XMMRegister dst,XMMRegister src,uint8_t shuffle)265   void Pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
266     Pshuflw(dst, Operand(src), shuffle);
267   }
268   void Pshuflw(XMMRegister dst, Operand src, uint8_t shuffle);
Pshufd(XMMRegister dst,XMMRegister src,uint8_t shuffle)269   void Pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
270     Pshufd(dst, Operand(src), shuffle);
271   }
272   void Pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
273   void Psraw(XMMRegister dst, int8_t shift);
274   void Psrlw(XMMRegister dst, int8_t shift);
275 
276 // SSE/SSE2 instructions with AVX version.
277 #define AVX_OP2_WITH_TYPE(macro_name, name, dst_type, src_type) \
278   void macro_name(dst_type dst, src_type src) {                 \
279     if (CpuFeatures::IsSupported(AVX)) {                        \
280       CpuFeatureScope scope(this, AVX);                         \
281       v##name(dst, src);                                        \
282     } else {                                                    \
283       name(dst, src);                                           \
284     }                                                           \
285   }
286 
AVX_OP2_WITH_TYPE(Rcpps,rcpps,XMMRegister,const Operand &)287   AVX_OP2_WITH_TYPE(Rcpps, rcpps, XMMRegister, const Operand&)
288   AVX_OP2_WITH_TYPE(Rsqrtps, rsqrtps, XMMRegister, const Operand&)
289   AVX_OP2_WITH_TYPE(Movdqu, movdqu, XMMRegister, Operand)
290   AVX_OP2_WITH_TYPE(Movdqu, movdqu, Operand, XMMRegister)
291   AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, Register)
292   AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, Operand)
293   AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister)
294   AVX_OP2_WITH_TYPE(Movd, movd, Operand, XMMRegister)
295   AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, Operand)
296 
297 #undef AVX_OP2_WITH_TYPE
298 
299 // Only use these macros when non-destructive source of AVX version is not
300 // needed.
301 #define AVX_OP3_WITH_TYPE(macro_name, name, dst_type, src_type) \
302   void macro_name(dst_type dst, src_type src) {                 \
303     if (CpuFeatures::IsSupported(AVX)) {                        \
304       CpuFeatureScope scope(this, AVX);                         \
305       v##name(dst, dst, src);                                   \
306     } else {                                                    \
307       name(dst, src);                                           \
308     }                                                           \
309   }
310 #define AVX_OP3_XO(macro_name, name)                            \
311   AVX_OP3_WITH_TYPE(macro_name, name, XMMRegister, XMMRegister) \
312   AVX_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand)
313 
314   AVX_OP3_XO(Packsswb, packsswb)
315   AVX_OP3_XO(Packuswb, packuswb)
316   AVX_OP3_XO(Pcmpeqb, pcmpeqb)
317   AVX_OP3_XO(Pcmpeqw, pcmpeqw)
318   AVX_OP3_XO(Pcmpeqd, pcmpeqd)
319   AVX_OP3_XO(Psubb, psubb)
320   AVX_OP3_XO(Psubw, psubw)
321   AVX_OP3_XO(Psubd, psubd)
322   AVX_OP3_XO(Punpcklbw, punpcklbw)
323   AVX_OP3_XO(Punpckhbw, punpckhbw)
324   AVX_OP3_XO(Pxor, pxor)
325   AVX_OP3_XO(Andps, andps)
326   AVX_OP3_XO(Andpd, andpd)
327   AVX_OP3_XO(Xorps, xorps)
328   AVX_OP3_XO(Xorpd, xorpd)
329   AVX_OP3_XO(Sqrtss, sqrtss)
330   AVX_OP3_XO(Sqrtsd, sqrtsd)
331 
332 #undef AVX_OP3_XO
333 #undef AVX_OP3_WITH_TYPE
334 
335 // Non-SSE2 instructions.
336 #define AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, dst_type, src_type, \
337                                 sse_scope)                            \
338   void macro_name(dst_type dst, src_type src) {                       \
339     if (CpuFeatures::IsSupported(AVX)) {                              \
340       CpuFeatureScope scope(this, AVX);                               \
341       v##name(dst, src);                                              \
342       return;                                                         \
343     }                                                                 \
344     if (CpuFeatures::IsSupported(sse_scope)) {                        \
345       CpuFeatureScope scope(this, sse_scope);                         \
346       name(dst, src);                                                 \
347       return;                                                         \
348     }                                                                 \
349     UNREACHABLE();                                                    \
350   }
351 #define AVX_OP2_XO_SSE4(macro_name, name)                                     \
352   AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, XMMRegister, SSE4_1) \
353   AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, Operand, SSE4_1)
354 
355   AVX_OP2_XO_SSE4(Ptest, ptest)
356   AVX_OP2_XO_SSE4(Pmovsxbw, pmovsxbw)
357   AVX_OP2_XO_SSE4(Pmovsxwd, pmovsxwd)
358   AVX_OP2_XO_SSE4(Pmovzxbw, pmovzxbw)
359   AVX_OP2_XO_SSE4(Pmovzxwd, pmovzxwd)
360 
361 #undef AVX_OP2_WITH_TYPE_SCOPE
362 #undef AVX_OP2_XO_SSE4
363 
364   void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, Operand(src)); }
365   void Pshufb(XMMRegister dst, Operand src);
Pblendw(XMMRegister dst,XMMRegister src,uint8_t imm8)366   void Pblendw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
367     Pblendw(dst, Operand(src), imm8);
368   }
369   void Pblendw(XMMRegister dst, Operand src, uint8_t imm8);
370 
Psignb(XMMRegister dst,XMMRegister src)371   void Psignb(XMMRegister dst, XMMRegister src) { Psignb(dst, Operand(src)); }
372   void Psignb(XMMRegister dst, Operand src);
Psignw(XMMRegister dst,XMMRegister src)373   void Psignw(XMMRegister dst, XMMRegister src) { Psignw(dst, Operand(src)); }
374   void Psignw(XMMRegister dst, Operand src);
Psignd(XMMRegister dst,XMMRegister src)375   void Psignd(XMMRegister dst, XMMRegister src) { Psignd(dst, Operand(src)); }
376   void Psignd(XMMRegister dst, Operand src);
377 
Palignr(XMMRegister dst,XMMRegister src,uint8_t imm8)378   void Palignr(XMMRegister dst, XMMRegister src, uint8_t imm8) {
379     Palignr(dst, Operand(src), imm8);
380   }
381   void Palignr(XMMRegister dst, Operand src, uint8_t imm8);
382 
383   void Pextrb(Register dst, XMMRegister src, int8_t imm8);
384   void Pextrw(Register dst, XMMRegister src, int8_t imm8);
385   void Pextrd(Register dst, XMMRegister src, int8_t imm8);
386   void Pinsrd(XMMRegister dst, Register src, int8_t imm8,
387               bool is_64_bits = false) {
388     Pinsrd(dst, Operand(src), imm8, is_64_bits);
389   }
390   void Pinsrd(XMMRegister dst, Operand src, int8_t imm8,
391               bool is_64_bits = false);
392 
393   // Expression support
394   // cvtsi2sd instruction only writes to the low 64-bit of dst register, which
395   // hinders register renaming and makes dependence chains longer. So we use
396   // xorps to clear the dst register before cvtsi2sd to solve this issue.
Cvtsi2ss(XMMRegister dst,Register src)397   void Cvtsi2ss(XMMRegister dst, Register src) { Cvtsi2ss(dst, Operand(src)); }
398   void Cvtsi2ss(XMMRegister dst, Operand src);
Cvtsi2sd(XMMRegister dst,Register src)399   void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
400   void Cvtsi2sd(XMMRegister dst, Operand src);
401 
Cvtui2ss(XMMRegister dst,Register src,Register tmp)402   void Cvtui2ss(XMMRegister dst, Register src, Register tmp) {
403     Cvtui2ss(dst, Operand(src), tmp);
404   }
405   void Cvtui2ss(XMMRegister dst, Operand src, Register tmp);
Cvttss2ui(Register dst,XMMRegister src,XMMRegister tmp)406   void Cvttss2ui(Register dst, XMMRegister src, XMMRegister tmp) {
407     Cvttss2ui(dst, Operand(src), tmp);
408   }
409   void Cvttss2ui(Register dst, Operand src, XMMRegister tmp);
Cvtui2sd(XMMRegister dst,Register src)410   void Cvtui2sd(XMMRegister dst, Register src) { Cvtui2sd(dst, Operand(src)); }
411   void Cvtui2sd(XMMRegister dst, Operand src);
Cvttsd2ui(Register dst,XMMRegister src,XMMRegister tmp)412   void Cvttsd2ui(Register dst, XMMRegister src, XMMRegister tmp) {
413     Cvttsd2ui(dst, Operand(src), tmp);
414   }
415   void Cvttsd2ui(Register dst, Operand src, XMMRegister tmp);
416 
Push(Register src)417   void Push(Register src) { push(src); }
Push(Operand src)418   void Push(Operand src) { push(src); }
Push(Immediate value)419   void Push(Immediate value) { push(value); }
Push(Handle<HeapObject> handle)420   void Push(Handle<HeapObject> handle) { push(Immediate(handle)); }
Push(Smi * smi)421   void Push(Smi* smi) { Push(Immediate(smi)); }
422 
423   void SaveRegisters(RegList registers);
424   void RestoreRegisters(RegList registers);
425 
426   void CallRecordWriteStub(Register object, Register address,
427                            RememberedSetAction remembered_set_action,
428                            SaveFPRegsMode fp_mode);
429 
430   // Calculate how much stack space (in bytes) are required to store caller
431   // registers excluding those specified in the arguments.
432   int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
433                                       Register exclusion1 = no_reg,
434                                       Register exclusion2 = no_reg,
435                                       Register exclusion3 = no_reg) const;
436 
437   // PushCallerSaved and PopCallerSaved do not arrange the registers in any
438   // particular order so they are not useful for calls that can cause a GC.
439   // The caller can exclude up to 3 registers that do not need to be saved and
440   // restored.
441 
442   // Push caller saved registers on the stack, and return the number of bytes
443   // stack pointer is adjusted.
444   int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
445                       Register exclusion2 = no_reg,
446                       Register exclusion3 = no_reg);
447   // Restore caller saved registers from the stack, and return the number of
448   // bytes stack pointer is adjusted.
449   int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
450                      Register exclusion2 = no_reg,
451                      Register exclusion3 = no_reg);
452 
453   // Compute the start of the generated instruction stream from the current PC.
454   // This is an alternative to embedding the {CodeObject} handle as a reference.
455   void ComputeCodeStartAddress(Register dst);
456 
457   void ResetSpeculationPoisonRegister();
458 };
459 
460 // MacroAssembler implements a collection of frequently used macros.
461 class MacroAssembler : public TurboAssembler {
462  public:
MacroAssembler(Isolate * isolate,void * buffer,int size,CodeObjectRequired create_code_object)463   MacroAssembler(Isolate* isolate, void* buffer, int size,
464                  CodeObjectRequired create_code_object)
465       : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
466                        size, create_code_object) {}
467   MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
468                  void* buffer, int size, CodeObjectRequired create_code_object);
469 
470   // Load a register with a long value as efficiently as possible.
Set(Register dst,int32_t x)471   void Set(Register dst, int32_t x) {
472     if (x == 0) {
473       xor_(dst, dst);
474     } else {
475       mov(dst, Immediate(x));
476     }
477   }
Set(Operand dst,int32_t x)478   void Set(Operand dst, int32_t x) { mov(dst, Immediate(x)); }
479 
480   // Operations on roots in the root-array.
481   void CompareRoot(Register with, Register scratch, Heap::RootListIndex index);
482   // These methods can only be used with constant roots (i.e. non-writable
483   // and not in new space).
484   void CompareRoot(Register with, Heap::RootListIndex index);
485   void CompareRoot(Operand with, Heap::RootListIndex index);
486   void PushRoot(Heap::RootListIndex index);
487 
488   // Compare the object in a register to a value and jump if they are equal.
489   void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
490                   Label::Distance if_equal_distance = Label::kFar) {
491     CompareRoot(with, index);
492     j(equal, if_equal, if_equal_distance);
493   }
494   void JumpIfRoot(Operand with, Heap::RootListIndex index, Label* if_equal,
495                   Label::Distance if_equal_distance = Label::kFar) {
496     CompareRoot(with, index);
497     j(equal, if_equal, if_equal_distance);
498   }
499 
500   // Compare the object in a register to a value and jump if they are not equal.
501   void JumpIfNotRoot(Register with, Heap::RootListIndex index,
502                      Label* if_not_equal,
503                      Label::Distance if_not_equal_distance = Label::kFar) {
504     CompareRoot(with, index);
505     j(not_equal, if_not_equal, if_not_equal_distance);
506   }
507   void JumpIfNotRoot(Operand with, Heap::RootListIndex index,
508                      Label* if_not_equal,
509                      Label::Distance if_not_equal_distance = Label::kFar) {
510     CompareRoot(with, index);
511     j(not_equal, if_not_equal, if_not_equal_distance);
512   }
513 
514   // ---------------------------------------------------------------------------
515   // GC Support
516   // Notify the garbage collector that we wrote a pointer into an object.
517   // |object| is the object being stored into, |value| is the object being
518   // stored.  value and scratch registers are clobbered by the operation.
519   // The offset is the offset from the start of the object, not the offset from
520   // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
521   void RecordWriteField(
522       Register object, int offset, Register value, Register scratch,
523       SaveFPRegsMode save_fp,
524       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
525       SmiCheck smi_check = INLINE_SMI_CHECK);
526 
527   // For page containing |object| mark region covering |address|
528   // dirty. |object| is the object being stored into, |value| is the
529   // object being stored. The address and value registers are clobbered by the
530   // operation. RecordWrite filters out smis so it does not update the
531   // write barrier if the value is a smi.
532   void RecordWrite(
533       Register object, Register address, Register value, SaveFPRegsMode save_fp,
534       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
535       SmiCheck smi_check = INLINE_SMI_CHECK);
536 
537   // Frame restart support
538   void MaybeDropFrames();
539 
540   // Enter specific kind of exit frame. Expects the number of
541   // arguments in register eax and sets up the number of arguments in
542   // register edi and the pointer to the first argument in register
543   // esi.
544   void EnterExitFrame(int argc, bool save_doubles, StackFrame::Type frame_type);
545 
546   void EnterApiExitFrame(int argc);
547 
548   // Leave the current exit frame. Expects the return value in
549   // register eax:edx (untouched) and the pointer to the first
550   // argument in register esi (if pop_arguments == true).
551   void LeaveExitFrame(bool save_doubles, bool pop_arguments = true);
552 
553   // Leave the current exit frame. Expects the return value in
554   // register eax (untouched).
555   void LeaveApiExitFrame();
556 
557   // Load the global proxy from the current context.
558   void LoadGlobalProxy(Register dst);
559 
560   // Load the global function with the given index.
561   void LoadGlobalFunction(int index, Register function);
562 
563   // Push and pop the registers that can hold pointers.
PushSafepointRegisters()564   void PushSafepointRegisters() { pushad(); }
PopSafepointRegisters()565   void PopSafepointRegisters() { popad(); }
566 
567   // ---------------------------------------------------------------------------
568   // JavaScript invokes
569 
570 
571   // Invoke the JavaScript function code by either calling or jumping.
572 
573   void InvokeFunctionCode(Register function, Register new_target,
574                           const ParameterCount& expected,
575                           const ParameterCount& actual, InvokeFlag flag);
576 
577   // On function call, call into the debugger if necessary.
578   // This may clobber ecx.
579   void CheckDebugHook(Register fun, Register new_target,
580                       const ParameterCount& expected,
581                       const ParameterCount& actual);
582 
583   // Invoke the JavaScript function in the given register. Changes the
584   // current context to the context in the function before invoking.
585   void InvokeFunction(Register function, Register new_target,
586                       const ParameterCount& actual, InvokeFlag flag);
587 
588   void InvokeFunction(Register function, const ParameterCount& expected,
589                       const ParameterCount& actual, InvokeFlag flag);
590 
591   // Compare object type for heap object.
592   // Incoming register is heap_object and outgoing register is map.
593   void CmpObjectType(Register heap_object, InstanceType type, Register map);
594 
595   // Compare instance type for map.
596   void CmpInstanceType(Register map, InstanceType type);
597 
598   void DoubleToI(Register result_reg, XMMRegister input_reg,
599                  XMMRegister scratch, Label* lost_precision, Label* is_nan,
600                  Label::Distance dst = Label::kFar);
601 
602   // Smi tagging support.
SmiTag(Register reg)603   void SmiTag(Register reg) {
604     STATIC_ASSERT(kSmiTag == 0);
605     STATIC_ASSERT(kSmiTagSize == 1);
606     add(reg, reg);
607   }
608 
609   // Modifies the register even if it does not contain a Smi!
UntagSmi(Register reg,Label * is_smi)610   void UntagSmi(Register reg, Label* is_smi) {
611     STATIC_ASSERT(kSmiTagSize == 1);
612     sar(reg, kSmiTagSize);
613     STATIC_ASSERT(kSmiTag == 0);
614     j(not_carry, is_smi);
615   }
616 
617   // Jump if register contain a non-smi.
618   inline void JumpIfNotSmi(Register value, Label* not_smi_label,
619                            Label::Distance distance = Label::kFar) {
620     test(value, Immediate(kSmiTagMask));
621     j(not_zero, not_smi_label, distance);
622   }
623   // Jump if the operand is not a smi.
624   inline void JumpIfNotSmi(Operand value, Label* smi_label,
625                            Label::Distance distance = Label::kFar) {
626     test(value, Immediate(kSmiTagMask));
627     j(not_zero, smi_label, distance);
628   }
629 
630   template<typename Field>
DecodeField(Register reg)631   void DecodeField(Register reg) {
632     static const int shift = Field::kShift;
633     static const int mask = Field::kMask >> Field::kShift;
634     if (shift != 0) {
635       sar(reg, shift);
636     }
637     and_(reg, Immediate(mask));
638   }
639 
  // Abort execution if argument is not a smi, enabled via --debug-code.
  void AssertSmi(Register object);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a Constructor, enabled via --debug-code.
  void AssertConstructor(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject (or subclass),
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object);

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new stack handler and link it into the stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  void PopStackHandler();

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.  Generate the code if necessary.
  void CallStub(CodeStub* stub);

  // Tail call a code stub (jump).  Generate the code if necessary.
  void TailCallStub(CodeStub* stub);

  // Call a runtime routine with an explicit argument count, optionally
  // preserving double registers across the call.
  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
685 
686   // Convenience function: Same as above, but takes the fid instead.
687   void CallRuntime(Runtime::FunctionId fid,
688                    SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
689     const Runtime::Function* function = Runtime::FunctionForId(fid);
690     CallRuntime(function, function->nargs, save_doubles);
691   }
692 
693   // Convenience function: Same as above, but takes the fid instead.
694   void CallRuntime(Runtime::FunctionId fid, int num_arguments,
695                    SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
696     CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
697   }
698 
  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& ext,
                               bool builtin_exit_frame = false);

  // Generates a trampoline to jump to the off-heap instruction stream at
  // |entry|.
  void JumpToInstructionStream(Address entry);

  // ---------------------------------------------------------------------------
  // Utilities

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the esp register.
  void Drop(int element_count);
  // Pop the value on top of the stack into a register or memory operand.
  void Pop(Register dst) { pop(dst); }
  void Pop(Operand dst) { pop(dst); }
  // The return address sits on top of the stack on ia32; these helpers push
  // and pop it explicitly so code can manipulate the stack beneath it.
  void PushReturnAddressFrom(Register src) { push(src); }
  void PopReturnAddressTo(Register dst) { pop(dst); }
720 
  // ---------------------------------------------------------------------------
  // In-place weak references.

  // NOTE(review): presumably jumps to |target_if_cleared| when |in_out| holds
  // a cleared weak reference, and otherwise unwraps the value in place —
  // confirm against the implementation in the .cc file.
  void LoadWeakValue(Register in_out, Label* target_if_cleared);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  // Adjust the given StatsCounter by |value|.
  void IncrementCounter(StatsCounter* counter, int value);
  void DecrementCounter(StatsCounter* counter, int value);
SafepointRegisterStackIndex(Register reg)731   static int SafepointRegisterStackIndex(Register reg) {
732     return SafepointRegisterStackIndex(reg.code());
733   }
734 
  void EnterBuiltinFrame(Register context, Register target, Register argc);
  void LeaveBuiltinFrame(Register context, Register target, Register argc);

 private:
  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual, Label* done,
                      bool* definitely_mismatches, InvokeFlag flag,
                      Label::Distance done_distance);

  void EnterExitFramePrologue(StackFrame::Type frame_type);
  void EnterExitFrameEpilogue(int argc, bool save_doubles);

  void LeaveExitFrameEpilogue();

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object, Register scratch, Condition cc,
                  Label* condition_met,
                  Label::Distance condition_met_distance = Label::kFar);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};
762 
763 // -----------------------------------------------------------------------------
764 // Static helper functions.
765 
766 // Generate an Operand for loading a field from an object.
FieldOperand(Register object,int offset)767 inline Operand FieldOperand(Register object, int offset) {
768   return Operand(object, offset - kHeapObjectTag);
769 }
770 
771 // Generate an Operand for loading an indexed field from an object.
FieldOperand(Register object,Register index,ScaleFactor scale,int offset)772 inline Operand FieldOperand(Register object, Register index, ScaleFactor scale,
773                             int offset) {
774   return Operand(object, index, scale, offset - kHeapObjectTag);
775 }
776 
777 inline Operand FixedArrayElementOperand(Register array, Register index_as_smi,
778                                         int additional_offset = 0) {
779   int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
780   return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
781 }
782 
ContextOperand(Register context,int index)783 inline Operand ContextOperand(Register context, int index) {
784   return Operand(context, Context::SlotOffset(index));
785 }
786 
ContextOperand(Register context,Register index)787 inline Operand ContextOperand(Register context, Register index) {
788   return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
789 }
790 
NativeContextOperand()791 inline Operand NativeContextOperand() {
792   return ContextOperand(esi, Context::NATIVE_CONTEXT_INDEX);
793 }
794 
// Convenience macro: code generators write ACCESS_MASM(masm) Foo(...), which
// expands to masm->Foo(...).
#define ACCESS_MASM(masm) masm->
796 
797 }  // namespace internal
798 }  // namespace v8
799 
800 #endif  // V8_IA32_MACRO_ASSEMBLER_IA32_H_
801