1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
6 #define V8_X64_MACRO_ASSEMBLER_X64_H_
7 
8 #include "src/assembler.h"
9 #include "src/frames.h"
10 #include "src/globals.h"
11 
12 namespace v8 {
13 namespace internal {
14 
15 // Default scratch register used by MacroAssembler (and other code that needs
16 // a spare register). The register isn't callee-saved and isn't used by the
17 // function calling convention.
18 const Register kScratchRegister = { 10 };      // r10.
19 const Register kSmiConstantRegister = { 12 };  // r12 (callee save).
20 const Register kRootRegister = { 13 };         // r13 (callee save).
21 // Value of smi in kSmiConstantRegister.
22 const int kSmiConstantRegisterValue = 1;
23 // Actual value of root register is offset from the root array's start
24 // to take advantage of negative 8-bit displacement values.
25 const int kRootRegisterBias = 128;
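// Editor's note (illustrative, not part of the original header): with this
// bias, a root at index i is typically addressed as
//   Operand(kRootRegister, i * kPointerSize - kRootRegisterBias)
// so, assuming 8-byte root entries, the first 32 entries (offsets 0..255 from
// the array start) land in the signed 8-bit displacement range [-128, 127]
// and encode with a one-byte rather than a four-byte displacement.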
26 
27 // Convenience for platform-independent signatures.
28 typedef Operand MemOperand;
29 
30 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
31 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
32 enum PointersToHereCheck {
33   kPointersToHereMaybeInteresting,
34   kPointersToHereAreAlwaysInteresting
35 };
36 
37 enum SmiOperationConstraint {
38   PRESERVE_SOURCE_REGISTER,
39   BAILOUT_ON_NO_OVERFLOW,
40   BAILOUT_ON_OVERFLOW,
41   NUMBER_OF_CONSTRAINTS
42 };
43 
44 STATIC_ASSERT(NUMBER_OF_CONSTRAINTS <= 8);
45 
46 class SmiOperationExecutionMode : public EnumSet<SmiOperationConstraint, byte> {
47  public:
48   SmiOperationExecutionMode() : EnumSet<SmiOperationConstraint, byte>(0) { }
49   explicit SmiOperationExecutionMode(byte bits)
50       : EnumSet<SmiOperationConstraint, byte>(bits) { }
51 };
52 
53 bool AreAliased(Register r1, Register r2, Register r3, Register r4);
54 
55 // Forward declaration.
56 class JumpTarget;
57 
58 struct SmiIndex {
59   SmiIndex(Register index_register, ScaleFactor scale)
60       : reg(index_register),
61         scale(scale) {}
62   Register reg;
63   ScaleFactor scale;
64 };
65 
66 
67 // MacroAssembler implements a collection of frequently used macros.
68 class MacroAssembler: public Assembler {
69  public:
70   // The isolate parameter can be NULL if the macro assembler should
71   // not use isolate-dependent functionality. In this case, it's the
72   // responsibility of the caller to never invoke such function on the
73   // macro assembler.
74   MacroAssembler(Isolate* isolate, void* buffer, int size);
75 
76   // Prevent the use of the RootArray during the lifetime of this
77   // scope object.
78   class NoRootArrayScope BASE_EMBEDDED {
79    public:
80     explicit NoRootArrayScope(MacroAssembler* assembler)
81         : variable_(&assembler->root_array_available_),
82           old_value_(assembler->root_array_available_) {
83       assembler->root_array_available_ = false;
84     }
85     ~NoRootArrayScope() {
86       *variable_ = old_value_;
87     }
88    private:
89     bool* variable_;
90     bool old_value_;
91   };
92 
93   // Operand pointing to an external reference.
94   // May emit code to set up the scratch register. The operand is
95   // only guaranteed to be correct as long as the scratch register
96   // isn't changed.
97   // If the operand is used more than once, use a scratch register
98   // that is guaranteed not to be clobbered.
99   Operand ExternalOperand(ExternalReference reference,
100                           Register scratch = kScratchRegister);
101   // Loads and stores the value of an external reference.
102   // Special case code for load and store to take advantage of
103   // load_rax/store_rax if possible/necessary.
104   // For other operations, just use:
105   //   Operand operand = ExternalOperand(extref);
106   //   operation(operand, ..);
107   void Load(Register destination, ExternalReference source);
108   void Store(ExternalReference destination, Register source);
109   // Loads the address of the external reference into the destination
110   // register.
111   void LoadAddress(Register destination, ExternalReference source);
112   // Returns the size of the code generated by LoadAddress.
113   // Used by CallSize(ExternalReference) to find the size of a call.
114   int LoadAddressSize(ExternalReference source);
115   // Pushes the address of the external reference onto the stack.
116   void PushAddress(ExternalReference source);
117 
118   // Operations on roots in the root-array.
119   void LoadRoot(Register destination, Heap::RootListIndex index);
120   void StoreRoot(Register source, Heap::RootListIndex index);
121   // Load a root value where the index (or part of it) is variable.
122   // The variable_offset register is added to the fixed_offset value
123   // to get the index into the root-array.
124   void LoadRootIndexed(Register destination,
125                        Register variable_offset,
126                        int fixed_offset);
127   void CompareRoot(Register with, Heap::RootListIndex index);
128   void CompareRoot(const Operand& with, Heap::RootListIndex index);
129   void PushRoot(Heap::RootListIndex index);
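  // Editor's sketch of a typical root comparison (illustrative only; the
  // register and label here are hypothetical):
  //   CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  //   j(equal, &is_undefined);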
130 
131   // These functions do not arrange the registers in any particular order so
132   // they are not useful for calls that can cause a GC.  The caller can
133   // exclude up to 3 registers that do not need to be saved and restored.
134   void PushCallerSaved(SaveFPRegsMode fp_mode,
135                        Register exclusion1 = no_reg,
136                        Register exclusion2 = no_reg,
137                        Register exclusion3 = no_reg);
138   void PopCallerSaved(SaveFPRegsMode fp_mode,
139                       Register exclusion1 = no_reg,
140                       Register exclusion2 = no_reg,
141                       Register exclusion3 = no_reg);
142 
143 // ---------------------------------------------------------------------------
144 // GC Support
145 
146 
147   enum RememberedSetFinalAction {
148     kReturnAtEnd,
149     kFallThroughAtEnd
150   };
151 
152   // Record in the remembered set the fact that we have a pointer to new space
153   // at the address pointed to by the addr register.  Only works if addr is not
154   // in new space.
155   void RememberedSetHelper(Register object,  // Used for debug code.
156                            Register addr,
157                            Register scratch,
158                            SaveFPRegsMode save_fp,
159                            RememberedSetFinalAction and_then);
160 
161   void CheckPageFlag(Register object,
162                      Register scratch,
163                      int mask,
164                      Condition cc,
165                      Label* condition_met,
166                      Label::Distance condition_met_distance = Label::kFar);
167 
168   void CheckMapDeprecated(Handle<Map> map,
169                           Register scratch,
170                           Label* if_deprecated);
171 
172   // Check if object is in new space.  Jumps if the object is not in new space.
173   // The register scratch can be object itself, but scratch will be clobbered.
174   void JumpIfNotInNewSpace(Register object,
175                            Register scratch,
176                            Label* branch,
177                            Label::Distance distance = Label::kFar) {
178     InNewSpace(object, scratch, not_equal, branch, distance);
179   }
180 
181   // Check if object is in new space.  Jumps if the object is in new space.
182   // The register scratch can be object itself, but it will be clobbered.
183   void JumpIfInNewSpace(Register object,
184                         Register scratch,
185                         Label* branch,
186                         Label::Distance distance = Label::kFar) {
187     InNewSpace(object, scratch, equal, branch, distance);
188   }
189 
190   // Check if an object has the black incremental marking color.  Also uses rcx!
191   void JumpIfBlack(Register object,
192                    Register scratch0,
193                    Register scratch1,
194                    Label* on_black,
195                    Label::Distance on_black_distance = Label::kFar);
196 
197   // Detects conservatively whether an object is data-only, i.e. it does not
198   // need to be scanned by the garbage collector.
199   void JumpIfDataObject(Register value,
200                         Register scratch,
201                         Label* not_data_object,
202                         Label::Distance not_data_object_distance);
203 
204   // Checks the color of an object.  If the object is already grey or black
205   // then we just fall through, since it is already live.  If it is white and
206   // we can determine that it doesn't need to be scanned, then we just mark it
207   // black and fall through.  For the rest we jump to the label so the
208   // incremental marker can fix its assumptions.
209   void EnsureNotWhite(Register object,
210                       Register scratch1,
211                       Register scratch2,
212                       Label* object_is_white_and_not_data,
213                       Label::Distance distance);
214 
215   // Notify the garbage collector that we wrote a pointer into an object.
216   // |object| is the object being stored into, |value| is the object being
217   // stored.  value and scratch registers are clobbered by the operation.
218   // The offset is the offset from the start of the object, not the offset from
219   // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
220   void RecordWriteField(
221       Register object,
222       int offset,
223       Register value,
224       Register scratch,
225       SaveFPRegsMode save_fp,
226       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
227       SmiCheck smi_check = INLINE_SMI_CHECK,
228       PointersToHereCheck pointers_to_here_check_for_value =
229           kPointersToHereMaybeInteresting);
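  // Editor's sketch of a typical call (illustrative only; the field, value
  // and scratch registers are hypothetical, and "__" stands for the usual
  // ACCESS_MASM shorthand used in the .cc files):
  //   __ movp(FieldOperand(rbx, JSObject::kPropertiesOffset), rax);
  //   __ RecordWriteField(rbx, JSObject::kPropertiesOffset, rax, rcx,
  //                       kDontSaveFPRegs);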
230 
231   // As above, but the offset has the tag presubtracted.  For use with
232   // Operand(reg, off).
233   void RecordWriteContextSlot(
234       Register context,
235       int offset,
236       Register value,
237       Register scratch,
238       SaveFPRegsMode save_fp,
239       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
240       SmiCheck smi_check = INLINE_SMI_CHECK,
241       PointersToHereCheck pointers_to_here_check_for_value =
242           kPointersToHereMaybeInteresting) {
243     RecordWriteField(context,
244                      offset + kHeapObjectTag,
245                      value,
246                      scratch,
247                      save_fp,
248                      remembered_set_action,
249                      smi_check,
250                      pointers_to_here_check_for_value);
251   }
252 
253   // Notify the garbage collector that we wrote a pointer into a fixed array.
254   // |array| is the array being stored into, |value| is the
255   // object being stored.  |index| is the array index represented as a non-smi.
256   // All registers are clobbered by the operation.  RecordWriteArray
257   // filters out smis so it does not update the write barrier if the
258   // value is a smi.
259   void RecordWriteArray(
260       Register array,
261       Register value,
262       Register index,
263       SaveFPRegsMode save_fp,
264       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
265       SmiCheck smi_check = INLINE_SMI_CHECK,
266       PointersToHereCheck pointers_to_here_check_for_value =
267           kPointersToHereMaybeInteresting);
268 
269   void RecordWriteForMap(
270       Register object,
271       Register map,
272       Register dst,
273       SaveFPRegsMode save_fp);
274 
275   // For page containing |object| mark region covering |address|
276   // dirty. |object| is the object being stored into, |value| is the
277   // object being stored. The address and value registers are clobbered by the
278   // operation.  RecordWrite filters out smis so it does not update
279   // the write barrier if the value is a smi.
280   void RecordWrite(
281       Register object,
282       Register address,
283       Register value,
284       SaveFPRegsMode save_fp,
285       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
286       SmiCheck smi_check = INLINE_SMI_CHECK,
287       PointersToHereCheck pointers_to_here_check_for_value =
288           kPointersToHereMaybeInteresting);
289 
290   // ---------------------------------------------------------------------------
291   // Debugger Support
292 
293   void DebugBreak();
294 
295   // Generates function and stub prologue code.
296   void StubPrologue();
297   void Prologue(bool code_pre_aging);
298 
299   // Enter specific kind of exit frame; either in normal or
300   // debug mode. Expects the number of arguments in register rax and
301   // sets up the number of arguments in register rdi and the pointer
302   // to the first argument in register rsi.
303   //
304   // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
305   // accessible via StackSpaceOperand.
306   void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false);
307 
308   // Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
309   // memory (not GCed) on the stack accessible via StackSpaceOperand.
310   void EnterApiExitFrame(int arg_stack_space);
311 
312   // Leave the current exit frame. Expects/provides the return value in
313   // register rax:rdx (untouched) and the pointer to the first
314   // argument in register rsi.
315   void LeaveExitFrame(bool save_doubles = false);
316 
317   // Leave the current exit frame. Expects/provides the return value in
318   // register rax (untouched).
319   void LeaveApiExitFrame(bool restore_context);
320 
321   // Push and pop the registers that can hold pointers.
322   void PushSafepointRegisters() { Pushad(); }
323   void PopSafepointRegisters() { Popad(); }
324   // Store the value in register src in the safepoint register stack
325   // slot for register dst.
326   void StoreToSafepointRegisterSlot(Register dst, const Immediate& imm);
327   void StoreToSafepointRegisterSlot(Register dst, Register src);
328   void LoadFromSafepointRegisterSlot(Register dst, Register src);
329 
330   void InitializeRootRegister() {
331     ExternalReference roots_array_start =
332         ExternalReference::roots_array_start(isolate());
333     Move(kRootRegister, roots_array_start);
334     addp(kRootRegister, Immediate(kRootRegisterBias));
335   }
336 
337   // ---------------------------------------------------------------------------
338   // JavaScript invokes
339 
340   // Invoke the JavaScript function code by either calling or jumping.
341   void InvokeCode(Register code,
342                   const ParameterCount& expected,
343                   const ParameterCount& actual,
344                   InvokeFlag flag,
345                   const CallWrapper& call_wrapper);
346 
347   // Invoke the JavaScript function in the given register. Changes the
348   // current context to the context in the function before invoking.
349   void InvokeFunction(Register function,
350                       const ParameterCount& actual,
351                       InvokeFlag flag,
352                       const CallWrapper& call_wrapper);
353 
354   void InvokeFunction(Register function,
355                       const ParameterCount& expected,
356                       const ParameterCount& actual,
357                       InvokeFlag flag,
358                       const CallWrapper& call_wrapper);
359 
360   void InvokeFunction(Handle<JSFunction> function,
361                       const ParameterCount& expected,
362                       const ParameterCount& actual,
363                       InvokeFlag flag,
364                       const CallWrapper& call_wrapper);
365 
366   // Invoke specified builtin JavaScript function. Adds an entry to
367   // the unresolved list if the name does not resolve.
368   void InvokeBuiltin(Builtins::JavaScript id,
369                      InvokeFlag flag,
370                      const CallWrapper& call_wrapper = NullCallWrapper());
371 
372   // Store the function for the given builtin in the target register.
373   void GetBuiltinFunction(Register target, Builtins::JavaScript id);
374 
375   // Store the code object for the given builtin in the target register.
376   void GetBuiltinEntry(Register target, Builtins::JavaScript id);
377 
378 
379   // ---------------------------------------------------------------------------
380   // Smi tagging, untagging and operations on tagged smis.
381 
382   // Support for constant splitting.
383   bool IsUnsafeInt(const int32_t x);
384   void SafeMove(Register dst, Smi* src);
385   void SafePush(Smi* src);
386 
387   void InitializeSmiConstantRegister() {
388     Move(kSmiConstantRegister, Smi::FromInt(kSmiConstantRegisterValue),
389          Assembler::RelocInfoNone());
390   }
391 
392   // Conversions between tagged smi values and non-tagged integer values.
393 
394   // Tag an integer value. The result must be known to be a valid smi value.
395   // Only uses the low 32 bits of the src register. Sets the N and Z flags
396   // based on the value of the resulting smi.
397   void Integer32ToSmi(Register dst, Register src);
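  // Editor's note (illustrative sketch, assuming the default x64 layout in
  // which smi payloads occupy the upper 32 bits, i.e. kSmiShift == 32):
  // tagging is then just a left shift, so the integer 5 becomes the tagged
  // word 0x0000000500000000, and untagging is the corresponding right shift.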
398 
399   // Stores an integer32 value into a memory field that already holds a smi.
400   void Integer32ToSmiField(const Operand& dst, Register src);
401 
402   // Adds constant to src and tags the result as a smi.
403   // Result must be a valid smi.
404   void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
405 
406   // Convert smi to 32-bit integer. I.e., not sign extended into
407   // high 32 bits of destination.
408   void SmiToInteger32(Register dst, Register src);
409   void SmiToInteger32(Register dst, const Operand& src);
410 
411   // Convert smi to 64-bit integer (sign extended if necessary).
412   void SmiToInteger64(Register dst, Register src);
413   void SmiToInteger64(Register dst, const Operand& src);
414 
415   // Multiply a positive smi's integer value by a power of two.
416   // Provides result as 64-bit integer value.
417   void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
418                                              Register src,
419                                              int power);
420 
421   // Divide a positive smi's integer value by a power of two.
422   // Provides result as 32-bit integer value.
423   void PositiveSmiDivPowerOfTwoToInteger32(Register dst,
424                                            Register src,
425                                            int power);
426 
427   // Perform the logical or of two smi values and return a smi value.
428   // If either argument is not a smi, jump to on_not_smis and retain
429   // the original values of source registers. The destination register
430   // may be changed if it's not one of the source registers.
431   void SmiOrIfSmis(Register dst,
432                    Register src1,
433                    Register src2,
434                    Label* on_not_smis,
435                    Label::Distance near_jump = Label::kFar);
436 
437 
438   // Simple comparison of smis.  Both sides must be known smis to use these,
439   // otherwise use Cmp.
440   void SmiCompare(Register smi1, Register smi2);
441   void SmiCompare(Register dst, Smi* src);
442   void SmiCompare(Register dst, const Operand& src);
443   void SmiCompare(const Operand& dst, Register src);
444   void SmiCompare(const Operand& dst, Smi* src);
445   // Compare the int32 in src register to the value of the smi stored at dst.
446   void SmiCompareInteger32(const Operand& dst, Register src);
447   // Sets sign and zero flags depending on value of smi in register.
448   void SmiTest(Register src);
449 
450   // Functions performing a check on a known or potential smi. Returns
451   // a condition that is satisfied if the check is successful.
452 
453   // Is the value a tagged smi.
454   Condition CheckSmi(Register src);
455   Condition CheckSmi(const Operand& src);
456 
457   // Is the value a non-negative tagged smi.
458   Condition CheckNonNegativeSmi(Register src);
459 
460   // Are both values tagged smis.
461   Condition CheckBothSmi(Register first, Register second);
462 
463   // Are both values non-negative tagged smis.
464   Condition CheckBothNonNegativeSmi(Register first, Register second);
465 
466   // Is either value a tagged smi.
467   Condition CheckEitherSmi(Register first,
468                            Register second,
469                            Register scratch = kScratchRegister);
470 
471   // Is the value the minimum smi value (since we are using
472   // two's complement numbers, negating the value is known to yield
473   // a non-smi value).
474   Condition CheckIsMinSmi(Register src);
475 
476   // Checks whether a 32-bit integer value is valid for conversion
477   // to a smi.
478   Condition CheckInteger32ValidSmiValue(Register src);
479 
480   // Checks whether a 32-bit unsigned integer value is valid for
481   // conversion to a smi.
482   Condition CheckUInteger32ValidSmiValue(Register src);
483 
484   // Check whether src is a Smi, and set dst to zero if it is a smi,
485   // and to one if it isn't.
486   void CheckSmiToIndicator(Register dst, Register src);
487   void CheckSmiToIndicator(Register dst, const Operand& src);
488 
489   // Test-and-jump functions. Typically combines a check function
490   // above with a conditional jump.
491 
492   // Jump if the value can be represented by a smi.
493   void JumpIfValidSmiValue(Register src, Label* on_valid,
494                            Label::Distance near_jump = Label::kFar);
495 
496   // Jump if the value cannot be represented by a smi.
497   void JumpIfNotValidSmiValue(Register src, Label* on_invalid,
498                               Label::Distance near_jump = Label::kFar);
499 
500   // Jump if the unsigned integer value can be represented by a smi.
501   void JumpIfUIntValidSmiValue(Register src, Label* on_valid,
502                                Label::Distance near_jump = Label::kFar);
503 
504   // Jump if the unsigned integer value cannot be represented by a smi.
505   void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
506                                   Label::Distance near_jump = Label::kFar);
507 
508   // Jump to label if the value is a tagged smi.
509   void JumpIfSmi(Register src,
510                  Label* on_smi,
511                  Label::Distance near_jump = Label::kFar);
512 
513   // Jump to label if the value is not a tagged smi.
514   void JumpIfNotSmi(Register src,
515                     Label* on_not_smi,
516                     Label::Distance near_jump = Label::kFar);
517 
518   // Jump to label if the value is not a non-negative tagged smi.
519   void JumpUnlessNonNegativeSmi(Register src,
520                                 Label* on_not_smi,
521                                 Label::Distance near_jump = Label::kFar);
522 
523   // Jump to label if the value, which must be a tagged smi, has value equal
524   // to the constant.
525   void JumpIfSmiEqualsConstant(Register src,
526                                Smi* constant,
527                                Label* on_equals,
528                                Label::Distance near_jump = Label::kFar);
529 
530   // Jump if either or both registers are not smi values.
531   void JumpIfNotBothSmi(Register src1,
532                         Register src2,
533                         Label* on_not_both_smi,
534                         Label::Distance near_jump = Label::kFar);
535 
536   // Jump if either or both registers are not non-negative smi values.
537   void JumpUnlessBothNonNegativeSmi(Register src1, Register src2,
538                                     Label* on_not_both_smi,
539                                     Label::Distance near_jump = Label::kFar);
540 
541   // Operations on tagged smi values.
542 
543   // Smis represent a subset of integers. The subset is always equivalent to
544   // a two's complement interpretation of a fixed number of bits.
545 
546   // Add an integer constant to a tagged smi, giving a tagged smi as result.
547   // No overflow testing on the result is done.
548   void SmiAddConstant(Register dst, Register src, Smi* constant);
549 
550   // Add an integer constant to a tagged smi, giving a tagged smi as result.
551   // No overflow testing on the result is done.
552   void SmiAddConstant(const Operand& dst, Smi* constant);
553 
554   // Add an integer constant to a tagged smi, giving a tagged smi as result,
555   // or jumping to a label if the result cannot be represented by a smi.
556   void SmiAddConstant(Register dst,
557                       Register src,
558                       Smi* constant,
559                       SmiOperationExecutionMode mode,
560                       Label* bailout_label,
561                       Label::Distance near_jump = Label::kFar);
562 
563   // Subtract an integer constant from a tagged smi, giving a tagged smi as
564   // result. No testing on the result is done. Sets the N and Z flags
565   // based on the value of the resulting integer.
566   void SmiSubConstant(Register dst, Register src, Smi* constant);
567 
568   // Subtract an integer constant from a tagged smi, giving a tagged smi as
569   // result, or jumping to a label if the result cannot be represented by a smi.
570   void SmiSubConstant(Register dst,
571                       Register src,
572                       Smi* constant,
573                       SmiOperationExecutionMode mode,
574                       Label* bailout_label,
575                       Label::Distance near_jump = Label::kFar);
576 
577   // Negating a smi can give a negative zero or too large positive value.
578   // NOTICE: This operation jumps on success, not failure!
579   void SmiNeg(Register dst,
580               Register src,
581               Label* on_smi_result,
582               Label::Distance near_jump = Label::kFar);
583 
584   // Adds smi values and returns the result as a smi.
585   // If dst is src1, then src1 will be destroyed if the operation is
586   // successful, otherwise kept intact.
587   void SmiAdd(Register dst,
588               Register src1,
589               Register src2,
590               Label* on_not_smi_result,
591               Label::Distance near_jump = Label::kFar);
592   void SmiAdd(Register dst,
593               Register src1,
594               const Operand& src2,
595               Label* on_not_smi_result,
596               Label::Distance near_jump = Label::kFar);
597 
598   void SmiAdd(Register dst,
599               Register src1,
600               Register src2);
601 
602   // Subtracts smi values and returns the result as a smi.
603   // If dst is src1, then src1 will be destroyed if the operation is
604   // successful, otherwise kept intact.
605   void SmiSub(Register dst,
606               Register src1,
607               Register src2,
608               Label* on_not_smi_result,
609               Label::Distance near_jump = Label::kFar);
610   void SmiSub(Register dst,
611               Register src1,
612               const Operand& src2,
613               Label* on_not_smi_result,
614               Label::Distance near_jump = Label::kFar);
615 
616   void SmiSub(Register dst,
617               Register src1,
618               Register src2);
619 
620   void SmiSub(Register dst,
621               Register src1,
622               const Operand& src2);
623 
624   // Multiplies smi values and returns the result as a smi,
625   // if possible.
626   // If dst is src1, then src1 will be destroyed, even if
627   // the operation is unsuccessful.
628   void SmiMul(Register dst,
629               Register src1,
630               Register src2,
631               Label* on_not_smi_result,
632               Label::Distance near_jump = Label::kFar);
633 
634   // Divides one smi by another and returns the quotient.
635   // Clobbers rax and rdx registers.
636   void SmiDiv(Register dst,
637               Register src1,
638               Register src2,
639               Label* on_not_smi_result,
640               Label::Distance near_jump = Label::kFar);
641 
642   // Divides one smi by another and returns the remainder.
643   // Clobbers rax and rdx registers.
644   void SmiMod(Register dst,
645               Register src1,
646               Register src2,
647               Label* on_not_smi_result,
648               Label::Distance near_jump = Label::kFar);
649 
650   // Bitwise operations.
651   void SmiNot(Register dst, Register src);
652   void SmiAnd(Register dst, Register src1, Register src2);
653   void SmiOr(Register dst, Register src1, Register src2);
654   void SmiXor(Register dst, Register src1, Register src2);
655   void SmiAndConstant(Register dst, Register src1, Smi* constant);
656   void SmiOrConstant(Register dst, Register src1, Smi* constant);
657   void SmiXorConstant(Register dst, Register src1, Smi* constant);
658 
659   void SmiShiftLeftConstant(Register dst,
660                             Register src,
661                             int shift_value,
662                             Label* on_not_smi_result = NULL,
663                             Label::Distance near_jump = Label::kFar);
664   void SmiShiftLogicalRightConstant(Register dst,
665                                     Register src,
666                                     int shift_value,
667                                     Label* on_not_smi_result,
668                                     Label::Distance near_jump = Label::kFar);
669   void SmiShiftArithmeticRightConstant(Register dst,
670                                        Register src,
671                                        int shift_value);
672 
673   // Shifts a smi value to the left, and returns the result if that is a smi.
674   // Uses and clobbers rcx, so dst may not be rcx.
675   void SmiShiftLeft(Register dst,
676                     Register src1,
677                     Register src2,
678                     Label* on_not_smi_result = NULL,
679                     Label::Distance near_jump = Label::kFar);
680   // Shifts a smi value to the right, shifting in zero bits at the top, and
681   // returns the unsigned interpretation of the result if that is a smi.
682   // Uses and clobbers rcx, so dst may not be rcx.
683   void SmiShiftLogicalRight(Register dst,
684                             Register src1,
685                             Register src2,
686                             Label* on_not_smi_result,
687                             Label::Distance near_jump = Label::kFar);
688   // Shifts a smi value to the right, sign extending the top, and
689   // returns the signed interpretation of the result. That will always
690   // be a valid smi value, since it's numerically smaller than the
691   // original.
692   // Uses and clobbers rcx, so dst may not be rcx.
693   void SmiShiftArithmeticRight(Register dst,
694                                Register src1,
695                                Register src2);
696 
697   // Specialized operations
698 
699   // Select the non-smi register of two registers where exactly one is a
700   // smi. If neither are smis, jump to the failure label.
701   void SelectNonSmi(Register dst,
702                     Register src1,
703                     Register src2,
704                     Label* on_not_smis,
705                     Label::Distance near_jump = Label::kFar);
706 
707   // Converts, if necessary, a smi to a combination of number and
708   // multiplier to be used as a scaled index.
709   // The src register contains a *positive* smi value. The shift is the
710   // power of two to multiply the index value by (e.g.
711   // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
712   // The returned index register may be either src or dst, depending
713   // on what is most efficient. If src and dst are different registers,
714   // src is always unchanged.
715   SmiIndex SmiToIndex(Register dst, Register src, int shift);
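  // Editor's sketch of a typical use (illustrative only; the registers and
  // the FixedArray element access are hypothetical):
  //   SmiIndex index = SmiToIndex(rbx, rax, kPointerSizeLog2);
  //   movp(rdx, FieldOperand(rcx, index.reg, index.scale,
  //                          FixedArray::kHeaderSize));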
716 
717   // Converts a positive smi to a negative index.
718   SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
719 
720   // Add the value of a smi in memory to an int32 register.
721   // Sets flags as a normal add.
722   void AddSmiField(Register dst, const Operand& src);
723 
724   // Basic Smi operations.
725   void Move(Register dst, Smi* source) {
726     LoadSmiConstant(dst, source);
727   }
728 
729   void Move(const Operand& dst, Smi* source) {
730     Register constant = GetSmiConstant(source);
731     movp(dst, constant);
732   }
733 
734   void Push(Smi* smi);
735 
736   // Save away a raw integer with pointer size on the stack as two integers
737   // masquerading as smis so that the garbage collector skips visiting them.
738   void PushRegisterAsTwoSmis(Register src, Register scratch = kScratchRegister);
739   // Reconstruct a raw integer with pointer size from two integers masquerading
740   // as smis on the top of stack.
741   void PopRegisterAsTwoSmis(Register dst, Register scratch = kScratchRegister);
742 
743   void Test(const Operand& dst, Smi* source);
744 
745 
746   // ---------------------------------------------------------------------------
747   // String macros.
748 
749   // Generate code to do a lookup in the number string cache. If the number in
750   // the register object is found in the cache the generated code falls through
751   // with the result in the result register. The object and the result register
752   // can be the same. If the number is not found in the cache the code jumps to
753   // the label not_found with only the content of register object unchanged.
754   void LookupNumberStringCache(Register object,
755                                Register result,
756                                Register scratch1,
757                                Register scratch2,
758                                Label* not_found);
759 
760   // If object is a string, its map is loaded into object_map.
761   void JumpIfNotString(Register object,
762                        Register object_map,
763                        Label* not_string,
764                        Label::Distance near_jump = Label::kFar);
765 
766 
767   void JumpIfNotBothSequentialAsciiStrings(
768       Register first_object,
769       Register second_object,
770       Register scratch1,
771       Register scratch2,
772       Label* on_not_both_flat_ascii,
773       Label::Distance near_jump = Label::kFar);
774 
775   // Check whether the instance type represents a flat ASCII string. Jump to the
776   // label if not. If the instance type can be scratched, specify the same
777   // register for both instance type and scratch.
778   void JumpIfInstanceTypeIsNotSequentialAscii(
779       Register instance_type,
780       Register scratch,
781       Label* on_not_flat_ascii_string,
782       Label::Distance near_jump = Label::kFar);
783 
784   void JumpIfBothInstanceTypesAreNotSequentialAscii(
785       Register first_object_instance_type,
786       Register second_object_instance_type,
787       Register scratch1,
788       Register scratch2,
789       Label* on_fail,
790       Label::Distance near_jump = Label::kFar);
791 
792   void EmitSeqStringSetCharCheck(Register string,
793                                  Register index,
794                                  Register value,
795                                  uint32_t encoding_mask);
796 
797   // Checks if the given register or operand is a unique name
798   void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
799                            Label::Distance distance = Label::kFar);
800   void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
801                            Label::Distance distance = Label::kFar);
802 
803   // ---------------------------------------------------------------------------
804   // Macro instructions.
805 
806   // Load/store with specific representation.
807   void Load(Register dst, const Operand& src, Representation r);
808   void Store(const Operand& dst, Register src, Representation r);
809 
810   // Load a register with a long value as efficiently as possible.
811   void Set(Register dst, int64_t x);
812   void Set(const Operand& dst, intptr_t x);
813 
814   // The cvtsi2sd instruction only writes to the low 64 bits of the dst register,
815   // which hinders register renaming and makes dependence chains longer. So we
816   // use xorps to clear the dst register before cvtsi2sd to solve this issue.
817   void Cvtlsi2sd(XMMRegister dst, Register src);
818   void Cvtlsi2sd(XMMRegister dst, const Operand& src);
819 
820   // Move if the registers are not identical.
821   void Move(Register target, Register source);
822 
823   // TestBit and Load SharedFunctionInfo special field.
824   void TestBitSharedFunctionInfoSpecialField(Register base,
825                                              int offset,
826                                              int bit_index);
827   void LoadSharedFunctionInfoSpecialField(Register dst,
828                                           Register base,
829                                           int offset);
830 
831   // Handle support
832   void Move(Register dst, Handle<Object> source);
833   void Move(const Operand& dst, Handle<Object> source);
834   void Cmp(Register dst, Handle<Object> source);
835   void Cmp(const Operand& dst, Handle<Object> source);
836   void Cmp(Register dst, Smi* src);
837   void Cmp(const Operand& dst, Smi* src);
838   void Push(Handle<Object> source);
839 
840   // Load a heap object and handle the case of new-space objects by
841   // indirecting via a global cell.
842   void MoveHeapObject(Register result, Handle<Object> object);
843 
844   // Load a global cell into a register.
845   void LoadGlobalCell(Register dst, Handle<Cell> cell);
846 
847   // Emit code to discard a non-negative number of pointer-sized elements
848   // from the stack, clobbering only the rsp register.
849   void Drop(int stack_elements);
850   // Emit code to discard a positive number of pointer-sized elements
851   // from the stack under the return address which remains on the top,
852   // clobbering the rsp register.
853   void DropUnderReturnAddress(int stack_elements,
854                               Register scratch = kScratchRegister);
855 
856   void Call(Label* target) { call(target); }
857   void Push(Register src);
858   void Push(const Operand& src);
859   void PushQuad(const Operand& src);
860   void Push(Immediate value);
861   void PushImm32(int32_t imm32);
862   void Pop(Register dst);
863   void Pop(const Operand& dst);
864   void PopQuad(const Operand& dst);
865   void PushReturnAddressFrom(Register src) { pushq(src); }
866   void PopReturnAddressTo(Register dst) { popq(dst); }
867   void Move(Register dst, ExternalReference ext) {
868     movp(dst, reinterpret_cast<void*>(ext.address()),
869          RelocInfo::EXTERNAL_REFERENCE);
870   }
871 
872   // Loads a pointer into a register with a relocation mode.
873   void Move(Register dst, void* ptr, RelocInfo::Mode rmode) {
874     // This method must not be used with heap object references. The stored
875     // address is not GC safe. Use the handle version instead.
876     ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
877     movp(dst, ptr, rmode);
878   }
879 
880   void Move(Register dst, Handle<Object> value, RelocInfo::Mode rmode) {
881     AllowDeferredHandleDereference using_raw_address;
882     ASSERT(!RelocInfo::IsNone(rmode));
883     ASSERT(value->IsHeapObject());
884     ASSERT(!isolate()->heap()->InNewSpace(*value));
885     movp(dst, reinterpret_cast<void*>(value.location()), rmode);
886   }
887 
888   // Control Flow
889   void Jump(Address destination, RelocInfo::Mode rmode);
890   void Jump(ExternalReference ext);
891   void Jump(const Operand& op);
892   void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
893 
894   void Call(Address destination, RelocInfo::Mode rmode);
895   void Call(ExternalReference ext);
896   void Call(const Operand& op);
897   void Call(Handle<Code> code_object,
898             RelocInfo::Mode rmode,
899             TypeFeedbackId ast_id = TypeFeedbackId::None());
900 
901   // The size of the code generated for different call instructions.
902   int CallSize(Address destination) {
903     return kCallSequenceLength;
904   }
905   int CallSize(ExternalReference ext);
906   int CallSize(Handle<Code> code_object) {
907     // Code calls use 32-bit relative addressing.
908     return kShortCallInstructionLength;
909   }
910   int CallSize(Register target) {
911     // Opcode: REX_opt FF /2 m64
912     return (target.high_bit() != 0) ? 3 : 2;
913   }
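  // Editor's note (illustrative): a call through a register encodes as
  // FF /2 with a ModRM byte, so rax..rdi take 2 bytes, while r8..r15
  // (high_bit() != 0) need an extra REX.B prefix, giving 3 bytes.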
914   int CallSize(const Operand& target) {
915     // Opcode: REX_opt FF /2 m64
916     return (target.requires_rex() ? 2 : 1) + target.operand_size();
917   }
918 
919   // Emit call to the code we are currently generating.
920   void CallSelf() {
921     Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
922     Call(self, RelocInfo::CODE_TARGET);
923   }
924 
925   // Non-x64 instructions.
926   // Push/pop all general purpose registers.
927   // Does not push rsp/rbp nor any of the assembler's special purpose registers
928   // (kScratchRegister, kSmiConstantRegister, kRootRegister).
929   void Pushad();
930   void Popad();
931   // Sets the stack as after performing Popad, without actually loading the
932   // registers.
933   void Dropad();
934 
935   // Compare object type for heap object.
936   // Always use unsigned comparisons: above and below, not less and greater.
937   // Incoming register is heap_object and outgoing register is map.
938   // They may be the same register, and may be kScratchRegister.
939   void CmpObjectType(Register heap_object, InstanceType type, Register map);
940 
941   // Compare instance type for map.
942   // Always use unsigned comparisons: above and below, not less and greater.
943   void CmpInstanceType(Register map, InstanceType type);
944 
945   // Check if a map for a JSObject indicates that the object has fast elements.
946   // Jump to the specified label if it does not.
947   void CheckFastElements(Register map,
948                          Label* fail,
949                          Label::Distance distance = Label::kFar);
950 
951   // Check if a map for a JSObject indicates that the object can have both smi
952   // and HeapObject elements.  Jump to the specified label if it does not.
953   void CheckFastObjectElements(Register map,
954                                Label* fail,
955                                Label::Distance distance = Label::kFar);
956 
957   // Check if a map for a JSObject indicates that the object has fast smi only
958   // elements.  Jump to the specified label if it does not.
959   void CheckFastSmiElements(Register map,
960                             Label* fail,
961                             Label::Distance distance = Label::kFar);
962 
963   // Check to see if maybe_number can be stored as a double in
964   // FastDoubleElements. If it can, store it at the index specified by index in
965   // the FastDoubleElements array elements, otherwise jump to fail.  Note that
966   // index must not be smi-tagged.
967   void StoreNumberToDoubleElements(Register maybe_number,
968                                    Register elements,
969                                    Register index,
970                                    XMMRegister xmm_scratch,
971                                    Label* fail,
972                                    int elements_offset = 0);
973 
974   // Compare an object's map with the specified map.
975   void CompareMap(Register obj, Handle<Map> map);
976 
977   // Check if the map of an object is equal to a specified map and branch to
978   // label if not. Skip the smi check if not required (object is known to be a
979   // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
980   // against maps that are ElementsKind transition maps of the specified map.
981   void CheckMap(Register obj,
982                 Handle<Map> map,
983                 Label* fail,
984                 SmiCheckType smi_check_type);
985 
986   // Check if the map of an object is equal to a specified map and branch to a
987   // specified target if equal. Skip the smi check if not required (object is
988   // known to be a heap object)
989   void DispatchMap(Register obj,
990                    Register unused,
991                    Handle<Map> map,
992                    Handle<Code> success,
993                    SmiCheckType smi_check_type);
994 
995   // Check if the object in register heap_object is a string. Afterwards the
996   // register map contains the object map and the register instance_type
997   // contains the instance_type. The registers map and instance_type can be the
998   // same in which case it contains the instance type afterwards. Either of the
999   // registers map and instance_type can be the same as heap_object.
1000   Condition IsObjectStringType(Register heap_object,
1001                                Register map,
1002                                Register instance_type);
1003 
1004   // Check if the object in register heap_object is a name. Afterwards the
1005   // register map contains the object map and the register instance_type
1006   // contains the instance_type. The registers map and instance_type can be the
1007   // same in which case it contains the instance type afterwards. Either of the
1008   // registers map and instance_type can be the same as heap_object.
1009   Condition IsObjectNameType(Register heap_object,
1010                              Register map,
1011                              Register instance_type);
1012 
1013   // FCmp compares and pops the two values on top of the FPU stack.
1014   // The flag results are similar to integer cmp, but require unsigned
1015   // jcc instructions (je, ja, jae, jb, jbe, and jz).
1016   void FCmp();
1017 
1018   void ClampUint8(Register reg);
1019 
1020   void ClampDoubleToUint8(XMMRegister input_reg,
1021                           XMMRegister temp_xmm_reg,
1022                           Register result_reg);
1023 
1024   void SlowTruncateToI(Register result_reg, Register input_reg,
1025       int offset = HeapNumber::kValueOffset - kHeapObjectTag);
1026 
1027   void TruncateHeapNumberToI(Register result_reg, Register input_reg);
1028   void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);
1029 
1030   void DoubleToI(Register result_reg, XMMRegister input_reg,
1031       XMMRegister scratch, MinusZeroMode minus_zero_mode,
1032       Label* conversion_failed, Label::Distance dst = Label::kFar);
1033 
1034   void TaggedToI(Register result_reg, Register input_reg, XMMRegister temp,
1035       MinusZeroMode minus_zero_mode, Label* lost_precision,
1036       Label::Distance dst = Label::kFar);
1037 
1038   void LoadUint32(XMMRegister dst, Register src);
1039 
1040   void LoadInstanceDescriptors(Register map, Register descriptors);
1041   void EnumLength(Register dst, Register map);
1042   void NumberOfOwnDescriptors(Register dst, Register map);
1043 
1044   template<typename Field>
1045   void DecodeField(Register reg) {
1046     static const int shift = Field::kShift;
1047     static const int mask = Field::kMask >> Field::kShift;
1048     if (shift != 0) {
1049       shrp(reg, Immediate(shift));
1050     }
1051     andp(reg, Immediate(mask));
1052   }
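  // Editor's sketch of a typical use (illustrative only; Map::ElementsKindBits
  // is assumed here to be a BitField-style encoding with kShift/kMask):
  //   DecodeField<Map::ElementsKindBits>(rcx);
  //   // rcx now holds the decoded field value, shifted down and masked.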
1053 
1054   template<typename Field>
1055   void DecodeFieldToSmi(Register reg) {
1056     if (SmiValuesAre32Bits()) {
1057       andp(reg, Immediate(Field::kMask));
1058       shlp(reg, Immediate(kSmiShift - Field::kShift));
1059     } else {
1060       static const int shift = Field::kShift;
1061       static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize;
1062       ASSERT(SmiValuesAre31Bits());
1063       ASSERT(kSmiShift == kSmiTagSize);
1064       ASSERT((mask & 0x80000000u) == 0);
1065       if (shift < kSmiShift) {
1066         shlp(reg, Immediate(kSmiShift - shift));
1067       } else if (shift > kSmiShift) {
1068         sarp(reg, Immediate(shift - kSmiShift));
1069       }
1070       andp(reg, Immediate(mask));
1071     }
1072   }
1073 
1074   // Abort execution if argument is not a number, enabled via --debug-code.
1075   void AssertNumber(Register object);
1076 
1077   // Abort execution if argument is a smi, enabled via --debug-code.
1078   void AssertNotSmi(Register object);
1079 
1080   // Abort execution if argument is not a smi, enabled via --debug-code.
1081   void AssertSmi(Register object);
1082   void AssertSmi(const Operand& object);
1083 
1084   // Abort execution if a 64-bit register containing a 32-bit payload does not
1085   // have zeros in the top 32 bits, enabled via --debug-code.
1086   void AssertZeroExtended(Register reg);
1087 
1088   // Abort execution if argument is not a string, enabled via --debug-code.
1089   void AssertString(Register object);
1090 
1091   // Abort execution if argument is not a name, enabled via --debug-code.
1092   void AssertName(Register object);
1093 
1094   // Abort execution if argument is not undefined or an AllocationSite, enabled
1095   // via --debug-code.
1096   void AssertUndefinedOrAllocationSite(Register object);
1097 
1098   // Abort execution if argument is not the root value with the given index,
1099   // enabled via --debug-code.
1100   void AssertRootValue(Register src,
1101                        Heap::RootListIndex root_value_index,
1102                        BailoutReason reason);
1103 
1104   // ---------------------------------------------------------------------------
1105   // Exception handling
1106 
1107   // Push a new try handler and link it into the try handler chain.
1108   void PushTryHandler(StackHandler::Kind kind, int handler_index);
1109 
1110   // Unlink the stack handler on top of the stack from the try handler chain.
1111   void PopTryHandler();
1112 
1113   // Activate the top handler in the try handler chain and pass the
1114   // thrown value.
1115   void Throw(Register value);
1116 
1117   // Propagate an uncatchable exception out of the current JS stack.
1118   void ThrowUncatchable(Register value);
1119 
1120   // ---------------------------------------------------------------------------
1121   // Inline caching support
1122 
1123   // Generate code for checking access rights - used for security checks
1124   // on access to global objects across environments. The holder register
1125   // is left untouched, but the scratch register and kScratchRegister,
1126   // which must be different, are clobbered.
1127   void CheckAccessGlobalProxy(Register holder_reg,
1128                               Register scratch,
1129                               Label* miss);
1130 
1131   void GetNumberHash(Register r0, Register scratch);
1132 
1133   void LoadFromNumberDictionary(Label* miss,
1134                                 Register elements,
1135                                 Register key,
1136                                 Register r0,
1137                                 Register r1,
1138                                 Register r2,
1139                                 Register result);
1140 
1141 
1142   // ---------------------------------------------------------------------------
1143   // Allocation support
1144 
1145   // Allocate an object in new space or old pointer space. If the given space
1146   // is exhausted control continues at the gc_required label. The allocated
1147   // object is returned in result and end of the new object is returned in
1148   // result_end. The register scratch can be passed as no_reg in which case
1149   // an additional object reference will be added to the reloc info. The
1150   // returned pointers in result and result_end have not yet been tagged as
1151   // heap objects. If result_contains_top_on_entry is true the content of
1152   // result is known to be the allocation top on entry (could be result_end
1153   // from a previous call). If result_contains_top_on_entry is true scratch
1154   // should be no_reg as it is never used.
1155   void Allocate(int object_size,
1156                 Register result,
1157                 Register result_end,
1158                 Register scratch,
1159                 Label* gc_required,
1160                 AllocationFlags flags);
1161 
1162   void Allocate(int header_size,
1163                 ScaleFactor element_size,
1164                 Register element_count,
1165                 Register result,
1166                 Register result_end,
1167                 Register scratch,
1168                 Label* gc_required,
1169                 AllocationFlags flags);
1170 
1171   void Allocate(Register object_size,
1172                 Register result,
1173                 Register result_end,
1174                 Register scratch,
1175                 Label* gc_required,
1176                 AllocationFlags flags);
1177 
1178   // Undo allocation in new space. The object passed and objects allocated after
1179   // it will no longer be allocated. Make sure that no pointers are left to the
1180   // object(s) no longer allocated as they would be invalid when allocation is
1181   // un-done.
1182   void UndoAllocationInNewSpace(Register object);
1183 
1184   // Allocate a heap number in new space with undefined value. Returns
1185   // tagged pointer in result register, or jumps to gc_required if new
1186   // space is full.
1187   void AllocateHeapNumber(Register result,
1188                           Register scratch,
1189                           Label* gc_required);
1190 
1191   // Allocate a sequential string. All the header fields of the string object
1192   // are initialized.
1193   void AllocateTwoByteString(Register result,
1194                              Register length,
1195                              Register scratch1,
1196                              Register scratch2,
1197                              Register scratch3,
1198                              Label* gc_required);
1199   void AllocateAsciiString(Register result,
1200                            Register length,
1201                            Register scratch1,
1202                            Register scratch2,
1203                            Register scratch3,
1204                            Label* gc_required);
1205 
1206   // Allocate a raw cons string object. Only the map field of the result is
1207   // initialized.
1208   void AllocateTwoByteConsString(Register result,
1209                           Register scratch1,
1210                           Register scratch2,
1211                           Label* gc_required);
1212   void AllocateAsciiConsString(Register result,
1213                                Register scratch1,
1214                                Register scratch2,
1215                                Label* gc_required);
1216 
1217   // Allocate a raw sliced string object. Only the map field of the result is
1218   // initialized.
1219   void AllocateTwoByteSlicedString(Register result,
1220                             Register scratch1,
1221                             Register scratch2,
1222                             Label* gc_required);
1223   void AllocateAsciiSlicedString(Register result,
1224                                  Register scratch1,
1225                                  Register scratch2,
1226                                  Label* gc_required);
1227 
1228   // ---------------------------------------------------------------------------
1229   // Support functions.
1230 
1231   // Check if result is zero and op is negative.
1232   void NegativeZeroTest(Register result, Register op, Label* then_label);
1233 
1234   // Check if result is zero and op is negative in code using jump targets.
1235   void NegativeZeroTest(CodeGenerator* cgen,
1236                         Register result,
1237                         Register op,
1238                         JumpTarget* then_target);
1239 
1240   // Check if result is zero and any of op1 and op2 are negative.
1241   // Register scratch is destroyed, and it must be different from op2.
1242   void NegativeZeroTest(Register result, Register op1, Register op2,
1243                         Register scratch, Label* then_label);
1244 
1245   // Try to get the prototype of a function and put the value in
1246   // the result register. Checks that the function really is a
1247   // function and jumps to the miss label if the fast checks fail. The
1248   // function register will be untouched; the other register may be
1249   // clobbered.
1250   void TryGetFunctionPrototype(Register function,
1251                                Register result,
1252                                Label* miss,
1253                                bool miss_on_bound_function = false);
1254 
1255   // Picks out an array index from the hash field.
1256   // Register use:
1257   //   hash - holds the index's hash. Clobbered.
1258   //   index - holds the overwritten index on exit.
1259   void IndexFromHash(Register hash, Register index);
1260 
1261   // Find the function context up the context chain.
1262   void LoadContext(Register dst, int context_chain_length);
1263 
1264   // Conditionally load the cached Array transitioned map of type
1265   // transitioned_kind from the native context if the map in register
1266   // map_in_out is the cached Array map in the native context of
1267   // expected_kind.
1268   void LoadTransitionedArrayMapConditional(
1269       ElementsKind expected_kind,
1270       ElementsKind transitioned_kind,
1271       Register map_in_out,
1272       Register scratch,
1273       Label* no_map_match);
1274 
1275   // Load the global function with the given index.
1276   void LoadGlobalFunction(int index, Register function);
1277 
1278   // Load the initial map from the global function. The registers
1279   // function and map can be the same.
1280   void LoadGlobalFunctionInitialMap(Register function, Register map);
1281 
1282   // ---------------------------------------------------------------------------
1283   // Runtime calls
1284 
1285   // Call a code stub.
1286   void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
1287 
1288   // Tail call a code stub (jump).
1289   void TailCallStub(CodeStub* stub);
1290 
1291   // Return from a code stub after popping its arguments.
1292   void StubReturn(int argc);
1293 
1294   // Call a runtime routine.
1295   void CallRuntime(const Runtime::Function* f,
1296                    int num_arguments,
1297                    SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1298 
1299   // Call a runtime function and save the value of XMM registers.
1300   void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
1301     const Runtime::Function* function = Runtime::FunctionForId(id);
1302     CallRuntime(function, function->nargs, kSaveFPRegs);
1303   }
1304 
1305   // Convenience function: Same as above, but takes the fid instead.
1306   void CallRuntime(Runtime::FunctionId id,
1307                    int num_arguments,
1308                    SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1309     CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
1310   }
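  // An illustrative usage sketch (Runtime::kSomeId below is a placeholder for
  // a real runtime function id; the argument is an assumption for the example):
  //
  //   __ Push(argument);                    // runtime arguments are pushed first
  //   __ CallRuntime(Runtime::kSomeId, 1);  // then called with their count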
1311 
1312   // Convenience function: call an external reference.
1313   void CallExternalReference(const ExternalReference& ext,
1314                              int num_arguments);
1315 
1316   // Tail call of a runtime routine (jump).
1317   // Like JumpToExternalReference, but also takes care of passing the number
1318   // of parameters.
1319   void TailCallExternalReference(const ExternalReference& ext,
1320                                  int num_arguments,
1321                                  int result_size);
1322 
1323   // Convenience function: tail call a runtime routine (jump).
1324   void TailCallRuntime(Runtime::FunctionId fid,
1325                        int num_arguments,
1326                        int result_size);
1327 
1328   // Jump to a runtime routine.
1329   void JumpToExternalReference(const ExternalReference& ext, int result_size);
1330 
1331   // Prepares the stack to put arguments (aligns and so on).  The WIN64 calling
1332   // convention requires the pointer to the return value slot to be put into
1333   // rcx (rcx must be preserved until CallApiFunctionAndReturn).  Saves
1334   // context (rsi).  Clobbers rax.  Allocates arg_stack_space * kPointerSize
1335   // inside the exit frame (not GCed), accessible via StackSpaceOperand.
1336   void PrepareCallApiFunction(int arg_stack_space);
1337 
1338   // Calls an API function.  Allocates a HandleScope, extracts the returned
1339   // value from the handle and propagates exceptions.  Clobbers r14, r15, rbx and
1340   // caller-save registers.  Restores context.  On return removes
1341   // stack_space * kPointerSize (GCed).
1342   void CallApiFunctionAndReturn(Register function_address,
1343                                 ExternalReference thunk_ref,
1344                                 Register thunk_last_arg,
1345                                 int stack_space,
1346                                 Operand return_value_operand,
1347                                 Operand* context_restore_operand);
1348 
1349   // Before calling a C-function from generated code, align arguments on stack.
1350   // After aligning the frame, arguments must be stored in rsp[0], rsp[8],
1351   // etc., not pushed. The argument count assumes all arguments are word sized.
1352   // The number of slots reserved for arguments depends on platform. On Windows
1353   // stack slots are reserved for the arguments passed in registers. On other
1354   // platforms stack slots are only reserved for the arguments actually passed
1355   // on the stack.
1356   void PrepareCallCFunction(int num_arguments);
1357 
1358   // Calls a C function and cleans up the space for arguments allocated
1359   // by PrepareCallCFunction. The called function is not allowed to trigger a
1360   // garbage collection, since that might move the code and invalidate the
1361   // return address (unless this is somehow accounted for by the called
1362   // function).
1363   void CallCFunction(ExternalReference function, int num_arguments);
1364   void CallCFunction(Register function, int num_arguments);
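  // An illustrative call sequence (a sketch only; it assumes arg_reg_1 and
  // arg_reg_2 name the first two C-ABI argument registers, and the external
  // reference and argument count below are placeholders for the example):
  //
  //   __ PrepareCallCFunction(2);
  //   __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
  //   __ movp(arg_reg_2, some_value_register);
  //   __ CallCFunction(ExternalReference::some_c_function(isolate), 2);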
1365 
1366   // Calculate the number of stack slots to reserve for arguments when calling a
1367   // C function.
1368   int ArgumentStackSlotsForCFunctionCall(int num_arguments);
1369 
1370   // ---------------------------------------------------------------------------
1371   // Utilities
1372 
1373   void Ret();
1374 
1375   // Return and drop arguments from stack, where the number of arguments
1376   // may be bigger than 2^16 - 1.  Requires a scratch register.
1377   void Ret(int bytes_dropped, Register scratch);
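  // An illustrative usage sketch (the argument count and scratch register are
  // assumptions for the example):
  //
  //   __ Ret(arg_count * kPointerSize, rcx);  // return, dropping the arguments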
1378 
1379   Handle<Object> CodeObject() {
1380     ASSERT(!code_object_.is_null());
1381     return code_object_;
1382   }
1383 
1384   // Copy length bytes from source to destination.
1385   // Uses the scratch register internally (if you have a low-eight register
1386   // free, do use it; otherwise kScratchRegister will be used).
1387   // min_length is a lower bound on the value that length will have.
1388   // The algorithm has some special cases that might be omitted if the string
1389   // is known to always be long.
1390   void CopyBytes(Register destination,
1391                  Register source,
1392                  Register length,
1393                  int min_length = 0,
1394                  Register scratch = kScratchRegister);
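  // An illustrative usage sketch (the register assignments are assumptions for
  // the example; rcx has an addressable low byte, so it makes a good scratch):
  //
  //   __ CopyBytes(rdi, rsi, rdx, 0 /* min_length */, rcx);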
1395 
1396   // Initialize fields with filler values.  Fields from |start_offset| up to but
1397   // not including |end_offset| are overwritten with the value in |filler|.  At
1398   // the end of the loop, |start_offset| takes the value of |end_offset|.
1399   void InitializeFieldsWithFiller(Register start_offset,
1400                                   Register end_offset,
1401                                   Register filler);
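  // An illustrative usage sketch (the register choices are assumptions for the
  // example); fills the half-open range [start_offset, end_offset) with a
  // one-pointer filler value:
  //
  //   __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);  // filler value
  //   __ InitializeFieldsWithFiller(rcx, rbx, rdx);      // rcx: start, rbx: end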
1402 
1403 
1404   // Emit code for a truncating division by a constant. The dividend register is
1405   // unchanged, the result is in rdx, and rax gets clobbered.
1406   void TruncatingDiv(Register dividend, int32_t divisor);
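  // An illustrative usage sketch (the divisor and dividend register are
  // assumptions for the example):
  //
  //   __ TruncatingDiv(rcx, 3);  // rcx unchanged, quotient in rdx, rax clobbered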
1407 
1408   // ---------------------------------------------------------------------------
1409   // StatsCounter support
1410 
1411   void SetCounter(StatsCounter* counter, int value);
1412   void IncrementCounter(StatsCounter* counter, int value);
1413   void DecrementCounter(StatsCounter* counter, int value);
1414 
1415 
1416   // ---------------------------------------------------------------------------
1417   // Debugging
1418 
1419   // Calls Abort(msg) if the condition cc is not satisfied.
1420   // Use --debug_code to enable.
1421   void Assert(Condition cc, BailoutReason reason);
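  // An illustrative usage sketch (the smi check and bailout reason are
  // assumptions for the example; nothing is emitted without --debug_code):
  //
  //   __ testb(rax, Immediate(kSmiTagMask));
  //   __ Assert(zero, kOperandIsNotASmi);  // abort if rax is not a smi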
1422 
1423   void AssertFastElements(Register elements);
1424 
1425   // Like Assert(), but always enabled.
1426   void Check(Condition cc, BailoutReason reason);
1427 
1428   // Print a message to stdout and abort execution.
1429   void Abort(BailoutReason msg);
1430 
1431   // Check that the stack is aligned.
1432   void CheckStackAlignment();
1433 
1434   // Verify restrictions about code generated in stubs.
1435   void set_generating_stub(bool value) { generating_stub_ = value; }
1436   bool generating_stub() { return generating_stub_; }
1437   void set_has_frame(bool value) { has_frame_ = value; }
1438   bool has_frame() { return has_frame_; }
1439   inline bool AllowThisStubCall(CodeStub* stub);
1440 
1441   static int SafepointRegisterStackIndex(Register reg) {
1442     return SafepointRegisterStackIndex(reg.code());
1443   }
1444 
1445   // Activation support.
1446   void EnterFrame(StackFrame::Type type);
1447   void LeaveFrame(StackFrame::Type type);
1448 
1449   // Expects object in rax and returns map with validated enum cache
1450   // in rax.  Assumes that any other register can be used as a scratch.
1451   void CheckEnumCache(Register null_value,
1452                       Label* call_runtime);
1453 
1454   // AllocationMemento support. Arrays may have an associated
1455   // AllocationMemento object that can be checked for in order to pretransition
1456   // to another type.
1457   // On entry, receiver_reg should point to the array object.
1458   // scratch_reg gets clobbered.
1459   // If allocation info is present, condition flags are set to equal.
1460   void TestJSArrayForAllocationMemento(Register receiver_reg,
1461                                        Register scratch_reg,
1462                                        Label* no_memento_found);
1463 
1464   void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1465                                          Register scratch_reg,
1466                                          Label* memento_found) {
1467     Label no_memento_found;
1468     TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1469                                     &no_memento_found);
1470     j(equal, memento_found);
1471     bind(&no_memento_found);
1472   }
1473 
1474   // Jumps to found label if a prototype map has dictionary elements.
1475   void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1476                                         Register scratch1, Label* found);
1477 
1478  private:
1479   // Order in which general registers are pushed by Pushad.
1480   // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
1481   static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
1482   static const int kNumSafepointSavedRegisters = 11;
1483   static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
1484 
1485   bool generating_stub_;
1486   bool has_frame_;
1487   bool root_array_available_;
1488 
1489   // Returns a register holding the smi value. The register MUST NOT be
1490   // modified. It may be the "smi 1 constant" register.
1491   Register GetSmiConstant(Smi* value);
1492 
1493   int64_t RootRegisterDelta(ExternalReference other);
1494 
1495   // Moves the smi value to the destination register.
1496   void LoadSmiConstant(Register dst, Smi* value);
1497 
1498   // This handle will be patched with the code object on installation.
1499   Handle<Object> code_object_;
1500 
1501   // Helper functions for generating invokes.
1502   void InvokePrologue(const ParameterCount& expected,
1503                       const ParameterCount& actual,
1504                       Handle<Code> code_constant,
1505                       Register code_register,
1506                       Label* done,
1507                       bool* definitely_mismatches,
1508                       InvokeFlag flag,
1509                       Label::Distance near_jump = Label::kFar,
1510                       const CallWrapper& call_wrapper = NullCallWrapper());
1511 
1512   void EnterExitFramePrologue(bool save_rax);
1513 
1514   // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
1515   // accessible via StackSpaceOperand.
1516   void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);
1517 
1518   void LeaveExitFrameEpilogue(bool restore_context);
1519 
1520   // Allocation support helpers.
1521   // Loads the top of new-space into the result register.  The address of the
1522   // new-space top is loaded into scratch (if scratch is valid), and the
1523   // new-space top is loaded into result.
1524   void LoadAllocationTopHelper(Register result,
1525                                Register scratch,
1526                                AllocationFlags flags);
1527 
1528   void MakeSureDoubleAlignedHelper(Register result,
1529                                    Register scratch,
1530                                    Label* gc_required,
1531                                    AllocationFlags flags);
1532 
1533   // Update allocation top with value in result_end register.
1534   // If scratch is valid, it contains the address of the allocation top.
1535   void UpdateAllocationTopHelper(Register result_end,
1536                                  Register scratch,
1537                                  AllocationFlags flags);
1538 
1539   // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1540   void InNewSpace(Register object,
1541                   Register scratch,
1542                   Condition cc,
1543                   Label* branch,
1544                   Label::Distance distance = Label::kFar);
1545 
1546   // Helper for finding the mark bits for an address.  Afterwards, the
1547   // bitmap register points at the word with the mark bits and the mask
1548   // register holds the position of the first bit.  Uses rcx as scratch and
1549   // leaves addr_reg unchanged.
1550   inline void GetMarkBits(Register addr_reg,
1551                           Register bitmap_reg,
1552                           Register mask_reg);
1553 
1554   // Helper for throwing exceptions.  Compute a handler address and jump to
1555   // it.  See the implementation for register usage.
1556   void JumpToHandlerEntry();
1557 
1558   // Compute memory operands for safepoint stack slots.
1559   Operand SafepointRegisterSlot(Register reg);
1560   static int SafepointRegisterStackIndex(int reg_code) {
1561     return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
1562   }
1563 
1564   // Needs access to SafepointRegisterStackIndex for compiled frame
1565   // traversal.
1566   friend class StandardFrame;
1567 };
1568 
1569 
1570 // The code patcher is used to patch (typically) small parts of code, e.g. for
1571 // debugging and other types of instrumentation. When using the code patcher,
1572 // the exact number of bytes specified must be emitted. It is not legal to emit
1573 // relocation information. If any of these constraints are violated, an
1574 // assertion fails.
1575 class CodePatcher {
1576  public:
1577   CodePatcher(byte* address, int size);
1578   virtual ~CodePatcher();
1579 
1580   // Macro assembler to emit code.
1581   MacroAssembler* masm() { return &masm_; }
1582 
1583  private:
1584   byte* address_;  // The address of the code being patched.
1585   int size_;  // Expected patch size in bytes.
1586   MacroAssembler masm_;  // Macro assembler used to generate the code.
1587 };
1588 
1589 
1590 // -----------------------------------------------------------------------------
1591 // Static helper functions.
1592 
1593 // Generate an Operand for loading a field from an object.
1594 inline Operand FieldOperand(Register object, int offset) {
1595   return Operand(object, offset - kHeapObjectTag);
1596 }
1597 
1598 
1599 // Generate an Operand for loading an indexed field from an object.
1600 inline Operand FieldOperand(Register object,
1601                             Register index,
1602                             ScaleFactor scale,
1603                             int offset) {
1604   return Operand(object, index, scale, offset - kHeapObjectTag);
1605 }
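// An illustrative usage sketch (the register choices are assumptions for the
// example); the heap-object tag adjustment is already folded into the operand:
//
//   __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
//   __ movp(rcx, FieldOperand(rax, rdx, times_pointer_size,
//                             FixedArray::kHeaderSize));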
1606 
1607 
1608 inline Operand ContextOperand(Register context, int index) {
1609   return Operand(context, Context::SlotOffset(index));
1610 }
1611 
1612 
1613 inline Operand GlobalObjectOperand() {
1614   return ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX);
1615 }
1616 
1617 
1618 // Provides access to exit frame stack space (not GCed).
1619 inline Operand StackSpaceOperand(int index) {
1620 #ifdef _WIN64
1621   const int kShadowSpace = 4;
1622   return Operand(rsp, (index + kShadowSpace) * kPointerSize);
1623 #else
1624   return Operand(rsp, index * kPointerSize);
1625 #endif
1626 }
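// An illustrative usage sketch (the slot index and register are assumptions
// for the example; requires an exit frame set up with arg_stack_space >= 1):
//
//   __ movp(StackSpaceOperand(0), rax);  // spill rax into the reserved slot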
1627 
1628 
1629 inline Operand StackOperandForReturnAddress(int32_t disp) {
1630   return Operand(rsp, disp);
1631 }
1632 
1633 
1634 #ifdef GENERATED_CODE_COVERAGE
1635 extern void LogGeneratedCodeCoverage(const char* file_line);
1636 #define CODE_COVERAGE_STRINGIFY(x) #x
1637 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1638 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1639 #define ACCESS_MASM(masm) {                                                  \
1640     Address x64_coverage_function = FUNCTION_ADDR(LogGeneratedCodeCoverage); \
1641     masm->pushfq();                                                          \
1642     masm->Pushad();                                                          \
1643     masm->Push(Immediate(reinterpret_cast<int>(&__FILE_LINE__)));            \
1644     masm->Call(x64_coverage_function, RelocInfo::EXTERNAL_REFERENCE);        \
1645     masm->Pop(rax);                                                          \
1646     masm->Popad();                                                           \
1647     masm->popfq();                                                           \
1648   }                                                                          \
1649   masm->
1650 #else
1651 #define ACCESS_MASM(masm) masm->
1652 #endif
1653 
1654 } }  // namespace v8::internal
1655 
1656 #endif  // V8_X64_MACRO_ASSEMBLER_X64_H_
1657