• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
29 #define V8_X64_MACRO_ASSEMBLER_X64_H_
30 
31 #include "assembler.h"
32 #include "frames.h"
33 #include "v8globals.h"
34 
35 namespace v8 {
36 namespace internal {
37 
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
  // No special flags.
  NO_ALLOCATION_FLAGS = 0,
  // Return the pointer to the allocated memory already tagged as a heap
  // object.
  TAG_OBJECT = 1 << 0,
  // The content of the result register already contains the allocation top in
  // new space.
  RESULT_CONTAINS_TOP = 1 << 1
};
48 
49 
50 // Default scratch register used by MacroAssembler (and other code that needs
51 // a spare register). The register isn't callee save, and not used by the
52 // function calling convention.
53 const Register kScratchRegister = { 10 };      // r10.
54 const Register kSmiConstantRegister = { 12 };  // r12 (callee save).
55 const Register kRootRegister = { 13 };         // r13 (callee save).
56 // Value of smi in kSmiConstantRegister.
57 const int kSmiConstantRegisterValue = 1;
58 // Actual value of root register is offset from the root array's start
59 // to take advantage of negitive 8-bit displacement values.
60 const int kRootRegisterBias = 128;
61 
62 // Convenience for platform-independent signatures.
63 typedef Operand MemOperand;
64 
65 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
66 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
67 
68 bool AreAliased(Register r1, Register r2, Register r3, Register r4);
69 
70 // Forward declaration.
71 class JumpTarget;
72 
73 struct SmiIndex {
SmiIndexSmiIndex74   SmiIndex(Register index_register, ScaleFactor scale)
75       : reg(index_register),
76         scale(scale) {}
77   Register reg;
78   ScaleFactor scale;
79 };
80 
81 
82 // MacroAssembler implements a collection of frequently used macros.
83 class MacroAssembler: public Assembler {
84  public:
85   // The isolate parameter can be NULL if the macro assembler should
86   // not use isolate-dependent functionality. In this case, it's the
87   // responsibility of the caller to never invoke such function on the
88   // macro assembler.
89   MacroAssembler(Isolate* isolate, void* buffer, int size);
90 
91   // Prevent the use of the RootArray during the lifetime of this
92   // scope object.
93   class NoRootArrayScope BASE_EMBEDDED {
94    public:
NoRootArrayScope(MacroAssembler * assembler)95     explicit NoRootArrayScope(MacroAssembler* assembler)
96         : variable_(&assembler->root_array_available_),
97           old_value_(assembler->root_array_available_) {
98       assembler->root_array_available_ = false;
99     }
~NoRootArrayScope()100     ~NoRootArrayScope() {
101       *variable_ = old_value_;
102     }
103    private:
104     bool* variable_;
105     bool old_value_;
106   };
107 
108   // Operand pointing to an external reference.
109   // May emit code to set up the scratch register. The operand is
110   // only guaranteed to be correct as long as the scratch register
111   // isn't changed.
112   // If the operand is used more than once, use a scratch register
113   // that is guaranteed not to be clobbered.
114   Operand ExternalOperand(ExternalReference reference,
115                           Register scratch = kScratchRegister);
116   // Loads and stores the value of an external reference.
117   // Special case code for load and store to take advantage of
118   // load_rax/store_rax if possible/necessary.
119   // For other operations, just use:
120   //   Operand operand = ExternalOperand(extref);
121   //   operation(operand, ..);
122   void Load(Register destination, ExternalReference source);
123   void Store(ExternalReference destination, Register source);
124   // Loads the address of the external reference into the destination
125   // register.
126   void LoadAddress(Register destination, ExternalReference source);
127   // Returns the size of the code generated by LoadAddress.
128   // Used by CallSize(ExternalReference) to find the size of a call.
129   int LoadAddressSize(ExternalReference source);
130 
131   // Operations on roots in the root-array.
132   void LoadRoot(Register destination, Heap::RootListIndex index);
133   void StoreRoot(Register source, Heap::RootListIndex index);
134   // Load a root value where the index (or part of it) is variable.
135   // The variable_offset register is added to the fixed_offset value
136   // to get the index into the root-array.
137   void LoadRootIndexed(Register destination,
138                        Register variable_offset,
139                        int fixed_offset);
140   void CompareRoot(Register with, Heap::RootListIndex index);
141   void CompareRoot(const Operand& with, Heap::RootListIndex index);
142   void PushRoot(Heap::RootListIndex index);
143 
144   // These functions do not arrange the registers in any particular order so
145   // they are not useful for calls that can cause a GC.  The caller can
146   // exclude up to 3 registers that do not need to be saved and restored.
147   void PushCallerSaved(SaveFPRegsMode fp_mode,
148                        Register exclusion1 = no_reg,
149                        Register exclusion2 = no_reg,
150                        Register exclusion3 = no_reg);
151   void PopCallerSaved(SaveFPRegsMode fp_mode,
152                       Register exclusion1 = no_reg,
153                       Register exclusion2 = no_reg,
154                       Register exclusion3 = no_reg);
155 
156 // ---------------------------------------------------------------------------
157 // GC Support
158 
159 
160   enum RememberedSetFinalAction {
161     kReturnAtEnd,
162     kFallThroughAtEnd
163   };
164 
165   // Record in the remembered set the fact that we have a pointer to new space
166   // at the address pointed to by the addr register.  Only works if addr is not
167   // in new space.
168   void RememberedSetHelper(Register object,  // Used for debug code.
169                            Register addr,
170                            Register scratch,
171                            SaveFPRegsMode save_fp,
172                            RememberedSetFinalAction and_then);
173 
174   void CheckPageFlag(Register object,
175                      Register scratch,
176                      int mask,
177                      Condition cc,
178                      Label* condition_met,
179                      Label::Distance condition_met_distance = Label::kFar);
180 
181   // Check if object is in new space.  Jumps if the object is not in new space.
182   // The register scratch can be object itself, but scratch will be clobbered.
183   void JumpIfNotInNewSpace(Register object,
184                            Register scratch,
185                            Label* branch,
186                            Label::Distance distance = Label::kFar) {
187     InNewSpace(object, scratch, not_equal, branch, distance);
188   }
189 
190   // Check if object is in new space.  Jumps if the object is in new space.
191   // The register scratch can be object itself, but it will be clobbered.
192   void JumpIfInNewSpace(Register object,
193                         Register scratch,
194                         Label* branch,
195                         Label::Distance distance = Label::kFar) {
196     InNewSpace(object, scratch, equal, branch, distance);
197   }
198 
199   // Check if an object has the black incremental marking color.  Also uses rcx!
200   void JumpIfBlack(Register object,
201                    Register scratch0,
202                    Register scratch1,
203                    Label* on_black,
204                    Label::Distance on_black_distance = Label::kFar);
205 
206   // Detects conservatively whether an object is data-only, i.e. it does not
207   // need to be scanned by the garbage collector.
208   void JumpIfDataObject(Register value,
209                         Register scratch,
210                         Label* not_data_object,
211                         Label::Distance not_data_object_distance);
212 
213   // Checks the color of an object.  If the object is already grey or black
214   // then we just fall through, since it is already live.  If it is white and
215   // we can determine that it doesn't need to be scanned, then we just mark it
216   // black and fall through.  For the rest we jump to the label so the
217   // incremental marker can fix its assumptions.
218   void EnsureNotWhite(Register object,
219                       Register scratch1,
220                       Register scratch2,
221                       Label* object_is_white_and_not_data,
222                       Label::Distance distance);
223 
224   // Notify the garbage collector that we wrote a pointer into an object.
225   // |object| is the object being stored into, |value| is the object being
226   // stored.  value and scratch registers are clobbered by the operation.
227   // The offset is the offset from the start of the object, not the offset from
228   // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
229   void RecordWriteField(
230       Register object,
231       int offset,
232       Register value,
233       Register scratch,
234       SaveFPRegsMode save_fp,
235       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
236       SmiCheck smi_check = INLINE_SMI_CHECK);
237 
238   // As above, but the offset has the tag presubtracted.  For use with
239   // Operand(reg, off).
240   void RecordWriteContextSlot(
241       Register context,
242       int offset,
243       Register value,
244       Register scratch,
245       SaveFPRegsMode save_fp,
246       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
247       SmiCheck smi_check = INLINE_SMI_CHECK) {
248     RecordWriteField(context,
249                      offset + kHeapObjectTag,
250                      value,
251                      scratch,
252                      save_fp,
253                      remembered_set_action,
254                      smi_check);
255   }
256 
257   // Notify the garbage collector that we wrote a pointer into a fixed array.
258   // |array| is the array being stored into, |value| is the
259   // object being stored.  |index| is the array index represented as a non-smi.
260   // All registers are clobbered by the operation RecordWriteArray
261   // filters out smis so it does not update the write barrier if the
262   // value is a smi.
263   void RecordWriteArray(
264       Register array,
265       Register value,
266       Register index,
267       SaveFPRegsMode save_fp,
268       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
269       SmiCheck smi_check = INLINE_SMI_CHECK);
270 
271   // For page containing |object| mark region covering |address|
272   // dirty. |object| is the object being stored into, |value| is the
273   // object being stored. The address and value registers are clobbered by the
274   // operation.  RecordWrite filters out smis so it does not update
275   // the write barrier if the value is a smi.
276   void RecordWrite(
277       Register object,
278       Register address,
279       Register value,
280       SaveFPRegsMode save_fp,
281       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
282       SmiCheck smi_check = INLINE_SMI_CHECK);
283 
284 #ifdef ENABLE_DEBUGGER_SUPPORT
285   // ---------------------------------------------------------------------------
286   // Debugger Support
287 
288   void DebugBreak();
289 #endif
290 
291   // Enter specific kind of exit frame; either in normal or
292   // debug mode. Expects the number of arguments in register rax and
293   // sets up the number of arguments in register rdi and the pointer
294   // to the first argument in register rsi.
295   //
296   // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
297   // accessible via StackSpaceOperand.
298   void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false);
299 
300   // Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
301   // memory (not GCed) on the stack accessible via StackSpaceOperand.
302   void EnterApiExitFrame(int arg_stack_space);
303 
304   // Leave the current exit frame. Expects/provides the return value in
305   // register rax:rdx (untouched) and the pointer to the first
306   // argument in register rsi.
307   void LeaveExitFrame(bool save_doubles = false);
308 
309   // Leave the current exit frame. Expects/provides the return value in
310   // register rax (untouched).
311   void LeaveApiExitFrame();
312 
313   // Push and pop the registers that can hold pointers.
PushSafepointRegisters()314   void PushSafepointRegisters() { Pushad(); }
PopSafepointRegisters()315   void PopSafepointRegisters() { Popad(); }
316   // Store the value in register src in the safepoint register stack
317   // slot for register dst.
318   void StoreToSafepointRegisterSlot(Register dst, Register src);
319   void LoadFromSafepointRegisterSlot(Register dst, Register src);
320 
InitializeRootRegister()321   void InitializeRootRegister() {
322     ExternalReference roots_array_start =
323         ExternalReference::roots_array_start(isolate());
324     movq(kRootRegister, roots_array_start);
325     addq(kRootRegister, Immediate(kRootRegisterBias));
326   }
327 
328   // ---------------------------------------------------------------------------
329   // JavaScript invokes
330 
331   // Set up call kind marking in rcx. The method takes rcx as an
332   // explicit first parameter to make the code more readable at the
333   // call sites.
334   void SetCallKind(Register dst, CallKind kind);
335 
336   // Invoke the JavaScript function code by either calling or jumping.
337   void InvokeCode(Register code,
338                   const ParameterCount& expected,
339                   const ParameterCount& actual,
340                   InvokeFlag flag,
341                   const CallWrapper& call_wrapper,
342                   CallKind call_kind);
343 
344   void InvokeCode(Handle<Code> code,
345                   const ParameterCount& expected,
346                   const ParameterCount& actual,
347                   RelocInfo::Mode rmode,
348                   InvokeFlag flag,
349                   const CallWrapper& call_wrapper,
350                   CallKind call_kind);
351 
352   // Invoke the JavaScript function in the given register. Changes the
353   // current context to the context in the function before invoking.
354   void InvokeFunction(Register function,
355                       const ParameterCount& actual,
356                       InvokeFlag flag,
357                       const CallWrapper& call_wrapper,
358                       CallKind call_kind);
359 
360   void InvokeFunction(Handle<JSFunction> function,
361                       const ParameterCount& actual,
362                       InvokeFlag flag,
363                       const CallWrapper& call_wrapper,
364                       CallKind call_kind);
365 
366   // Invoke specified builtin JavaScript function. Adds an entry to
367   // the unresolved list if the name does not resolve.
368   void InvokeBuiltin(Builtins::JavaScript id,
369                      InvokeFlag flag,
370                      const CallWrapper& call_wrapper = NullCallWrapper());
371 
372   // Store the function for the given builtin in the target register.
373   void GetBuiltinFunction(Register target, Builtins::JavaScript id);
374 
375   // Store the code object for the given builtin in the target register.
376   void GetBuiltinEntry(Register target, Builtins::JavaScript id);
377 
378 
379   // ---------------------------------------------------------------------------
380   // Smi tagging, untagging and operations on tagged smis.
381 
InitializeSmiConstantRegister()382   void InitializeSmiConstantRegister() {
383     movq(kSmiConstantRegister,
384          reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
385          RelocInfo::NONE);
386   }
387 
388   // Conversions between tagged smi values and non-tagged integer values.
389 
390   // Tag an integer value. The result must be known to be a valid smi value.
391   // Only uses the low 32 bits of the src register. Sets the N and Z flags
392   // based on the value of the resulting smi.
393   void Integer32ToSmi(Register dst, Register src);
394 
395   // Stores an integer32 value into a memory field that already holds a smi.
396   void Integer32ToSmiField(const Operand& dst, Register src);
397 
398   // Adds constant to src and tags the result as a smi.
399   // Result must be a valid smi.
400   void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
401 
402   // Convert smi to 32-bit integer. I.e., not sign extended into
403   // high 32 bits of destination.
404   void SmiToInteger32(Register dst, Register src);
405   void SmiToInteger32(Register dst, const Operand& src);
406 
407   // Convert smi to 64-bit integer (sign extended if necessary).
408   void SmiToInteger64(Register dst, Register src);
409   void SmiToInteger64(Register dst, const Operand& src);
410 
411   // Multiply a positive smi's integer value by a power of two.
412   // Provides result as 64-bit integer value.
413   void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
414                                              Register src,
415                                              int power);
416 
417   // Divide a positive smi's integer value by a power of two.
418   // Provides result as 32-bit integer value.
419   void PositiveSmiDivPowerOfTwoToInteger32(Register dst,
420                                            Register src,
421                                            int power);
422 
423   // Perform the logical or of two smi values and return a smi value.
424   // If either argument is not a smi, jump to on_not_smis and retain
425   // the original values of source registers. The destination register
426   // may be changed if it's not one of the source registers.
427   void SmiOrIfSmis(Register dst,
428                    Register src1,
429                    Register src2,
430                    Label* on_not_smis,
431                    Label::Distance near_jump = Label::kFar);
432 
433 
434   // Simple comparison of smis.  Both sides must be known smis to use these,
435   // otherwise use Cmp.
436   void SmiCompare(Register smi1, Register smi2);
437   void SmiCompare(Register dst, Smi* src);
438   void SmiCompare(Register dst, const Operand& src);
439   void SmiCompare(const Operand& dst, Register src);
440   void SmiCompare(const Operand& dst, Smi* src);
441   // Compare the int32 in src register to the value of the smi stored at dst.
442   void SmiCompareInteger32(const Operand& dst, Register src);
443   // Sets sign and zero flags depending on value of smi in register.
444   void SmiTest(Register src);
445 
446   // Functions performing a check on a known or potential smi. Returns
447   // a condition that is satisfied if the check is successful.
448 
449   // Is the value a tagged smi.
450   Condition CheckSmi(Register src);
451   Condition CheckSmi(const Operand& src);
452 
453   // Is the value a non-negative tagged smi.
454   Condition CheckNonNegativeSmi(Register src);
455 
456   // Are both values tagged smis.
457   Condition CheckBothSmi(Register first, Register second);
458 
459   // Are both values non-negative tagged smis.
460   Condition CheckBothNonNegativeSmi(Register first, Register second);
461 
462   // Are either value a tagged smi.
463   Condition CheckEitherSmi(Register first,
464                            Register second,
465                            Register scratch = kScratchRegister);
466 
467   // Is the value the minimum smi value (since we are using
468   // two's complement numbers, negating the value is known to yield
469   // a non-smi value).
470   Condition CheckIsMinSmi(Register src);
471 
472   // Checks whether an 32-bit integer value is a valid for conversion
473   // to a smi.
474   Condition CheckInteger32ValidSmiValue(Register src);
475 
476   // Checks whether an 32-bit unsigned integer value is a valid for
477   // conversion to a smi.
478   Condition CheckUInteger32ValidSmiValue(Register src);
479 
480   // Check whether src is a Smi, and set dst to zero if it is a smi,
481   // and to one if it isn't.
482   void CheckSmiToIndicator(Register dst, Register src);
483   void CheckSmiToIndicator(Register dst, const Operand& src);
484 
485   // Test-and-jump functions. Typically combines a check function
486   // above with a conditional jump.
487 
488   // Jump if the value cannot be represented by a smi.
489   void JumpIfNotValidSmiValue(Register src, Label* on_invalid,
490                               Label::Distance near_jump = Label::kFar);
491 
492   // Jump if the unsigned integer value cannot be represented by a smi.
493   void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
494                                   Label::Distance near_jump = Label::kFar);
495 
496   // Jump to label if the value is a tagged smi.
497   void JumpIfSmi(Register src,
498                  Label* on_smi,
499                  Label::Distance near_jump = Label::kFar);
500 
501   // Jump to label if the value is not a tagged smi.
502   void JumpIfNotSmi(Register src,
503                     Label* on_not_smi,
504                     Label::Distance near_jump = Label::kFar);
505 
506   // Jump to label if the value is not a non-negative tagged smi.
507   void JumpUnlessNonNegativeSmi(Register src,
508                                 Label* on_not_smi,
509                                 Label::Distance near_jump = Label::kFar);
510 
511   // Jump to label if the value, which must be a tagged smi, has value equal
512   // to the constant.
513   void JumpIfSmiEqualsConstant(Register src,
514                                Smi* constant,
515                                Label* on_equals,
516                                Label::Distance near_jump = Label::kFar);
517 
518   // Jump if either or both register are not smi values.
519   void JumpIfNotBothSmi(Register src1,
520                         Register src2,
521                         Label* on_not_both_smi,
522                         Label::Distance near_jump = Label::kFar);
523 
524   // Jump if either or both register are not non-negative smi values.
525   void JumpUnlessBothNonNegativeSmi(Register src1, Register src2,
526                                     Label* on_not_both_smi,
527                                     Label::Distance near_jump = Label::kFar);
528 
529   // Operations on tagged smi values.
530 
531   // Smis represent a subset of integers. The subset is always equivalent to
532   // a two's complement interpretation of a fixed number of bits.
533 
534   // Optimistically adds an integer constant to a supposed smi.
535   // If the src is not a smi, or the result is not a smi, jump to
536   // the label.
537   void SmiTryAddConstant(Register dst,
538                          Register src,
539                          Smi* constant,
540                          Label* on_not_smi_result,
541                          Label::Distance near_jump = Label::kFar);
542 
543   // Add an integer constant to a tagged smi, giving a tagged smi as result.
544   // No overflow testing on the result is done.
545   void SmiAddConstant(Register dst, Register src, Smi* constant);
546 
547   // Add an integer constant to a tagged smi, giving a tagged smi as result.
548   // No overflow testing on the result is done.
549   void SmiAddConstant(const Operand& dst, Smi* constant);
550 
551   // Add an integer constant to a tagged smi, giving a tagged smi as result,
552   // or jumping to a label if the result cannot be represented by a smi.
553   void SmiAddConstant(Register dst,
554                       Register src,
555                       Smi* constant,
556                       Label* on_not_smi_result,
557                       Label::Distance near_jump = Label::kFar);
558 
559   // Subtract an integer constant from a tagged smi, giving a tagged smi as
560   // result. No testing on the result is done. Sets the N and Z flags
561   // based on the value of the resulting integer.
562   void SmiSubConstant(Register dst, Register src, Smi* constant);
563 
564   // Subtract an integer constant from a tagged smi, giving a tagged smi as
565   // result, or jumping to a label if the result cannot be represented by a smi.
566   void SmiSubConstant(Register dst,
567                       Register src,
568                       Smi* constant,
569                       Label* on_not_smi_result,
570                       Label::Distance near_jump = Label::kFar);
571 
572   // Negating a smi can give a negative zero or too large positive value.
573   // NOTICE: This operation jumps on success, not failure!
574   void SmiNeg(Register dst,
575               Register src,
576               Label* on_smi_result,
577               Label::Distance near_jump = Label::kFar);
578 
579   // Adds smi values and return the result as a smi.
580   // If dst is src1, then src1 will be destroyed, even if
581   // the operation is unsuccessful.
582   void SmiAdd(Register dst,
583               Register src1,
584               Register src2,
585               Label* on_not_smi_result,
586               Label::Distance near_jump = Label::kFar);
587   void SmiAdd(Register dst,
588               Register src1,
589               const Operand& src2,
590               Label* on_not_smi_result,
591               Label::Distance near_jump = Label::kFar);
592 
593   void SmiAdd(Register dst,
594               Register src1,
595               Register src2);
596 
597   // Subtracts smi values and return the result as a smi.
598   // If dst is src1, then src1 will be destroyed, even if
599   // the operation is unsuccessful.
600   void SmiSub(Register dst,
601               Register src1,
602               Register src2,
603               Label* on_not_smi_result,
604               Label::Distance near_jump = Label::kFar);
605 
606   void SmiSub(Register dst,
607               Register src1,
608               Register src2);
609 
610   void SmiSub(Register dst,
611               Register src1,
612               const Operand& src2,
613               Label* on_not_smi_result,
614               Label::Distance near_jump = Label::kFar);
615 
616   void SmiSub(Register dst,
617               Register src1,
618               const Operand& src2);
619 
620   // Multiplies smi values and return the result as a smi,
621   // if possible.
622   // If dst is src1, then src1 will be destroyed, even if
623   // the operation is unsuccessful.
624   void SmiMul(Register dst,
625               Register src1,
626               Register src2,
627               Label* on_not_smi_result,
628               Label::Distance near_jump = Label::kFar);
629 
630   // Divides one smi by another and returns the quotient.
631   // Clobbers rax and rdx registers.
632   void SmiDiv(Register dst,
633               Register src1,
634               Register src2,
635               Label* on_not_smi_result,
636               Label::Distance near_jump = Label::kFar);
637 
638   // Divides one smi by another and returns the remainder.
639   // Clobbers rax and rdx registers.
640   void SmiMod(Register dst,
641               Register src1,
642               Register src2,
643               Label* on_not_smi_result,
644               Label::Distance near_jump = Label::kFar);
645 
646   // Bitwise operations.
647   void SmiNot(Register dst, Register src);
648   void SmiAnd(Register dst, Register src1, Register src2);
649   void SmiOr(Register dst, Register src1, Register src2);
650   void SmiXor(Register dst, Register src1, Register src2);
651   void SmiAndConstant(Register dst, Register src1, Smi* constant);
652   void SmiOrConstant(Register dst, Register src1, Smi* constant);
653   void SmiXorConstant(Register dst, Register src1, Smi* constant);
654 
655   void SmiShiftLeftConstant(Register dst,
656                             Register src,
657                             int shift_value);
658   void SmiShiftLogicalRightConstant(Register dst,
659                                   Register src,
660                                   int shift_value,
661                                   Label* on_not_smi_result,
662                                   Label::Distance near_jump = Label::kFar);
663   void SmiShiftArithmeticRightConstant(Register dst,
664                                        Register src,
665                                        int shift_value);
666 
667   // Shifts a smi value to the left, and returns the result if that is a smi.
668   // Uses and clobbers rcx, so dst may not be rcx.
669   void SmiShiftLeft(Register dst,
670                     Register src1,
671                     Register src2);
672   // Shifts a smi value to the right, shifting in zero bits at the top, and
673 // returns the unsigned interpretation of the result if that is a smi.
674   // Uses and clobbers rcx, so dst may not be rcx.
675   void SmiShiftLogicalRight(Register dst,
676                             Register src1,
677                             Register src2,
678                             Label* on_not_smi_result,
679                             Label::Distance near_jump = Label::kFar);
680   // Shifts a smi value to the right, sign extending the top, and
681 // returns the signed interpretation of the result. That will always
682   // be a valid smi value, since it's numerically smaller than the
683   // original.
684   // Uses and clobbers rcx, so dst may not be rcx.
685   void SmiShiftArithmeticRight(Register dst,
686                                Register src1,
687                                Register src2);
688 
689   // Specialized operations
690 
691   // Select the non-smi register of two registers where exactly one is a
692   // smi. If neither are smis, jump to the failure label.
693   void SelectNonSmi(Register dst,
694                     Register src1,
695                     Register src2,
696                     Label* on_not_smis,
697                     Label::Distance near_jump = Label::kFar);
698 
699   // Converts, if necessary, a smi to a combination of number and
700   // multiplier to be used as a scaled index.
701   // The src register contains a *positive* smi value. The shift is the
702   // power of two to multiply the index value by (e.g.
703   // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
704   // The returned index register may be either src or dst, depending
705   // on what is most efficient. If src and dst are different registers,
706   // src is always unchanged.
707   SmiIndex SmiToIndex(Register dst, Register src, int shift);
708 
709   // Converts a positive smi to a negative index.
710   SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
711 
712   // Add the value of a smi in memory to an int32 register.
713   // Sets flags as a normal add.
714   void AddSmiField(Register dst, const Operand& src);
715 
716   // Basic Smi operations.
  // Load the smi constant |source| into register |dst|
  // (forwards to the private LoadSmiConstant helper).
  void Move(Register dst, Smi* source) {
    LoadSmiConstant(dst, source);
  }
720 
  // Store the smi constant |source| to the memory operand |dst|.
  // GetSmiConstant may return a pre-loaded constant register (e.g. the
  // "smi 1 constant" register); that register must not be modified.
  void Move(const Operand& dst, Smi* source) {
    Register constant = GetSmiConstant(source);
    movq(dst, constant);
  }
725 
726   void Push(Smi* smi);
727   void Test(const Operand& dst, Smi* source);
728 
729 
730   // ---------------------------------------------------------------------------
731   // String macros.
732 
733   // If object is a string, its map is loaded into object_map.
734   void JumpIfNotString(Register object,
735                        Register object_map,
736                        Label* not_string,
737                        Label::Distance near_jump = Label::kFar);
738 
739 
740   void JumpIfNotBothSequentialAsciiStrings(
741       Register first_object,
742       Register second_object,
743       Register scratch1,
744       Register scratch2,
745       Label* on_not_both_flat_ascii,
746       Label::Distance near_jump = Label::kFar);
747 
  // Check whether the instance type represents a flat ASCII string. Jump to
  // |on_not_flat_ascii_string| if not. If the instance type can be
  // scratched, specify the same register for both instance type and
  // scratch.
  void JumpIfInstanceTypeIsNotSequentialAscii(
      Register instance_type,
      Register scratch,
      Label* on_not_flat_ascii_string,
      Label::Distance near_jump = Label::kFar);
756 
757   void JumpIfBothInstanceTypesAreNotSequentialAscii(
758       Register first_object_instance_type,
759       Register second_object_instance_type,
760       Register scratch1,
761       Register scratch2,
762       Label* on_fail,
763       Label::Distance near_jump = Label::kFar);
764 
765   // ---------------------------------------------------------------------------
766   // Macro instructions.
767 
768   // Load a register with a long value as efficiently as possible.
769   void Set(Register dst, int64_t x);
770   void Set(const Operand& dst, int64_t x);
771 
772   // Move if the registers are not identical.
773   void Move(Register target, Register source);
774 
775   // Bit-field support.
776   void TestBit(const Operand& dst, int bit_index);
777 
778   // Handle support
779   void Move(Register dst, Handle<Object> source);
780   void Move(const Operand& dst, Handle<Object> source);
781   void Cmp(Register dst, Handle<Object> source);
782   void Cmp(const Operand& dst, Handle<Object> source);
783   void Cmp(Register dst, Smi* src);
784   void Cmp(const Operand& dst, Smi* src);
785   void Push(Handle<Object> source);
786 
787   // Load a heap object and handle the case of new-space objects by
788   // indirecting via a global cell.
789   void LoadHeapObject(Register result, Handle<HeapObject> object);
790   void PushHeapObject(Handle<HeapObject> object);
791 
LoadObject(Register result,Handle<Object> object)792   void LoadObject(Register result, Handle<Object> object) {
793     if (object->IsHeapObject()) {
794       LoadHeapObject(result, Handle<HeapObject>::cast(object));
795     } else {
796       Move(result, object);
797     }
798   }
799 
800   // Load a global cell into a register.
801   void LoadGlobalCell(Register dst, Handle<JSGlobalPropertyCell> cell);
802 
803   // Emit code to discard a non-negative number of pointer-sized elements
804   // from the stack, clobbering only the rsp register.
805   void Drop(int stack_elements);
806 
  // Emit a call to code at a label bound within the current code object.
  void Call(Label* target) { call(target); }
808 
809   // Control Flow
810   void Jump(Address destination, RelocInfo::Mode rmode);
811   void Jump(ExternalReference ext);
812   void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
813 
814   void Call(Address destination, RelocInfo::Mode rmode);
815   void Call(ExternalReference ext);
816   void Call(Handle<Code> code_object,
817             RelocInfo::Mode rmode,
818             unsigned ast_id = kNoASTId);
819 
820   // The size of the code generated for different call instructions.
  // Size in bytes of a Call(Address, rmode) sequence. The result is a
  // fixed constant, so the destination and mode parameters are
  // intentionally unused.
  int CallSize(Address destination, RelocInfo::Mode rmode) {
    return kCallInstructionLength;
  }
824   int CallSize(ExternalReference ext);
  // Size in bytes of a call to a code object.
  int CallSize(Handle<Code> code_object) {
    // Code calls use 32-bit relative addressing.
    return kShortCallInstructionLength;
  }
CallSize(Register target)829   int CallSize(Register target) {
830     // Opcode: REX_opt FF /2 m64
831     return (target.high_bit() != 0) ? 3 : 2;
832   }
CallSize(const Operand & target)833   int CallSize(const Operand& target) {
834     // Opcode: REX_opt FF /2 m64
835     return (target.requires_rex() ? 2 : 1) + target.operand_size();
836   }
837 
838   // Emit call to the code we are currently generating.
  // Emit call to the code we are currently generating.
  void CallSelf() {
    // code_object_ is a handle patched with the actual code object on
    // installation (see code_object_ below); reinterpret it as a Code
    // handle so it can be used as a call target.
    Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
    Call(self, RelocInfo::CODE_TARGET);
  }
843 
844   // Non-x64 instructions.
845   // Push/pop all general purpose registers.
846   // Does not push rsp/rbp nor any of the assembler's special purpose registers
847   // (kScratchRegister, kSmiConstantRegister, kRootRegister).
848   void Pushad();
849   void Popad();
850   // Sets the stack as after performing Popad, without actually loading the
851   // registers.
852   void Dropad();
853 
854   // Compare object type for heap object.
855   // Always use unsigned comparisons: above and below, not less and greater.
856   // Incoming register is heap_object and outgoing register is map.
857   // They may be the same register, and may be kScratchRegister.
858   void CmpObjectType(Register heap_object, InstanceType type, Register map);
859 
860   // Compare instance type for map.
861   // Always use unsigned comparisons: above and below, not less and greater.
862   void CmpInstanceType(Register map, InstanceType type);
863 
864   // Check if a map for a JSObject indicates that the object has fast elements.
865   // Jump to the specified label if it does not.
866   void CheckFastElements(Register map,
867                          Label* fail,
868                          Label::Distance distance = Label::kFar);
869 
870   // Check if a map for a JSObject indicates that the object can have both smi
871   // and HeapObject elements.  Jump to the specified label if it does not.
872   void CheckFastObjectElements(Register map,
873                                Label* fail,
874                                Label::Distance distance = Label::kFar);
875 
876   // Check if a map for a JSObject indicates that the object has fast smi only
877   // elements.  Jump to the specified label if it does not.
878   void CheckFastSmiOnlyElements(Register map,
879                                 Label* fail,
880                                 Label::Distance distance = Label::kFar);
881 
882   // Check to see if maybe_number can be stored as a double in
883   // FastDoubleElements. If it can, store it at the index specified by index in
884   // the FastDoubleElements array elements, otherwise jump to fail.  Note that
885   // index must not be smi-tagged.
886   void StoreNumberToDoubleElements(Register maybe_number,
887                                    Register elements,
888                                    Register index,
889                                    XMMRegister xmm_scratch,
890                                    Label* fail);
891 
892   // Compare an object's map with the specified map and its transitioned
893   // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
894   // result of map compare. If multiple map compares are required, the compare
895   // sequences branches to early_success.
896   void CompareMap(Register obj,
897                   Handle<Map> map,
898                   Label* early_success,
899                   CompareMapMode mode = REQUIRE_EXACT_MAP);
900 
901   // Check if the map of an object is equal to a specified map and branch to
902   // label if not. Skip the smi check if not required (object is known to be a
903   // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
904   // against maps that are ElementsKind transition maps of the specified map.
905   void CheckMap(Register obj,
906                 Handle<Map> map,
907                 Label* fail,
908                 SmiCheckType smi_check_type,
909                 CompareMapMode mode = REQUIRE_EXACT_MAP);
910 
911   // Check if the map of an object is equal to a specified map and branch to a
912   // specified target if equal. Skip the smi check if not required (object is
913   // known to be a heap object)
914   void DispatchMap(Register obj,
915                    Handle<Map> map,
916                    Handle<Code> success,
917                    SmiCheckType smi_check_type);
918 
919   // Check if the object in register heap_object is a string. Afterwards the
920   // register map contains the object map and the register instance_type
921   // contains the instance_type. The registers map and instance_type can be the
922   // same in which case it contains the instance type afterwards. Either of the
923   // registers map and instance_type can be the same as heap_object.
924   Condition IsObjectStringType(Register heap_object,
925                                Register map,
926                                Register instance_type);
927 
928   // FCmp compares and pops the two values on top of the FPU stack.
929   // The flag results are similar to integer cmp, but requires unsigned
930   // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
931   void FCmp();
932 
933   void ClampUint8(Register reg);
934 
935   void ClampDoubleToUint8(XMMRegister input_reg,
936                           XMMRegister temp_xmm_reg,
937                           Register result_reg,
938                           Register temp_reg);
939 
940   void LoadInstanceDescriptors(Register map, Register descriptors);
941 
942   // Abort execution if argument is not a number. Used in debug code.
943   void AbortIfNotNumber(Register object);
944 
945   // Abort execution if argument is a smi. Used in debug code.
946   void AbortIfSmi(Register object);
947 
948   // Abort execution if argument is not a smi. Used in debug code.
949   void AbortIfNotSmi(Register object);
950   void AbortIfNotSmi(const Operand& object);
951 
952   // Abort execution if a 64 bit register containing a 32 bit payload does not
953   // have zeros in the top 32 bits.
954   void AbortIfNotZeroExtended(Register reg);
955 
956   // Abort execution if argument is a string. Used in debug code.
957   void AbortIfNotString(Register object);
958 
959   // Abort execution if argument is not the root value with the given index.
960   void AbortIfNotRootValue(Register src,
961                            Heap::RootListIndex root_value_index,
962                            const char* message);
963 
964   // ---------------------------------------------------------------------------
965   // Exception handling
966 
967   // Push a new try handler and link it into try handler chain.
968   void PushTryHandler(StackHandler::Kind kind, int handler_index);
969 
970   // Unlink the stack handler on top of the stack from the try handler chain.
971   void PopTryHandler();
972 
973   // Activate the top handler in the try handler chain and pass the
974   // thrown value.
975   void Throw(Register value);
976 
977   // Propagate an uncatchable exception out of the current JS stack.
978   void ThrowUncatchable(Register value);
979 
980   // ---------------------------------------------------------------------------
981   // Inline caching support
982 
983   // Generate code for checking access rights - used for security checks
984   // on access to global objects across environments. The holder register
985   // is left untouched, but the scratch register and kScratchRegister,
986   // which must be different, are clobbered.
987   void CheckAccessGlobalProxy(Register holder_reg,
988                               Register scratch,
989                               Label* miss);
990 
991   void GetNumberHash(Register r0, Register scratch);
992 
993   void LoadFromNumberDictionary(Label* miss,
994                                 Register elements,
995                                 Register key,
996                                 Register r0,
997                                 Register r1,
998                                 Register r2,
999                                 Register result);
1000 
1001 
1002   // ---------------------------------------------------------------------------
1003   // Allocation support
1004 
1005   // Allocate an object in new space. If the new space is exhausted control
1006   // continues at the gc_required label. The allocated object is returned in
1007   // result and end of the new object is returned in result_end. The register
1008   // scratch can be passed as no_reg in which case an additional object
1009   // reference will be added to the reloc info. The returned pointers in result
1010   // and result_end have not yet been tagged as heap objects. If
1011   // result_contains_top_on_entry is true the content of result is known to be
1012   // the allocation top on entry (could be result_end from a previous call to
1013   // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
1014   // should be no_reg as it is never used.
1015   void AllocateInNewSpace(int object_size,
1016                           Register result,
1017                           Register result_end,
1018                           Register scratch,
1019                           Label* gc_required,
1020                           AllocationFlags flags);
1021 
1022   void AllocateInNewSpace(int header_size,
1023                           ScaleFactor element_size,
1024                           Register element_count,
1025                           Register result,
1026                           Register result_end,
1027                           Register scratch,
1028                           Label* gc_required,
1029                           AllocationFlags flags);
1030 
1031   void AllocateInNewSpace(Register object_size,
1032                           Register result,
1033                           Register result_end,
1034                           Register scratch,
1035                           Label* gc_required,
1036                           AllocationFlags flags);
1037 
1038   // Undo allocation in new space. The object passed and objects allocated after
1039   // it will no longer be allocated. Make sure that no pointers are left to the
1040   // object(s) no longer allocated as they would be invalid when allocation is
1041   // un-done.
1042   void UndoAllocationInNewSpace(Register object);
1043 
1044   // Allocate a heap number in new space with undefined value. Returns
1045   // tagged pointer in result register, or jumps to gc_required if new
1046   // space is full.
1047   void AllocateHeapNumber(Register result,
1048                           Register scratch,
1049                           Label* gc_required);
1050 
1051   // Allocate a sequential string. All the header fields of the string object
1052   // are initialized.
1053   void AllocateTwoByteString(Register result,
1054                              Register length,
1055                              Register scratch1,
1056                              Register scratch2,
1057                              Register scratch3,
1058                              Label* gc_required);
1059   void AllocateAsciiString(Register result,
1060                            Register length,
1061                            Register scratch1,
1062                            Register scratch2,
1063                            Register scratch3,
1064                            Label* gc_required);
1065 
1066   // Allocate a raw cons string object. Only the map field of the result is
1067   // initialized.
1068   void AllocateTwoByteConsString(Register result,
1069                           Register scratch1,
1070                           Register scratch2,
1071                           Label* gc_required);
1072   void AllocateAsciiConsString(Register result,
1073                                Register scratch1,
1074                                Register scratch2,
1075                                Label* gc_required);
1076 
1077   // Allocate a raw sliced string object. Only the map field of the result is
1078   // initialized.
1079   void AllocateTwoByteSlicedString(Register result,
1080                             Register scratch1,
1081                             Register scratch2,
1082                             Label* gc_required);
1083   void AllocateAsciiSlicedString(Register result,
1084                                  Register scratch1,
1085                                  Register scratch2,
1086                                  Label* gc_required);
1087 
1088   // ---------------------------------------------------------------------------
1089   // Support functions.
1090 
1091   // Check if result is zero and op is negative.
1092   void NegativeZeroTest(Register result, Register op, Label* then_label);
1093 
1094   // Check if result is zero and op is negative in code using jump targets.
1095   void NegativeZeroTest(CodeGenerator* cgen,
1096                         Register result,
1097                         Register op,
1098                         JumpTarget* then_target);
1099 
1100   // Check if result is zero and any of op1 and op2 are negative.
1101   // Register scratch is destroyed, and it must be different from op2.
1102   void NegativeZeroTest(Register result, Register op1, Register op2,
1103                         Register scratch, Label* then_label);
1104 
1105   // Try to get function prototype of a function and puts the value in
1106   // the result register. Checks that the function really is a
1107   // function and jumps to the miss label if the fast checks fail. The
1108   // function register will be untouched; the other register may be
1109   // clobbered.
1110   void TryGetFunctionPrototype(Register function,
1111                                Register result,
1112                                Label* miss,
1113                                bool miss_on_bound_function = false);
1114 
1115   // Generates code for reporting that an illegal operation has
1116   // occurred.
1117   void IllegalOperation(int num_arguments);
1118 
1119   // Picks out an array index from the hash field.
1120   // Register use:
1121   //   hash - holds the index's hash. Clobbered.
1122   //   index - holds the overwritten index on exit.
1123   void IndexFromHash(Register hash, Register index);
1124 
1125   // Find the function context up the context chain.
1126   void LoadContext(Register dst, int context_chain_length);
1127 
1128   // Conditionally load the cached Array transitioned map of type
1129   // transitioned_kind from the global context if the map in register
1130   // map_in_out is the cached Array map in the global context of
1131   // expected_kind.
1132   void LoadTransitionedArrayMapConditional(
1133       ElementsKind expected_kind,
1134       ElementsKind transitioned_kind,
1135       Register map_in_out,
1136       Register scratch,
1137       Label* no_map_match);
1138 
1139   // Load the initial map for new Arrays from a JSFunction.
1140   void LoadInitialArrayMap(Register function_in,
1141                            Register scratch,
1142                            Register map_out);
1143 
1144   // Load the global function with the given index.
1145   void LoadGlobalFunction(int index, Register function);
1146 
1147   // Load the initial map from the global function. The registers
1148   // function and map can be the same.
1149   void LoadGlobalFunctionInitialMap(Register function, Register map);
1150 
1151   // ---------------------------------------------------------------------------
1152   // Runtime calls
1153 
1154   // Call a code stub.
1155   void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
1156 
1157   // Tail call a code stub (jump).
1158   void TailCallStub(CodeStub* stub);
1159 
1160   // Return from a code stub after popping its arguments.
1161   void StubReturn(int argc);
1162 
1163   // Call a runtime routine.
1164   void CallRuntime(const Runtime::Function* f, int num_arguments);
1165 
1166   // Call a runtime function and save the value of XMM registers.
1167   void CallRuntimeSaveDoubles(Runtime::FunctionId id);
1168 
1169   // Convenience function: Same as above, but takes the fid instead.
1170   void CallRuntime(Runtime::FunctionId id, int num_arguments);
1171 
1172   // Convenience function: call an external reference.
1173   void CallExternalReference(const ExternalReference& ext,
1174                              int num_arguments);
1175 
1176   // Tail call of a runtime routine (jump).
1177   // Like JumpToExternalReference, but also takes care of passing the number
1178   // of parameters.
1179   void TailCallExternalReference(const ExternalReference& ext,
1180                                  int num_arguments,
1181                                  int result_size);
1182 
1183   // Convenience function: tail call a runtime routine (jump).
1184   void TailCallRuntime(Runtime::FunctionId fid,
1185                        int num_arguments,
1186                        int result_size);
1187 
1188   // Jump to a runtime routine.
1189   void JumpToExternalReference(const ExternalReference& ext, int result_size);
1190 
1191   // Prepares stack to put arguments (aligns and so on).  WIN64 calling
1192   // convention requires to put the pointer to the return value slot into
1193   // rcx (rcx must be preserved until CallApiFunctionAndReturn).  Saves
1194   // context (rsi).  Clobbers rax.  Allocates arg_stack_space * kPointerSize
1195   // inside the exit frame (not GCed) accessible via StackSpaceOperand.
1196   void PrepareCallApiFunction(int arg_stack_space);
1197 
1198   // Calls an API function.  Allocates HandleScope, extracts returned value
1199   // from handle and propagates exceptions.  Clobbers r14, r15, rbx and
1200   // caller-save registers.  Restores context.  On return removes
1201   // stack_space * kPointerSize (GCed).
1202   void CallApiFunctionAndReturn(Address function_address, int stack_space);
1203 
1204   // Before calling a C-function from generated code, align arguments on stack.
1205   // After aligning the frame, arguments must be stored in esp[0], esp[4],
1206   // etc., not pushed. The argument count assumes all arguments are word sized.
1207   // The number of slots reserved for arguments depends on platform. On Windows
1208   // stack slots are reserved for the arguments passed in registers. On other
1209   // platforms stack slots are only reserved for the arguments actually passed
1210   // on the stack.
1211   void PrepareCallCFunction(int num_arguments);
1212 
1213   // Calls a C function and cleans up the space for arguments allocated
1214   // by PrepareCallCFunction. The called function is not allowed to trigger a
1215   // garbage collection, since that might move the code and invalidate the
1216   // return address (unless this is somehow accounted for by the called
1217   // function).
1218   void CallCFunction(ExternalReference function, int num_arguments);
1219   void CallCFunction(Register function, int num_arguments);
1220 
1221   // Calculate the number of stack slots to reserve for arguments when calling a
1222   // C function.
1223   int ArgumentStackSlotsForCFunctionCall(int num_arguments);
1224 
1225   // ---------------------------------------------------------------------------
1226   // Utilities
1227 
1228   void Ret();
1229 
1230   // Return and drop arguments from stack, where the number of arguments
1231   // may be bigger than 2^16 - 1.  Requires a scratch register.
1232   void Ret(int bytes_dropped, Register scratch);
1233 
  // Returns the handle that will be patched with the generated code
  // object on installation (see code_object_). Must not be called
  // before that handle has been set.
  Handle<Object> CodeObject() {
    ASSERT(!code_object_.is_null());
    return code_object_;
  }
1238 
1239   // Copy length bytes from source to destination.
1240   // Uses scratch register internally (if you have a low-eight register
1241   // free, do use it, otherwise kScratchRegister will be used).
1242   // The min_length is a minimum limit on the value that length will have.
1243   // The algorithm has some special cases that might be omitted if the string
1244   // is known to always be long.
1245   void CopyBytes(Register destination,
1246                  Register source,
1247                  Register length,
1248                  int min_length = 0,
1249                  Register scratch = kScratchRegister);
1250 
1251   // Initialize fields with filler values.  Fields starting at |start_offset|
1252   // not including end_offset are overwritten with the value in |filler|.  At
1253   // the end the loop, |start_offset| takes the value of |end_offset|.
1254   void InitializeFieldsWithFiller(Register start_offset,
1255                                   Register end_offset,
1256                                   Register filler);
1257 
1258 
1259   // ---------------------------------------------------------------------------
1260   // StatsCounter support
1261 
1262   void SetCounter(StatsCounter* counter, int value);
1263   void IncrementCounter(StatsCounter* counter, int value);
1264   void DecrementCounter(StatsCounter* counter, int value);
1265 
1266 
1267   // ---------------------------------------------------------------------------
1268   // Debugging
1269 
1270   // Calls Abort(msg) if the condition cc is not satisfied.
1271   // Use --debug_code to enable.
1272   void Assert(Condition cc, const char* msg);
1273 
1274   void AssertFastElements(Register elements);
1275 
1276   // Like Assert(), but always enabled.
1277   void Check(Condition cc, const char* msg);
1278 
1279   // Print a message to stdout and abort execution.
1280   void Abort(const char* msg);
1281 
1282   // Check that the stack is aligned.
1283   void CheckStackAlignment();
1284 
1285   // Verify restrictions about code generated in stubs.
  // Accessors for the flags used to verify restrictions on generated code.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
  bool allow_stub_calls() { return allow_stub_calls_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  // Whether calling |stub| is permitted in the current assembler state;
  // presumably consults the flags above — defined out of line.
  inline bool AllowThisStubCall(CodeStub* stub);
1293 
  // Safepoint stack slot index for |reg|; forwards to the private
  // register-code overload.
  static int SafepointRegisterStackIndex(Register reg) {
    return SafepointRegisterStackIndex(reg.code());
  }
1297 
1298   // Activation support.
1299   void EnterFrame(StackFrame::Type type);
1300   void LeaveFrame(StackFrame::Type type);
1301 
1302   // Expects object in rax and returns map with validated enum cache
1303   // in rax.  Assumes that any other register can be used as a scratch.
1304   void CheckEnumCache(Register null_value,
1305                       Label* call_runtime);
1306 
1307  private:
1308   // Order general registers are pushed by Pushad.
1309   // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
1310   static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
1311   static const int kNumSafepointSavedRegisters = 11;
1312   static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
1313 
1314   bool generating_stub_;
1315   bool allow_stub_calls_;
1316   bool has_frame_;
1317   bool root_array_available_;
1318 
1319   // Returns a register holding the smi value. The register MUST NOT be
1320   // modified. It may be the "smi 1 constant" register.
1321   Register GetSmiConstant(Smi* value);
1322 
1323   // Moves the smi value to the destination register.
1324   void LoadSmiConstant(Register dst, Smi* value);
1325 
1326   // This handle will be patched with the code object on installation.
1327   Handle<Object> code_object_;
1328 
1329   // Helper functions for generating invokes.
1330   void InvokePrologue(const ParameterCount& expected,
1331                       const ParameterCount& actual,
1332                       Handle<Code> code_constant,
1333                       Register code_register,
1334                       Label* done,
1335                       bool* definitely_mismatches,
1336                       InvokeFlag flag,
1337                       Label::Distance near_jump = Label::kFar,
1338                       const CallWrapper& call_wrapper = NullCallWrapper(),
1339                       CallKind call_kind = CALL_AS_METHOD);
1340 
1341   void EnterExitFramePrologue(bool save_rax);
1342 
1343   // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
  // accessible via StackSpaceOperand.
  void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);

  // Tear-down counterpart of EnterExitFrameEpilogue — see the .cc for the
  // exact state restored.
  void LeaveExitFrameEpilogue();

  // Allocation support helpers.
  // Loads the top of new-space into the result register.
  // Otherwise the address of the new-space top is loaded into scratch (if
  // scratch is valid), and the new-space top is loaded into result.
  void LoadAllocationTopHelper(Register result,
                               Register scratch,
                               AllocationFlags flags);
  // Update allocation top with value in result_end register.
  // If scratch is valid, it contains the address of the allocation top.
  void UpdateAllocationTopHelper(Register result_end, Register scratch);

  // Helper for PopHandleScope.  Allowed to perform a GC and returns
  // NULL if gc_allowed.  Does not perform a GC if !gc_allowed, and
  // possibly returns a failure object indicating an allocation failure.
  Object* PopHandleScopeHelper(Register saved,
                               Register scratch,
                               bool gc_allowed);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  // Jumps to |branch| when condition |cc| holds for the new-space test
  // of |object|.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cc,
                  Label* branch,
                  Label::Distance distance = Label::kFar);

  // Helper for finding the mark bits for an address.  Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // the position of the first bit.  Uses rcx as scratch and leaves addr_reg
  // unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);

  // Helper for throwing exceptions.  Compute a handler address and jump to
  // it.  See the implementation for register usage.
  void JumpToHandlerEntry();

  // Compute memory operands for safepoint stack slots.
  Operand SafepointRegisterSlot(Register reg);
SafepointRegisterStackIndex(int reg_code)1388   static int SafepointRegisterStackIndex(int reg_code) {
1389     return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
1390   }

  // OptimizedFrame needs access to SafepointRegisterStackIndex for
  // optimized frame traversal.
  friend class OptimizedFrame;
};
1396 
1397 
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to
// emit relocation information. If any of these constraints are violated it
// causes an assertion to fail.
class CodePatcher {
 public:
  // Starts patching `size` bytes at `address`; the destructor flushes the
  // instruction cache and checks the byte count (see the .cc).
  CodePatcher(byte* address, int size);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};
1416 
1417 
1418 // -----------------------------------------------------------------------------
1419 // Static helper functions.
1420 
1421 // Generate an Operand for loading a field from an object.
FieldOperand(Register object,int offset)1422 inline Operand FieldOperand(Register object, int offset) {
1423   return Operand(object, offset - kHeapObjectTag);
1424 }
1425 
1426 
1427 // Generate an Operand for loading an indexed field from an object.
FieldOperand(Register object,Register index,ScaleFactor scale,int offset)1428 inline Operand FieldOperand(Register object,
1429                             Register index,
1430                             ScaleFactor scale,
1431                             int offset) {
1432   return Operand(object, index, scale, offset - kHeapObjectTag);
1433 }
1434 
1435 
ContextOperand(Register context,int index)1436 inline Operand ContextOperand(Register context, int index) {
1437   return Operand(context, Context::SlotOffset(index));
1438 }
1439 
1440 
// Generate an Operand for the global object slot of the current context
// (the context register rsi).
inline Operand GlobalObjectOperand() {
  return ContextOperand(rsi, Context::GLOBAL_INDEX);
}
1444 
1445 
1446 // Provides access to exit frame stack space (not GCed).
StackSpaceOperand(int index)1447 inline Operand StackSpaceOperand(int index) {
1448 #ifdef _WIN64
1449   const int kShaddowSpace = 4;
1450   return Operand(rsp, (index + kShaddowSpace) * kPointerSize);
1451 #else
1452   return Operand(rsp, index * kPointerSize);
1453 #endif
1454 }
1455 
1456 
1457 
#ifdef GENERATED_CODE_COVERAGE
// Coverage hook called with a "file:line" string for every ACCESS_MASM site.
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
// Wraps each masm-> use so the generated code first saves flags/registers,
// calls LogGeneratedCodeCoverage with this site's file:line string, and
// restores state before emitting the requested instruction.
// NOTE(review): pushfd/pushad/popad/popfd are ia32-only instructions (not
// encodable in 64-bit mode), and reinterpret_cast<int> truncates a 64-bit
// pointer — this path appears copied from the ia32 port and would need
// pushfq, explicit register saves, and intptr_t to work on x64; confirm
// before enabling GENERATED_CODE_COVERAGE.
#define ACCESS_MASM(masm) {                                               \
    byte* x64_coverage_function =                                         \
        reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
    masm->pushfd();                                                       \
    masm->pushad();                                                       \
    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__)));         \
    masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY);          \
    masm->pop(rax);                                                       \
    masm->popad();                                                        \
    masm->popfd();                                                        \
  }                                                                       \
  masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
1478 
1479 } }  // namespace v8::internal
1480 
1481 #endif  // V8_X64_MACRO_ASSEMBLER_X64_H_
1482