1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_S390_MACRO_ASSEMBLER_S390_H_
6 #define V8_S390_MACRO_ASSEMBLER_S390_H_
7 
8 #include "src/assembler.h"
9 #include "src/bailout-reason.h"
10 #include "src/frames.h"
11 #include "src/globals.h"
12 
13 namespace v8 {
14 namespace internal {
15 
16 // Give alias names to registers for calling conventions.
17 const Register kReturnRegister0 = {Register::kCode_r2};
18 const Register kReturnRegister1 = {Register::kCode_r3};
19 const Register kReturnRegister2 = {Register::kCode_r4};
20 const Register kJSFunctionRegister = {Register::kCode_r3};
21 const Register kContextRegister = {Register::kCode_r13};
22 const Register kAllocateSizeRegister = {Register::kCode_r3};
23 const Register kInterpreterAccumulatorRegister = {Register::kCode_r2};
24 const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r6};
25 const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r7};
26 const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
27 const Register kJavaScriptCallArgCountRegister = {Register::kCode_r2};
28 const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r5};
29 const Register kRuntimeCallFunctionRegister = {Register::kCode_r3};
30 const Register kRuntimeCallArgCountRegister = {Register::kCode_r2};
31 
32 // ----------------------------------------------------------------------------
33 // Static helper functions
34 
35 // Generate a MemOperand for loading a field from an object.
36 inline MemOperand FieldMemOperand(Register object, int offset) {
37   return MemOperand(object, offset - kHeapObjectTag);
38 }
39 
40 // Generate a MemOperand for loading a field from an object.
41 inline MemOperand FieldMemOperand(Register object, Register index, int offset) {
42   return MemOperand(object, index, offset - kHeapObjectTag);
43 }
44 
45 // Generate a MemOperand for loading a root-list entry relative to the root register.
46 inline MemOperand RootMemOperand(Heap::RootListIndex index) {
47   return MemOperand(kRootRegister, index << kPointerSizeLog2);
48 }
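
// Illustrative usage sketch (__ abbreviates the MacroAssembler pointer, as is
// common in V8 codegen files). FieldMemOperand subtracts kHeapObjectTag, so a
// tagged object's fields can be addressed directly, e.g. its map:
//   __ LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));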
49 
50 // Flags used for AllocateHeapNumber
51 enum TaggingMode {
52   // Tag the result.
53   TAG_RESULT,
54   // Don't tag
55   DONT_TAG_RESULT
56 };
57 
58 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
59 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
60 enum PointersToHereCheck {
61   kPointersToHereMaybeInteresting,
62   kPointersToHereAreAlwaysInteresting
63 };
64 enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
65 
66 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
67                                    Register reg3 = no_reg,
68                                    Register reg4 = no_reg,
69                                    Register reg5 = no_reg,
70                                    Register reg6 = no_reg);
71 
72 #ifdef DEBUG
73 bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
74                 Register reg4 = no_reg, Register reg5 = no_reg,
75                 Register reg6 = no_reg, Register reg7 = no_reg,
76                 Register reg8 = no_reg, Register reg9 = no_reg,
77                 Register reg10 = no_reg);
78 #endif
79 
80 // These exist to provide portability between 32-bit and 64-bit code.
81 #if V8_TARGET_ARCH_S390X
82 #define Div divd
83 
84 // The length of the arithmetic operation is the length
85 // of the register.
86 
87 // Length:
88 // H = halfword
89 // W = word
90 
91 // Arithmetic and bitwise operations
92 #define AddMI agsi
93 #define AddRR agr
94 #define SubRR sgr
95 #define AndRR ngr
96 #define OrRR ogr
97 #define XorRR xgr
98 #define LoadComplementRR lcgr
99 #define LoadNegativeRR lngr
100 
101 // Distinct Operands
102 #define AddP_RRR agrk
103 #define AddPImm_RRI aghik
104 #define AddLogicalP_RRR algrk
105 #define SubP_RRR sgrk
106 #define SubLogicalP_RRR slgrk
107 #define AndP_RRR ngrk
108 #define OrP_RRR ogrk
109 #define XorP_RRR xgrk
110 
111 // Load / Store
112 #define LoadRR lgr
113 #define LoadAndTestRR ltgr
114 #define LoadImmP lghi
115 
116 // Compare
117 #define CmpPH cghi
118 #define CmpLogicalPW clgfi
119 
120 // Shifts
121 #define ShiftLeftP sllg
122 #define ShiftRightP srlg
123 #define ShiftLeftArithP slag
124 #define ShiftRightArithP srag
125 #else
126 
127 // Arithmetic and bitwise operations
128 // Reg2Reg
129 #define AddMI asi
130 #define AddRR ar
131 #define SubRR sr
132 #define AndRR nr
133 #define OrRR or_z
134 #define XorRR xr
135 #define LoadComplementRR lcr
136 #define LoadNegativeRR lnr
137 
138 // Distinct Operands
139 #define AddP_RRR ark
140 #define AddPImm_RRI ahik
141 #define AddLogicalP_RRR alrk
142 #define SubP_RRR srk
143 #define SubLogicalP_RRR slrk
144 #define AndP_RRR nrk
145 #define OrP_RRR ork
146 #define XorP_RRR xrk
147 
148 // Load / Store
149 #define LoadRR lr
150 #define LoadAndTestRR ltr
151 #define LoadImmP lhi
152 
153 // Compare
154 #define CmpPH chi
155 #define CmpLogicalPW clfi
156 
157 // Shifts
158 #define ShiftLeftP ShiftLeft
159 #define ShiftRightP ShiftRight
160 #define ShiftLeftArithP ShiftLeftArith
161 #define ShiftRightArithP ShiftRightArith
162 
163 #endif
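
// Illustrative note: these aliases let shared macro-assembler code pick the
// pointer-width instruction form at compile time. For example,
//   LoadRR(r3, r4);
// emits lgr (64-bit register copy) on V8_TARGET_ARCH_S390X and lr (32-bit
// copy) on the 31-bit build.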
164 
165 // MacroAssembler implements a collection of frequently used macros.
166 class MacroAssembler : public Assembler {
167  public:
168   MacroAssembler(Isolate* isolate, void* buffer, int size,
169                  CodeObjectRequired create_code_object);
170 
171   // Returns the size of a call in instructions.
172   static int CallSize(Register target);
173   int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
174   static int CallSizeNotPredictableCodeSize(Address target,
175                                             RelocInfo::Mode rmode,
176                                             Condition cond = al);
177 
178   // Jump, Call, and Ret pseudo instructions implementing inter-working.
179   void Jump(Register target);
180   void JumpToJSEntry(Register target);
181   void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
182             CRegister cr = cr7);
183   void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
184   void Call(Register target);
185   void CallJSEntry(Register target);
186   void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
187   int CallSize(Handle<Code> code,
188                RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
189                TypeFeedbackId ast_id = TypeFeedbackId::None(),
190                Condition cond = al);
191   void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
192             TypeFeedbackId ast_id = TypeFeedbackId::None(),
193             Condition cond = al);
194   void Ret() { b(r14); }
195   void Ret(Condition cond) { b(cond, r14); }
196 
197   // Emit code that loads the |parameter_index|'th parameter from the stack
198   // into the register according to the CallInterfaceDescriptor definition.
199   // |sp_to_ra_offset_in_words| specifies the number of words pushed below
200   // the caller's sp.
201   template <class Descriptor>
202   void LoadParameterFromStack(
203       Register reg, typename Descriptor::ParameterIndices parameter_index,
204       int sp_to_ra_offset_in_words = 0) {
205     DCHECK(Descriptor::kPassLastArgsOnStack);
206     UNIMPLEMENTED();
207   }
208 
209   // Emit code to discard a non-negative number of pointer-sized elements
210   // from the stack, clobbering only the sp register.
211   void Drop(int count);
212   void Drop(Register count, Register scratch = r0);
213 
214   void Ret(int drop) {
215     Drop(drop);
216     Ret();
217   }
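
  // Usage sketch (illustrative): drop two pointer-sized stack slots, then
  // return; equivalent to Drop(2) followed by Ret().
  //   __ Ret(2);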
218 
219   void Call(Label* target);
220 
221   // Register move. May do nothing if the registers are identical.
222   void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
223   void Move(Register dst, Handle<Object> value);
224   void Move(Register dst, Register src, Condition cond = al);
225   void Move(DoubleRegister dst, DoubleRegister src);
226 
227   void MultiPush(RegList regs, Register location = sp);
228   void MultiPop(RegList regs, Register location = sp);
229 
230   void MultiPushDoubles(RegList dregs, Register location = sp);
231   void MultiPopDoubles(RegList dregs, Register location = sp);
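
  // Usage sketch (illustrative; assumes Register::bit() to build the RegList
  // mask): save and restore a set of registers around a call.
  //   RegList saved = r5.bit() | r6.bit() | r7.bit();
  //   __ MultiPush(saved);
  //   ...
  //   __ MultiPop(saved);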
232 
233   // Load an object from the root table.
234   void LoadRoot(Register destination, Heap::RootListIndex index,
235                 Condition cond = al);
236   // Store an object to the root table.
237   void StoreRoot(Register source, Heap::RootListIndex index,
238                  Condition cond = al);
239 
240   //--------------------------------------------------------------------------
241   // S390 Macro Assemblers for Instructions
242   //--------------------------------------------------------------------------
243 
244   // Arithmetic Operations
245 
246   // Add (Register - Immediate)
247   void Add32(Register dst, const Operand& imm);
248   void Add32_RI(Register dst, const Operand& imm);
249   void AddP(Register dst, const Operand& imm);
250   void Add32(Register dst, Register src, const Operand& imm);
251   void Add32_RRI(Register dst, Register src, const Operand& imm);
252   void AddP(Register dst, Register src, const Operand& imm);
253 
254   // Add (Register - Register)
255   void Add32(Register dst, Register src);
256   void AddP(Register dst, Register src);
257   void AddP_ExtendSrc(Register dst, Register src);
258   void Add32(Register dst, Register src1, Register src2);
259   void AddP(Register dst, Register src1, Register src2);
260   void AddP_ExtendSrc(Register dst, Register src1, Register src2);
261 
262   // Add (Register - Mem)
263   void Add32(Register dst, const MemOperand& opnd);
264   void AddP(Register dst, const MemOperand& opnd);
265   void AddP_ExtendSrc(Register dst, const MemOperand& opnd);
266 
267   // Add (Mem - Immediate)
268   void Add32(const MemOperand& opnd, const Operand& imm);
269   void AddP(const MemOperand& opnd, const Operand& imm);
270 
271   // Add Logical (Register - Register)
272   void AddLogical32(Register dst, Register src1, Register src2);
273 
274   // Add Logical With Carry (Register - Register)
275   void AddLogicalWithCarry32(Register dst, Register src1, Register src2);
276 
277   // Add Logical (Register - Immediate)
278   void AddLogical(Register dst, const Operand& imm);
279   void AddLogicalP(Register dst, const Operand& imm);
280 
281   // Add Logical (Register - Mem)
282   void AddLogical(Register dst, const MemOperand& opnd);
283   void AddLogicalP(Register dst, const MemOperand& opnd);
284 
285   // Subtract (Register - Immediate)
286   void Sub32(Register dst, const Operand& imm);
287   void Sub32_RI(Register dst, const Operand& imm) { Sub32(dst, imm); }
288   void SubP(Register dst, const Operand& imm);
289   void Sub32(Register dst, Register src, const Operand& imm);
290   void Sub32_RRI(Register dst, Register src, const Operand& imm) {
291     Sub32(dst, src, imm);
292   }
293   void SubP(Register dst, Register src, const Operand& imm);
294 
295   // Subtract (Register - Register)
296   void Sub32(Register dst, Register src);
297   void SubP(Register dst, Register src);
298   void SubP_ExtendSrc(Register dst, Register src);
299   void Sub32(Register dst, Register src1, Register src2);
300   void SubP(Register dst, Register src1, Register src2);
301   void SubP_ExtendSrc(Register dst, Register src1, Register src2);
302 
303   // Subtract (Register - Mem)
304   void Sub32(Register dst, const MemOperand& opnd);
305   void SubP(Register dst, const MemOperand& opnd);
306   void SubP_ExtendSrc(Register dst, const MemOperand& opnd);
307 
308   // Subtract Logical (Register - Mem)
309   void SubLogical(Register dst, const MemOperand& opnd);
310   void SubLogicalP(Register dst, const MemOperand& opnd);
311   void SubLogicalP_ExtendSrc(Register dst, const MemOperand& opnd);
312   // Subtract Logical 32-bit
313   void SubLogical32(Register dst, Register src1, Register src2);
314   // Subtract Logical With Borrow 32-bit
315   void SubLogicalWithBorrow32(Register dst, Register src1, Register src2);
316 
317   // Multiply
318   void MulP(Register dst, const Operand& opnd);
319   void MulP(Register dst, Register src);
320   void MulP(Register dst, const MemOperand& opnd);
321   void Mul(Register dst, Register src1, Register src2);
322   void Mul32(Register dst, const MemOperand& src1);
323   void Mul32(Register dst, Register src1);
324   void Mul32(Register dst, const Operand& src1);
325   void MulHigh32(Register dst, Register src1, const MemOperand& src2);
326   void MulHigh32(Register dst, Register src1, Register src2);
327   void MulHigh32(Register dst, Register src1, const Operand& src2);
328   void MulHighU32(Register dst, Register src1, const MemOperand& src2);
329   void MulHighU32(Register dst, Register src1, Register src2);
330   void MulHighU32(Register dst, Register src1, const Operand& src2);
331   void Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
332                                     const MemOperand& src2);
333   void Mul32WithOverflowIfCCUnequal(Register dst, Register src1, Register src2);
334   void Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
335                                     const Operand& src2);
336   void Mul64(Register dst, const MemOperand& src1);
337   void Mul64(Register dst, Register src1);
338   void Mul64(Register dst, const Operand& src1);
339   void MulPWithCondition(Register dst, Register src1, Register src2);
340 
341   // Divide
342   void DivP(Register dividend, Register divider);
343   void Div32(Register dst, Register src1, const MemOperand& src2);
344   void Div32(Register dst, Register src1, Register src2);
345   void Div32(Register dst, Register src1, const Operand& src2);
346   void DivU32(Register dst, Register src1, const MemOperand& src2);
347   void DivU32(Register dst, Register src1, Register src2);
348   void DivU32(Register dst, Register src1, const Operand& src2);
349 
350   // Mod
351   void Mod32(Register dst, Register src1, const MemOperand& src2);
352   void Mod32(Register dst, Register src1, Register src2);
353   void Mod32(Register dst, Register src1, const Operand& src2);
354   void ModU32(Register dst, Register src1, const MemOperand& src2);
355   void ModU32(Register dst, Register src1, Register src2);
356   void ModU32(Register dst, Register src1, const Operand& src2);
357 
358   // Square root
359   void Sqrt(DoubleRegister result, DoubleRegister input);
360   void Sqrt(DoubleRegister result, const MemOperand& input);
361 
362   // Compare
363   void Cmp32(Register src1, Register src2);
364   void CmpP(Register src1, Register src2);
365   void Cmp32(Register dst, const Operand& opnd);
366   void CmpP(Register dst, const Operand& opnd);
367   void Cmp32(Register dst, const MemOperand& opnd);
368   void CmpP(Register dst, const MemOperand& opnd);
369 
370   // Compare Logical
371   void CmpLogical32(Register src1, Register src2);
372   void CmpLogicalP(Register src1, Register src2);
373   void CmpLogical32(Register src1, const Operand& opnd);
374   void CmpLogicalP(Register src1, const Operand& opnd);
375   void CmpLogical32(Register dst, const MemOperand& opnd);
376   void CmpLogicalP(Register dst, const MemOperand& opnd);
377 
378   // Compare Logical Byte (CLI/CLIY)
379   void CmpLogicalByte(const MemOperand& mem, const Operand& imm);
380 
381   // Load 32bit
382   void Load(Register dst, const MemOperand& opnd);
383   void Load(Register dst, const Operand& opnd);
384   void LoadW(Register dst, const MemOperand& opnd, Register scratch = no_reg);
385   void LoadW(Register dst, Register src);
386   void LoadlW(Register dst, const MemOperand& opnd, Register scratch = no_reg);
387   void LoadlW(Register dst, Register src);
388   void LoadLogicalHalfWordP(Register dst, const MemOperand& opnd);
389   void LoadLogicalHalfWordP(Register dst, Register src);
390   void LoadB(Register dst, const MemOperand& opnd);
391   void LoadB(Register dst, Register src);
392   void LoadlB(Register dst, const MemOperand& opnd);
393   void LoadlB(Register dst, Register src);
394 
395   void LoadLogicalReversedWordP(Register dst, const MemOperand& opnd);
396   void LoadLogicalReversedHalfWordP(Register dst, const MemOperand& opnd);
397 
398   // Load And Test
399   void LoadAndTest32(Register dst, Register src);
400   void LoadAndTestP_ExtendSrc(Register dst, Register src);
401   void LoadAndTestP(Register dst, Register src);
402 
403   void LoadAndTest32(Register dst, const MemOperand& opnd);
404   void LoadAndTestP(Register dst, const MemOperand& opnd);
405 
406   // Load Floating Point
407   void LoadDouble(DoubleRegister dst, const MemOperand& opnd);
408   void LoadFloat32(DoubleRegister dst, const MemOperand& opnd);
409   void LoadFloat32ConvertToDouble(DoubleRegister dst, const MemOperand& mem);
410 
411   // Load On Condition
412   void LoadOnConditionP(Condition cond, Register dst, Register src);
413 
414   void LoadPositiveP(Register result, Register input);
415   void LoadPositive32(Register result, Register input);
416 
417   // Store Floating Point
418   void StoreDouble(DoubleRegister dst, const MemOperand& opnd);
419   void StoreFloat32(DoubleRegister dst, const MemOperand& opnd);
420   void StoreDoubleAsFloat32(DoubleRegister src, const MemOperand& mem,
421                             DoubleRegister scratch);
422 
423   void Branch(Condition c, const Operand& opnd);
424   void BranchOnCount(Register r1, Label* l);
425 
426   // Shifts
427   void ShiftLeft(Register dst, Register src, Register val);
428   void ShiftLeft(Register dst, Register src, const Operand& val);
429   void ShiftRight(Register dst, Register src, Register val);
430   void ShiftRight(Register dst, Register src, const Operand& val);
431   void ShiftLeftArith(Register dst, Register src, Register shift);
432   void ShiftLeftArith(Register dst, Register src, const Operand& val);
433   void ShiftRightArith(Register dst, Register src, Register shift);
434   void ShiftRightArith(Register dst, Register src, const Operand& val);
435 
436   void ClearRightImm(Register dst, Register src, const Operand& val);
437 
438   // Bitwise operations
439   void And(Register dst, Register src);
440   void AndP(Register dst, Register src);
441   void And(Register dst, Register src1, Register src2);
442   void AndP(Register dst, Register src1, Register src2);
443   void And(Register dst, const MemOperand& opnd);
444   void AndP(Register dst, const MemOperand& opnd);
445   void And(Register dst, const Operand& opnd);
446   void AndP(Register dst, const Operand& opnd);
447   void And(Register dst, Register src, const Operand& opnd);
448   void AndP(Register dst, Register src, const Operand& opnd);
449   void Or(Register dst, Register src);
450   void OrP(Register dst, Register src);
451   void Or(Register dst, Register src1, Register src2);
452   void OrP(Register dst, Register src1, Register src2);
453   void Or(Register dst, const MemOperand& opnd);
454   void OrP(Register dst, const MemOperand& opnd);
455   void Or(Register dst, const Operand& opnd);
456   void OrP(Register dst, const Operand& opnd);
457   void Or(Register dst, Register src, const Operand& opnd);
458   void OrP(Register dst, Register src, const Operand& opnd);
459   void Xor(Register dst, Register src);
460   void XorP(Register dst, Register src);
461   void Xor(Register dst, Register src1, Register src2);
462   void XorP(Register dst, Register src1, Register src2);
463   void Xor(Register dst, const MemOperand& opnd);
464   void XorP(Register dst, const MemOperand& opnd);
465   void Xor(Register dst, const Operand& opnd);
466   void XorP(Register dst, const Operand& opnd);
467   void Xor(Register dst, Register src, const Operand& opnd);
468   void XorP(Register dst, Register src, const Operand& opnd);
469   void Popcnt32(Register dst, Register src);
470   void Not32(Register dst, Register src = no_reg);
471   void Not64(Register dst, Register src = no_reg);
472   void NotP(Register dst, Register src = no_reg);
473 
474 #ifdef V8_TARGET_ARCH_S390X
475   void Popcnt64(Register dst, Register src);
476 #endif
477 
478   void mov(Register dst, const Operand& src);
479 
480   void CleanUInt32(Register x) {
481 #ifdef V8_TARGET_ARCH_S390X
482     llgfr(x, x);
483 #endif
484   }
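
  // Usage sketch (illustrative): after a 32-bit operation whose upper 32 bits
  // are undefined, zero-extend before using the value as a 64-bit quantity:
  //   __ CleanUInt32(r2);  // llgfr r2,r2 on 64-bit; no-op on the 31-bit build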
485 
486   // ---------------------------------------------------------------------------
487   // GC Support
488 
489   void IncrementalMarkingRecordWriteHelper(Register object, Register value,
490                                            Register address);
491 
492   enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
493 
494   // Record in the remembered set the fact that we have a pointer to new space
495   // at the address pointed to by the addr register.  Only works if addr is not
496   // in new space.
497   void RememberedSetHelper(Register object,  // Used for debug code.
498                            Register addr, Register scratch,
499                            SaveFPRegsMode save_fp,
500                            RememberedSetFinalAction and_then);
501 
502   void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
503                      Label* condition_met);
504 
505   // Check if object is in new space.  Jumps if the object is not in new space.
506   // The register scratch can be object itself, but scratch will be clobbered.
507   void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
508     InNewSpace(object, scratch, eq, branch);
509   }
510 
511   // Check if object is in new space.  Jumps if the object is in new space.
512   // The register scratch can be object itself, but it will be clobbered.
513   void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
514     InNewSpace(object, scratch, ne, branch);
515   }
516 
517   // Check if an object has a given incremental marking color.
518   void HasColor(Register object, Register scratch0, Register scratch1,
519                 Label* has_color, int first_bit, int second_bit);
520 
521   void JumpIfBlack(Register object, Register scratch0, Register scratch1,
522                    Label* on_black);
523 
524   // Checks the color of an object.  If the object is white we jump to the
525   // incremental marker.
526   void JumpIfWhite(Register value, Register scratch1, Register scratch2,
527                    Register scratch3, Label* value_is_white);
528 
529   // Notify the garbage collector that we wrote a pointer into an object.
530   // |object| is the object being stored into, |value| is the object being
531   // stored.  value and scratch registers are clobbered by the operation.
532   // The offset is the offset from the start of the object, not the offset from
533   // the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
534   void RecordWriteField(
535       Register object, int offset, Register value, Register scratch,
536       LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
537       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
538       SmiCheck smi_check = INLINE_SMI_CHECK,
539       PointersToHereCheck pointers_to_here_check_for_value =
540           kPointersToHereMaybeInteresting);
541 
542   // As above, but the offset has the tag presubtracted.  For use with
543   // MemOperand(reg, off).
544   inline void RecordWriteContextSlot(
545       Register context, int offset, Register value, Register scratch,
546       LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
547       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
548       SmiCheck smi_check = INLINE_SMI_CHECK,
549       PointersToHereCheck pointers_to_here_check_for_value =
550           kPointersToHereMaybeInteresting) {
551     RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
552                      lr_status, save_fp, remembered_set_action, smi_check,
553                      pointers_to_here_check_for_value);
554   }
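
  // Usage sketch (illustrative; the field constant is an assumed example): a
  // tagged store is normally paired with its write barrier, e.g.
  //   __ StoreP(value, FieldMemOperand(object, JSObject::kPropertiesOffset));
  //   __ RecordWriteField(object, JSObject::kPropertiesOffset, value, scratch,
  //                       kLRHasBeenSaved, kDontSaveFPRegs);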
555 
556   // Notify the garbage collector that we wrote a code entry into a
557   // JSFunction. Only scratch is clobbered by the operation.
558   void RecordWriteCodeEntryField(Register js_function, Register code_entry,
559                                  Register scratch);
560 
561   void RecordWriteForMap(Register object, Register map, Register dst,
562                          LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
563 
564   // For a given |object| notify the garbage collector that the slot |address|
565   // has been written.  |value| is the object being stored. The value and
566   // address registers are clobbered by the operation.
567   void RecordWrite(
568       Register object, Register address, Register value,
569       LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
570       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
571       SmiCheck smi_check = INLINE_SMI_CHECK,
572       PointersToHereCheck pointers_to_here_check_for_value =
573           kPointersToHereMaybeInteresting);
574 
575   void push(Register src) {
576     lay(sp, MemOperand(sp, -kPointerSize));
577     StoreP(src, MemOperand(sp));
578   }
579 
580   void pop(Register dst) {
581     LoadP(dst, MemOperand(sp));
582     la(sp, MemOperand(sp, kPointerSize));
583   }
584 
585   void pop() { la(sp, MemOperand(sp, kPointerSize)); }
586 
587   void Push(Register src) { push(src); }
588 
589   // Push a handle.
590   void Push(Handle<Object> handle);
591   void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
592 
593   // Push two registers.  Pushes leftmost register first (to highest address).
594   void Push(Register src1, Register src2) {
595     lay(sp, MemOperand(sp, -kPointerSize * 2));
596     StoreP(src1, MemOperand(sp, kPointerSize));
597     StoreP(src2, MemOperand(sp, 0));
598   }
599 
600   // Push three registers.  Pushes leftmost register first (to highest address).
601   void Push(Register src1, Register src2, Register src3) {
602     lay(sp, MemOperand(sp, -kPointerSize * 3));
603     StoreP(src1, MemOperand(sp, kPointerSize * 2));
604     StoreP(src2, MemOperand(sp, kPointerSize));
605     StoreP(src3, MemOperand(sp, 0));
606   }
607 
608   // Push four registers.  Pushes leftmost register first (to highest address).
609   void Push(Register src1, Register src2, Register src3, Register src4) {
610     lay(sp, MemOperand(sp, -kPointerSize * 4));
611     StoreP(src1, MemOperand(sp, kPointerSize * 3));
612     StoreP(src2, MemOperand(sp, kPointerSize * 2));
613     StoreP(src3, MemOperand(sp, kPointerSize));
614     StoreP(src4, MemOperand(sp, 0));
615   }
616 
617   // Push five registers.  Pushes leftmost register first (to highest address).
618   void Push(Register src1, Register src2, Register src3, Register src4,
619             Register src5) {
620     DCHECK(!src1.is(src2));
621     DCHECK(!src1.is(src3));
622     DCHECK(!src2.is(src3));
623     DCHECK(!src1.is(src4));
624     DCHECK(!src2.is(src4));
625     DCHECK(!src3.is(src4));
626     DCHECK(!src1.is(src5));
627     DCHECK(!src2.is(src5));
628     DCHECK(!src3.is(src5));
629     DCHECK(!src4.is(src5));
630 
631     lay(sp, MemOperand(sp, -kPointerSize * 5));
632     StoreP(src1, MemOperand(sp, kPointerSize * 4));
633     StoreP(src2, MemOperand(sp, kPointerSize * 3));
634     StoreP(src3, MemOperand(sp, kPointerSize * 2));
635     StoreP(src4, MemOperand(sp, kPointerSize));
636     StoreP(src5, MemOperand(sp, 0));
637   }
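
  // Usage sketch (illustrative): Push stores the leftmost register at the
  // highest address, so a matching Pop restores the same assignment:
  //   __ Push(r3, r4);
  //   ...
  //   __ Pop(r3, r4);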
638 
639   void Pop(Register dst) { pop(dst); }
640 
641   // Pop two registers. Pops rightmost register first (from lower address).
642   void Pop(Register src1, Register src2) {
643     LoadP(src2, MemOperand(sp, 0));
644     LoadP(src1, MemOperand(sp, kPointerSize));
645     la(sp, MemOperand(sp, 2 * kPointerSize));
646   }
647 
648   // Pop three registers.  Pops rightmost register first (from lower address).
649   void Pop(Register src1, Register src2, Register src3) {
650     LoadP(src3, MemOperand(sp, 0));
651     LoadP(src2, MemOperand(sp, kPointerSize));
652     LoadP(src1, MemOperand(sp, 2 * kPointerSize));
653     la(sp, MemOperand(sp, 3 * kPointerSize));
654   }
655 
656   // Pop four registers.  Pops rightmost register first (from lower address).
657   void Pop(Register src1, Register src2, Register src3, Register src4) {
658     LoadP(src4, MemOperand(sp, 0));
659     LoadP(src3, MemOperand(sp, kPointerSize));
660     LoadP(src2, MemOperand(sp, 2 * kPointerSize));
661     LoadP(src1, MemOperand(sp, 3 * kPointerSize));
662     la(sp, MemOperand(sp, 4 * kPointerSize));
663   }
664 
665   // Pop five registers.  Pops rightmost register first (from lower address).
666   void Pop(Register src1, Register src2, Register src3, Register src4,
667            Register src5) {
668     LoadP(src5, MemOperand(sp, 0));
669     LoadP(src4, MemOperand(sp, kPointerSize));
670     LoadP(src3, MemOperand(sp, 2 * kPointerSize));
671     LoadP(src2, MemOperand(sp, 3 * kPointerSize));
672     LoadP(src1, MemOperand(sp, 4 * kPointerSize));
673     la(sp, MemOperand(sp, 5 * kPointerSize));
674   }
675 
676   // Push a fixed frame, consisting of lr, fp, constant pool.
677   void PushCommonFrame(Register marker_reg = no_reg);
678 
679   // Push a standard frame, consisting of lr, fp, constant pool,
680   // context and JS function
681   void PushStandardFrame(Register function_reg);
682 
683   void PopCommonFrame(Register marker_reg = no_reg);
684 
685   // Restore caller's frame pointer and return address prior to being
686   // overwritten by tail call stack preparation.
687   void RestoreFrameStateForTailCall();
688 
689   // Push and pop the registers that can hold pointers, as defined by the
690   // RegList constant kSafepointSavedRegisters.
691   void PushSafepointRegisters();
692   void PopSafepointRegisters();
693   // Store value in register src in the safepoint stack slot for
694   // register dst.
695   void StoreToSafepointRegisterSlot(Register src, Register dst);
696   // Load the value of the src register from its safepoint stack slot
697   // into register dst.
698   void LoadFromSafepointRegisterSlot(Register dst, Register src);
699 
700   // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
701   // from C.
702   // Does not handle errors.
703   void FlushICache(Register address, size_t size, Register scratch);
704 
705   // If the value is a NaN, canonicalize it; otherwise do nothing.
706   void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
707   void CanonicalizeNaN(const DoubleRegister value) {
708     CanonicalizeNaN(value, value);
709   }
710 
711   // Converts the integer (untagged smi) in |src| to a double, storing
712   // the result to |dst|
713   void ConvertIntToDouble(Register src, DoubleRegister dst);
714 
715   // Converts the unsigned integer (untagged smi) in |src| to
716   // a double, storing the result to |dst|
717   void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst);
718 
719   // Converts the integer (untagged smi) in |src| to
720   // a float, storing the result in |dst|
721   void ConvertIntToFloat(Register src, DoubleRegister dst);
722 
723   // Converts the unsigned integer (untagged smi) in |src| to
724   // a float, storing the result in |dst|
725   void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);
726 
727 #if V8_TARGET_ARCH_S390X
728   void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
729   void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
730   void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
731   void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
732 #endif
733 
734   void MovIntToFloat(DoubleRegister dst, Register src);
735   void MovFloatToInt(Register dst, DoubleRegister src);
736   void MovDoubleToInt64(Register dst, DoubleRegister src);
737   void MovInt64ToDouble(DoubleRegister dst, Register src);
738   // Converts the double_input to an integer.  Note that, upon return,
739   // the contents of double_dst will also hold the fixed point representation.
740   void ConvertFloat32ToInt64(const DoubleRegister double_input,
741 #if !V8_TARGET_ARCH_S390X
742                              const Register dst_hi,
743 #endif
744                              const Register dst,
745                              const DoubleRegister double_dst,
746                              FPRoundingMode rounding_mode = kRoundToZero);
747 
748   // Converts the double_input to an integer.  Note that, upon return,
749   // the contents of double_dst will also hold the fixed point representation.
750   void ConvertDoubleToInt64(const DoubleRegister double_input,
751 #if !V8_TARGET_ARCH_S390X
752                             const Register dst_hi,
753 #endif
754                             const Register dst, const DoubleRegister double_dst,
755                             FPRoundingMode rounding_mode = kRoundToZero);
756 
757   void ConvertFloat32ToInt32(const DoubleRegister double_input,
758                              const Register dst,
759                              const DoubleRegister double_dst,
760                              FPRoundingMode rounding_mode);
761   void ConvertFloat32ToUnsignedInt32(
762       const DoubleRegister double_input, const Register dst,
763       const DoubleRegister double_dst,
764       FPRoundingMode rounding_mode = kRoundToZero);
765 #if V8_TARGET_ARCH_S390X
766   // Converts the double_input to an unsigned integer.  Note that, upon return,
767   // the contents of double_dst will also hold the fixed point representation.
768   void ConvertDoubleToUnsignedInt64(
769       const DoubleRegister double_input, const Register dst,
770       const DoubleRegister double_dst,
771       FPRoundingMode rounding_mode = kRoundToZero);
772   void ConvertFloat32ToUnsignedInt64(
773       const DoubleRegister double_input, const Register dst,
774       const DoubleRegister double_dst,
775       FPRoundingMode rounding_mode = kRoundToZero);
776 #endif
777 
778 #if !V8_TARGET_ARCH_S390X
779   void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
780                      Register src_high, Register scratch, Register shift);
781   void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
782                      Register src_high, uint32_t shift);
783   void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
784                       Register src_high, Register scratch, Register shift);
785   void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
786                       Register src_high, uint32_t shift);
787   void ShiftRightArithPair(Register dst_low, Register dst_high,
788                            Register src_low, Register src_high,
789                            Register scratch, Register shift);
790   void ShiftRightArithPair(Register dst_low, Register dst_high,
791                            Register src_low, Register src_high, uint32_t shift);
792 #endif
793 
794   // Generates function and stub prologue code.
795   void StubPrologue(StackFrame::Type type, Register base = no_reg,
796                     int prologue_offset = 0);
797   void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
798 
799   // Enter exit frame.
800   // stack_space - extra stack space, used for parameters before call to C.
801   // At least one slot (for the return address) should be provided.
802   void EnterExitFrame(bool save_doubles, int stack_space = 1,
803                       StackFrame::Type frame_type = StackFrame::EXIT);
804 
805   // Leave the current exit frame. Expects the return value in r0.
806   // Expects, in a register, the number of values pushed prior to the exit
807   // frame that should be removed (or no_reg if there is nothing to remove).
808   void LeaveExitFrame(bool save_doubles, Register argument_count,
809                       bool restore_context,
810                       bool argument_count_is_length = false);
811 
812   // Get the actual activation frame alignment for target environment.
813   static int ActivationFrameAlignment();
814 
815   void LoadContext(Register dst, int context_chain_length);
816 
817   // Load the global object from the current context.
818   void LoadGlobalObject(Register dst) {
819     LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
820   }
821 
822   // Load the global proxy from the current context.
823   void LoadGlobalProxy(Register dst) {
824     LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
825   }
826 
827   void LoadNativeContextSlot(int index, Register dst);
828 
829   // Load the initial map from the global function. The registers
830   // function and map can be the same; function is then overwritten.
831   void LoadGlobalFunctionInitialMap(Register function, Register map,
832                                     Register scratch);
833 
834   void InitializeRootRegister() {
835     ExternalReference roots_array_start =
836         ExternalReference::roots_array_start(isolate());
837     mov(kRootRegister, Operand(roots_array_start));
838   }
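
  // Usage sketch (illustrative; the root index is an assumed example): with
  // kRootRegister initialized, roots can be read via RootMemOperand:
  //   __ LoadP(r4, RootMemOperand(Heap::kUndefinedValueRootIndex));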
839 
840   // ----------------------------------------------------------------
841   // New S390 macro-assembler interfaces that are slightly higher level
842   // than assembler-s390 and may generate variable-length sequences.
843 
844   // Load a literal signed int value <value> into GPR <dst>.
845   void LoadIntLiteral(Register dst, int value);
846 
847   // Load an SMI value <value> into GPR <dst>.
848   void LoadSmiLiteral(Register dst, Smi* smi);
849 
850   // Load a literal double value <value> into FPR <result>.
851   void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);
852   void LoadDoubleLiteral(DoubleRegister result, uint64_t value,
853                          Register scratch);
854 
855   void LoadFloat32Literal(DoubleRegister result, float value, Register scratch);
856 
857   void StoreW(Register src, const MemOperand& mem, Register scratch = no_reg);
858 
859   void LoadHalfWordP(Register dst, const MemOperand& mem,
860                      Register scratch = no_reg);
861 
862   void StoreHalfWord(Register src, const MemOperand& mem,
863                      Register scratch = r0);
864   void StoreByte(Register src, const MemOperand& mem, Register scratch = r0);
865 
866   void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
867                           Register scratch = no_reg);
868   void StoreRepresentation(Register src, const MemOperand& mem,
869                            Representation r, Register scratch = no_reg);
870 
871   void AddSmiLiteral(Register dst, Register src, Smi* smi,
872                      Register scratch = r0);
873   void SubSmiLiteral(Register dst, Register src, Smi* smi,
874                      Register scratch = r0);
875   void CmpSmiLiteral(Register src1, Smi* smi, Register scratch);
876   void CmpLogicalSmiLiteral(Register src1, Smi* smi, Register scratch);
877   void AndSmiLiteral(Register dst, Register src, Smi* smi);
878 
879   // Set the new rounding mode RN in the FPSCR.
880   void SetRoundingMode(FPRoundingMode RN);
881 
882   // reset rounding mode to default (kRoundToNearest)
883   void ResetRoundingMode();
884 
885   // These exist to provide portability between 32-bit and 64-bit code.
886   void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
887   void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
888   void StoreP(const MemOperand& mem, const Operand& opnd,
889               Register scratch = no_reg);
890   void LoadMultipleP(Register dst1, Register dst2, const MemOperand& mem);
891   void StoreMultipleP(Register dst1, Register dst2, const MemOperand& mem);
892   void LoadMultipleW(Register dst1, Register dst2, const MemOperand& mem);
893   void StoreMultipleW(Register dst1, Register dst2, const MemOperand& mem);
894 
895   // Cleanse a pointer address on 31-bit by zeroing out the top bit.
896   // This is a NOP on 64-bit.
897   void CleanseP(Register src) {
898 #if (V8_HOST_ARCH_S390 && !(V8_TARGET_ARCH_S390X))
899     nilh(src, Operand(0x7FFF));
900 #endif
901   }
902 
903   // ---------------------------------------------------------------------------
904   // JavaScript invokes
905 
906   // Set up call kind marking in ecx. The method takes ecx as an
907   // explicit first parameter to make the code more readable at the
908   // call sites.
909   // void SetCallKind(Register dst, CallKind kind);
910 
911   // Removes current frame and its arguments from the stack preserving
912   // the arguments and a return address pushed to the stack for the next call.
913   // Both |callee_args_count| and |caller_args_count_reg| do not include
914   // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
915   // is trashed.
916   void PrepareForTailCall(const ParameterCount& callee_args_count,
917                           Register caller_args_count_reg, Register scratch0,
918                           Register scratch1);
919 
920   // Invoke the JavaScript function code by either calling or jumping.
921   void InvokeFunctionCode(Register function, Register new_target,
922                           const ParameterCount& expected,
923                           const ParameterCount& actual, InvokeFlag flag,
924                           const CallWrapper& call_wrapper);
925 
926   // On function call, call into the debugger if necessary.
927   void CheckDebugHook(Register fun, Register new_target,
928                       const ParameterCount& expected,
929                       const ParameterCount& actual);
930 
931   // Invoke the JavaScript function in the given register. Changes the
932   // current context to the context in the function before invoking.
933   void InvokeFunction(Register function, Register new_target,
934                       const ParameterCount& actual, InvokeFlag flag,
935                       const CallWrapper& call_wrapper);
936 
937   void InvokeFunction(Register function, const ParameterCount& expected,
938                       const ParameterCount& actual, InvokeFlag flag,
939                       const CallWrapper& call_wrapper);
940 
941   void InvokeFunction(Handle<JSFunction> function,
942                       const ParameterCount& expected,
943                       const ParameterCount& actual, InvokeFlag flag,
944                       const CallWrapper& call_wrapper);
945 
946   void IsObjectJSStringType(Register object, Register scratch, Label* fail);
947 
948   void IsObjectNameType(Register object, Register scratch, Label* fail);
949 
950   // Frame restart support
951   void MaybeDropFrames();
952 
953   // Exception handling
954 
955   // Push a new stack handler and link into stack handler chain.
956   void PushStackHandler();
957 
958   // Unlink the stack handler on top of the stack from the stack handler chain.
959   // Must preserve the result register.
960   void PopStackHandler();
961 
962   // ---------------------------------------------------------------------------
963   // Inline caching support
964 
965   void GetNumberHash(Register t0, Register scratch);
966 
967   inline void MarkCode(NopMarkerTypes type) { nop(type); }
968 
969   // Check if the given instruction is a 'type' marker.
970   // i.e., check if it is a mov r<type>, r<type> (referenced as nop(type)).
971   // These instructions are generated to mark special locations in the code,
972   // like some special IC code.
973   static inline bool IsMarkedCode(Instr instr, int type) {
974     DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
975     return IsNop(instr, type);
976   }
977 
978   static inline int GetCodeMarker(Instr instr) {
979     int dst_reg_offset = 12;
980     int dst_mask = 0xf << dst_reg_offset;
981     int src_mask = 0xf;
982     int dst_reg = (instr & dst_mask) >> dst_reg_offset;
983     int src_reg = instr & src_mask;
984     uint32_t non_register_mask = ~(dst_mask | src_mask);
985     uint32_t mov_mask = al | 13 << 21;
986 
987     // Return <n> if we have a mov rn rn, else return -1.
988     int type = ((instr & non_register_mask) == mov_mask) &&
989                        (dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) &&
990                        (dst_reg < LAST_CODE_MARKER)
991                    ? src_reg
992                    : -1;
993     DCHECK((type == -1) ||
994            ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
995     return type;
996   }
997 
998   // ---------------------------------------------------------------------------
999   // Allocation support
1000 
1001   // Allocate an object in new space or old pointer space. The object_size is
1002   // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
1003   // is passed. If the space is exhausted control continues at the gc_required
1004   // label. The allocated object is returned in result. If the flag
1005   // tag_allocated_object is true the result is tagged as a heap object.
1006   // All registers are clobbered also when control continues at the gc_required
1007   // label.
1008   void Allocate(int object_size, Register result, Register scratch1,
1009                 Register scratch2, Label* gc_required, AllocationFlags flags);
1010 
1011   void Allocate(Register object_size, Register result, Register result_end,
1012                 Register scratch, Label* gc_required, AllocationFlags flags);
1013 
1014   // FastAllocate is currently only used for folded allocations. It just
1015   // increments the top pointer without checking against limit. This can only
1016   // be done if it was proved earlier that the allocation will succeed.
1017   void FastAllocate(int object_size, Register result, Register scratch1,
1018                     Register scratch2, AllocationFlags flags);
1019 
1020   void FastAllocate(Register object_size, Register result, Register result_end,
1021                     Register scratch, AllocationFlags flags);
1022 
1023   // Allocates a heap number or jumps to the gc_required label if the young
1024   // space is full and a scavenge is needed. All registers are clobbered also
1025   // when control continues at the gc_required label.
1026   void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
1027                           Register heap_number_map, Label* gc_required,
1028                           MutableMode mode = IMMUTABLE);
1029   void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
1030                                    Register scratch1, Register scratch2,
1031                                    Register heap_number_map,
1032                                    Label* gc_required);
1033 
1034   // Allocate and initialize a JSValue wrapper with the specified {constructor}
1035   // and {value}.
1036   void AllocateJSValue(Register result, Register constructor, Register value,
1037                        Register scratch1, Register scratch2,
1038                        Label* gc_required);
1039 
1040   // Initialize fields with filler values.  |count| fields starting at
1041   // |current_address| are overwritten with the value in |filler|.  At the end
1042   // of the loop, |current_address| points at the next uninitialized field.
1043   // |count| is assumed to be non-zero.
1044   void InitializeNFieldsWithFiller(Register current_address, Register count,
1045                                    Register filler);
1046 
1047   // Initialize fields with filler values.  Fields starting at |current_address|
1048   // up to (but not including) |end_address| are overwritten with the value in
1049   // |filler|.  At the end of the loop, |current_address| takes the value of |end_address|.
1050   void InitializeFieldsWithFiller(Register current_address,
1051                                   Register end_address, Register filler);
1052 
1053   // ---------------------------------------------------------------------------
1054   // Support functions.
1055 
1056   // Machine code version of Map::GetConstructor().
1057   // |temp| holds |result|'s map when done, and |temp2| its instance type.
1058   void GetMapConstructor(Register result, Register map, Register temp,
1059                          Register temp2);
1060 
1061   // Compare object type for heap object.  heap_object contains a non-Smi
1062   // whose object type should be compared with the given type.  This both
1063   // sets the flags and leaves the object type in the type_reg register.
1064   // It leaves the map in the map register (unless the type_reg and map register
1065   // are the same register).  It leaves the heap object in the heap_object
1066   // register unless the heap_object register is the same register as one of the
1067   // other registers.
1068   // Type_reg can be no_reg. In that case ip is used.
1069   void CompareObjectType(Register heap_object, Register map, Register type_reg,
1070                          InstanceType type);
1071 
1072   // Compare instance type in a map.  map contains a valid map object whose
1073   // object type should be compared with the given type.  This both
1074   // sets the flags and leaves the object type in the type_reg register.
1075   void CompareInstanceType(Register map, Register type_reg, InstanceType type);
1076 
1077   // Compare an object's map with the specified map and its transitioned
1078   // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
1079   // set with result of map compare. If multiple map compares are required, the
1080   // compare sequence branches to early_success.
1081   void CompareMap(Register obj, Register scratch, Handle<Map> map,
1082                   Label* early_success);
1083 
1084   // As above, but the map of the object is already loaded into the register
1085   // which is preserved by the code generated.
1086   void CompareMap(Register obj_map, Handle<Map> map, Label* early_success);
1087 
1088   // Check if the map of an object is equal to a specified map and branch to
1089   // label if not. Skip the smi check if not required (object is known to be a
1090   // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
1091   // against maps that are ElementsKind transition maps of the specified map.
1092   void CheckMap(Register obj, Register scratch, Handle<Map> map, Label* fail,
1093                 SmiCheckType smi_check_type);
1094 
1095   void CheckMap(Register obj, Register scratch, Heap::RootListIndex index,
1096                 Label* fail, SmiCheckType smi_check_type);
1097 
1098   // Check if the map of an object is equal to a specified weak map and branch
1099   // to a specified target if equal. Skip the smi check if not required
1100   // (object is known to be a heap object)
1101   void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
1102                        Handle<WeakCell> cell, Handle<Code> success,
1103                        SmiCheckType smi_check_type);
1104 
1105   // Compare the given value and the value of weak cell.
1106   void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch,
1107                     CRegister cr = cr7);
1108 
1109   void GetWeakValue(Register value, Handle<WeakCell> cell);
1110 
1111   // Load the value of the weak cell in the value register. Branch to the given
1112   // miss label if the weak cell was cleared.
1113   void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
1114 
1115   // Compare the object in a register to a value from the root list.
1116   // Uses the ip register as scratch.
1117   void CompareRoot(Register obj, Heap::RootListIndex index);
1118   void PushRoot(Heap::RootListIndex index) {
1119     LoadRoot(r0, index);
1120     Push(r0);
1121   }
1122 
1123   // Compare the object in a register to a value and jump if they are equal.
1124   void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
1125     CompareRoot(with, index);
1126     beq(if_equal);
1127   }
1128 
1129   // Compare the object in a register to a value and jump if they are not equal.
1130   void JumpIfNotRoot(Register with, Heap::RootListIndex index,
1131                      Label* if_not_equal) {
1132     CompareRoot(with, index);
1133     bne(if_not_equal);
1134   }
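
  // Usage sketch (illustrative; the root indices are assumed examples):
  //   __ JumpIfRoot(r2, Heap::kUndefinedValueRootIndex, &is_undefined);
  //   __ JumpIfNotRoot(r2, Heap::kTheHoleValueRootIndex, &not_the_hole);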
1135 
1136   // Load and check the instance type of an object for being a string.
1137   // Loads the type into the second argument register.
1138   // Returns a condition that will be enabled if the object was a string.
1139   Condition IsObjectStringType(Register obj, Register type) {
1140     LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset));
1141     LoadlB(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
1142     mov(r0, Operand(kIsNotStringMask));
1143     AndP(r0, type);
1144     DCHECK_EQ(0u, kStringTag);
1145     return eq;
1146   }
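
  // Usage sketch (illustrative; assumes the assembler's b(cond, label) form):
  //   Condition is_string = IsObjectStringType(r2, r3);
  //   __ b(is_string, &handle_string);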
1147 
1148   // Get the number of least significant bits from a register
1149   void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
1150   void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
1151 
1152   // Load the value of a smi object into a FP double register. The register
1153   // scratch1 can be the same register as smi in which case smi will hold the
1154   // untagged value afterwards.
1155   void SmiToDouble(DoubleRegister value, Register smi);
1156 
1157   // Check if a double can be exactly represented as a signed 32-bit integer.
1158   // CR_EQ in cr7 is set if true.
1159   void TestDoubleIsInt32(DoubleRegister double_input, Register scratch1,
1160                          Register scratch2, DoubleRegister double_scratch);
1161 
1162   // Check if a double is equal to -0.0.
1163   // CR_EQ in cr7 holds the result.
1164   void TestDoubleIsMinusZero(DoubleRegister input, Register scratch1,
1165                              Register scratch2);
1166 
1167   // Check the sign of a double.
1168   // CR_LT in cr7 holds the result.
1169   void TestDoubleSign(DoubleRegister input, Register scratch);
1170   void TestHeapNumberSign(Register input, Register scratch);
1171 
1172   // Try to convert a double to a signed 32-bit integer.
1173   // CR_EQ in cr7 is set and result assigned if the conversion is exact.
1174   void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
1175                              Register scratch, DoubleRegister double_scratch);
1176 
1177   // Floor a double and write the value to the result register.
1178   // Go to exact if the conversion is exact (to be able to test -0),
1179   // fall through calling code if an overflow occurred, else go to done.
1180   // In return, input_high is loaded with high bits of input.
1181   void TryInt32Floor(Register result, DoubleRegister double_input,
1182                      Register input_high, Register scratch,
1183                      DoubleRegister double_scratch, Label* done, Label* exact);
1184 
1185   // Performs a truncating conversion of a floating point number as used by
1186   // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
1187   // succeeds, otherwise falls through if result is saturated. On return
1188   // 'result' either holds answer, or is clobbered on fall through.
1189   //
1190   // Only public for the test code in test-code-stubs-arm.cc.
1191   void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
1192                                   Label* done);
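
  // Illustrative usage sketch (not part of the original header; r2 and d0 are
  // just example registers): try the cheap inline truncation first and only
  // emit a slow path for the saturated case.
  //
  //   Label done;
  //   TryInlineTruncateDoubleToI(r2, d0, &done);
  //   // Fall-through: conversion saturated; emit the slow-path conversion
  //   // (e.g. via TruncateDoubleToI) here.
  //   bind(&done);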

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Register result, DoubleRegister double_input);

  // Performs a truncating conversion of a heap number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'object'
  // must be different registers.  Exits with 'result' holding the answer.
  void TruncateHeapNumberToI(Register result, Register object);

  // Converts the smi or heap number in 'object' to an int32 using the rules
  // for ToInt32 as described in ECMA-262 9.5: the value is truncated and
  // brought into the range -2^31 .. +2^31 - 1. 'object' and 'result' must be
  // different registers.
  void TruncateNumberToI(Register object, Register result,
                         Register heap_number_map, Register scratch1,
                         Label* not_int32);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None(),
                Condition cond = al);

  // Call a code stub.
  void TailCallStub(CodeStub* stub, Condition cond = al);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
  void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
  }
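
  // Illustrative usage sketch (not part of the original header; the runtime
  // function id is just an example): the fid-based overloads look up the
  // argument count from the runtime table, so most call sites only name the
  // function.
  //
  //   CallRuntime(Runtime::kStackGuard);             // nargs taken from table
  //   CallRuntimeSaveDoubles(Runtime::kStackGuard);  // also preserves FP regs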

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext, int num_arguments);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);

  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, non-register arguments must be stored in
  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
  // are word sized. If double arguments are used, this function assumes that
  // all double arguments are stored before core registers; otherwise the
  // correct alignment of the double values is not guaranteed.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments, Register scratch);

  // There are two ways of passing double arguments on ARM, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DoubleRegister src);
  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
  void MovToFloatResult(DoubleRegister src);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function, int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, int num_reg_arguments,
                     int num_double_arguments);

  void MovFromFloatParameter(DoubleRegister dst);
  void MovFromFloatResult(DoubleRegister dst);
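
  // Illustrative call sequence (not part of the original header; d0, d2 and
  // the scratch register are example operands, and the external reference is
  // assumed to exist in this V8 version): calling a C helper that takes two
  // doubles and returns a double.
  //
  //   PrepareCallCFunction(0, 2, scratch);
  //   MovToFloatParameters(d0, d2);
  //   CallCFunction(ExternalReference::power_double_double_function(isolate()),
  //                 0, 2);
  //   MovFromFloatResult(d0);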

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               bool builtin_exit_frame = false);

  Handle<Object> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }

  // Emit code for a truncating division by a constant. The dividend register is
  // unchanged and ip gets clobbered. Dividend and result must be different.
  void TruncatingDiv(Register result, Register dividend, int32_t divisor);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value, Register scratch1,
                  Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);

  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason reason);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);

  // ---------------------------------------------------------------------------
  // Number utilities

  // Check whether the value of reg is a power of two and not zero. If not,
  // control jumps to the label not_power_of_two_or_zero. If reg is a power of
  // two, the register scratch contains the value of (reg - 1) when control
  // falls through.
  void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch,
                                 Label* not_power_of_two_or_zero);
  // Check whether the value of reg is a power of two and not zero.
  // Control falls through if it is, with scratch containing the mask
  // value (reg - 1).
  // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
  // zero or negative, or jumps to the 'not_power_of_two' label if the value is
  // strictly positive but not a power of two.
  void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch,
                                       Label* zero_and_neg,
                                       Label* not_power_of_two);

  // ---------------------------------------------------------------------------
  // Bit testing/extraction
  //
  // Bit numbering is such that the least significant bit is bit 0
  // (for consistency between 32/64-bit).

  // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
  // and place them into the least significant bits of dst.
  inline void ExtractBitRange(Register dst, Register src, int rangeStart,
                              int rangeEnd) {
    DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);

    // Try to use RISBG if possible.
    if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
      int shiftAmount = (64 - rangeEnd) % 64;  // Convert to shift left.
      int endBit = 63;  // End is always LSB after shifting.
      int startBit = 63 - rangeStart + rangeEnd;
      risbg(dst, src, Operand(startBit), Operand(endBit), Operand(shiftAmount),
            true);
    } else {
      if (rangeEnd > 0)  // Don't need to shift if rangeEnd is zero.
        ShiftRightP(dst, src, Operand(rangeEnd));
      else if (!dst.is(src))  // If we didn't shift, we might need to copy
        LoadRR(dst, src);
      int width = rangeStart - rangeEnd + 1;
#if V8_TARGET_ARCH_S390X
      uint64_t mask = (static_cast<uint64_t>(1) << width) - 1;
      nihf(dst, Operand(mask >> 32));
      nilf(dst, Operand(mask & 0xFFFFFFFF));
      ltgr(dst, dst);
#else
      uint32_t mask = (1 << width) - 1;
      AndP(dst, Operand(mask));
#endif
    }
  }
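
  // Worked example (not part of the original header):
  //
  //   ExtractBitRange(dst, src, 7, 3);  // dst = (src >> 3) & 0x1F
  //
  // With GENERAL_INSTR_EXT this becomes a single RISBG (startBit = 59,
  // endBit = 63, shift amount = 61); otherwise it falls back to the
  // shift-and-mask sequence above.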

  inline void ExtractBit(Register dst, Register src, uint32_t bitNumber) {
    ExtractBitRange(dst, src, bitNumber, bitNumber);
  }

  // Extract consecutive bits (defined by mask) from src and place them
  // into the least significant bits of dst.
  inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
                             RCBit rc = LeaveRC) {
    int start = kBitsPerPointer - 1;
    int end;
    uintptr_t bit = (1L << start);

    while (bit && (mask & bit) == 0) {
      start--;
      bit >>= 1;
    }
    end = start;
    bit >>= 1;

    while (bit && (mask & bit)) {
      end--;
      bit >>= 1;
    }

    // 1-bits in mask must be contiguous
    DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);

    ExtractBitRange(dst, src, start, end);
  }
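
  // Worked example (not part of the original header): for mask 0x00F0 the two
  // scans above find start = 7 and end = 4, so
  //
  //   ExtractBitMask(dst, src, 0x00F0);  // dst = (src >> 4) & 0xF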

  // Test single bit in value.
  inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
    ExtractBitRange(scratch, value, bitNumber, bitNumber);
  }

  // Test consecutive bit range in value.  Range is defined by
  // rangeStart - rangeEnd.
  inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
                           Register scratch = r0) {
    ExtractBitRange(scratch, value, rangeStart, rangeEnd);
  }

  // Test consecutive bit range in value.  Range is defined by mask.
  inline void TestBitMask(Register value, uintptr_t mask,
                          Register scratch = r0) {
    ExtractBitMask(scratch, value, mask, SetRC);
  }

  // ---------------------------------------------------------------------------
  // Smi utilities

  // Shift left by kSmiShift
  void SmiTag(Register reg) { SmiTag(reg, reg); }
  void SmiTag(Register dst, Register src) {
    ShiftLeftP(dst, src, Operand(kSmiShift));
  }

#if !V8_TARGET_ARCH_S390X
  // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
  void SmiTagCheckOverflow(Register reg, Register overflow);
  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);

  inline void JumpIfNotSmiCandidate(Register value, Register scratch,
                                    Label* not_smi_label) {
    // High bits must be identical to fit into a Smi
    STATIC_ASSERT(kSmiShift == 1);
    AddP(scratch, value, Operand(0x40000000u));
    CmpP(scratch, Operand::Zero());
    blt(not_smi_label);
  }
#endif
  inline void TestUnsignedSmiCandidate(Register value, Register scratch) {
    // The test is different for unsigned int values. Since we need
    // the value to be in the range of a positive smi, we can't
    // handle any of the high bits being set in the value.
    TestBitRange(value, kBitsPerPointer - 1, kBitsPerPointer - 1 - kSmiShift,
                 scratch);
  }
  inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch,
                                            Label* not_smi_label) {
    TestUnsignedSmiCandidate(value, scratch);
    bne(not_smi_label /*, cr0*/);
  }

  void SmiUntag(Register reg) { SmiUntag(reg, reg); }

  void SmiUntag(Register dst, Register src) {
    ShiftRightArithP(dst, src, Operand(kSmiShift));
  }
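
  // Worked example (not part of the original header): on 64-bit targets
  // kSmiShift is 32 (see the STATIC_ASSERT below), so tagging and untagging
  // are pure shifts.
  //
  //   SmiTag(r3, r4);    // r3 = r4 << kSmiShift  (e.g. 7 -> 0x0000000700000000)
  //   SmiUntag(r4, r3);  // r4 = r3 >> kSmiShift  (arithmetic shift, back to 7)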

  void SmiToPtrArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
    ShiftRightArithP(dst, src, Operand(kSmiShift - kPointerSizeLog2));
#else
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
    ShiftLeftP(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
#endif
  }

  void SmiToByteArrayOffset(Register dst, Register src) { SmiUntag(dst, src); }

  void SmiToShortArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1);
    ShiftRightArithP(dst, src, Operand(kSmiShift - 1));
#else
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1);
    if (!dst.is(src)) {
      LoadRR(dst, src);
    }
#endif
  }

  void SmiToIntArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2);
    ShiftRightArithP(dst, src, Operand(kSmiShift - 2));
#else
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2);
    ShiftLeftP(dst, src, Operand(2 - kSmiShift));
#endif
  }

#define SmiToFloatArrayOffset SmiToIntArrayOffset

  void SmiToDoubleArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2);
    ShiftRightArithP(dst, src, Operand(kSmiShift - kDoubleSizeLog2));
#else
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kDoubleSizeLog2);
    ShiftLeftP(dst, src, Operand(kDoubleSizeLog2 - kSmiShift));
#endif
  }

  void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) {
    if (kSmiShift < elementSizeLog2) {
      ShiftLeftP(dst, src, Operand(elementSizeLog2 - kSmiShift));
    } else if (kSmiShift > elementSizeLog2) {
      ShiftRightArithP(dst, src, Operand(kSmiShift - elementSizeLog2));
    } else if (!dst.is(src)) {
      LoadRR(dst, src);
    }
  }
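
  // Worked example (not part of the original header): with kDoubleSizeLog2 == 3,
  //
  //   SmiToDoubleArrayOffset(r3, r4);  // r3 = untag(r4) * kDoubleSize
  //
  // yields the byte offset of element r4 within a FixedDoubleArray body; the
  // caller still adds the array base and header offset.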

  void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2,
                          bool isSmi, bool keyMaybeNegative) {
    if (isSmi) {
      SmiToArrayOffset(dst, src, elementSizeLog2);
    } else if (keyMaybeNegative ||
               !CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
#if V8_TARGET_ARCH_S390X
      // If array access is dehoisted, the key, being an int32, can contain
      // a negative value and needs to be sign-extended to 64 bits for the
      // memory access.
      //
      // src (key) is a 32-bit integer.  Sign extension ensures the
      // upper 32 bits do not contain garbage before being used to
      // reference memory.
      lgfr(src, src);
#endif
      ShiftLeftP(dst, src, Operand(elementSizeLog2));
    } else {
      // Small optimization to reduce path length.  After the bounds check,
      // the key is guaranteed to be non-negative.  Leverage RISBG,
      // which also performs zero-extension.
      risbg(dst, src, Operand(32 - elementSizeLog2),
            Operand(63 - elementSizeLog2), Operand(elementSizeLog2),
            true);
    }
  }

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  inline void TestIfSmi(Register value) { tmll(value, Operand(1)); }

  inline void TestIfSmi(MemOperand value) {
    if (is_uint12(value.offset())) {
      tm(value, Operand(1));
    } else if (is_int20(value.offset())) {
      tmy(value, Operand(1));
    } else {
      LoadB(r0, value);
      tmll(r0, Operand(1));
    }
  }

  inline void TestIfPositiveSmi(Register value, Register scratch) {
    STATIC_ASSERT((kSmiTagMask | kSmiSignMask) ==
                  (intptr_t)(1UL << (kBitsPerPointer - 1) | 1));
    mov(scratch, Operand(kIntptrSignBit | kSmiTagMask));
    AndP(scratch, value);
  }

  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value, Label* smi_label) {
    TestIfSmi(value);
    beq(smi_label /*, cr0*/);  // branch if SMI
  }
  // Jump if the register contains a non-smi.
  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
    TestIfSmi(value);
    bne(not_smi_label /*, cr0*/);
  }
  // Jump if either of the registers contains a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contains a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
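
  // Illustrative usage sketch (not part of the original header; obj and the
  // labels are hypothetical): a typical fast-path/slow-path split on the smi
  // check.
  //
  //   Label is_smi, done;
  //   JumpIfSmi(obj, &is_smi);
  //   // ... heap-object path ...
  //   b(&done);
  //   bind(&is_smi);
  //   // ... smi path ...
  //   bind(&done);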

  // Abort execution if argument is a number, enabled via --debug-code.
  void AssertNotNumber(Register object);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

#if V8_TARGET_ARCH_S390X
  inline void TestIfInt32(Register value, Register scratch) {
    // High bits must be identical to fit into a 32-bit integer
    lgfr(scratch, value);
    CmpP(scratch, value);
  }
#else
  inline void TestIfInt32(Register hi_word, Register lo_word,
                          Register scratch) {
    // High bits must be identical to fit into a 32-bit integer
    ShiftRightArith(scratch, lo_word, Operand(31));
    CmpP(scratch, hi_word);
  }
#endif

#if V8_TARGET_ARCH_S390X
  // Ensure it is permissible to read/write the int value directly from the
  // upper half of the smi.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
#endif
#if V8_TARGET_LITTLE_ENDIAN
#define SmiWordOffset(offset) (offset + kPointerSize / 2)
#else
#define SmiWordOffset(offset) offset
#endif

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject,
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
  void AssertReceiver(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if reg is not the root value with the given index,
  // enabled via --debug-code.
  void AssertIsRoot(Register reg, Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // HeapNumber utilities

  void JumpIfNotHeapNumber(Register object, Register heap_number_map,
                           Register scratch, Label* on_not_heap_number);

  // ---------------------------------------------------------------------------
  // String utilities

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not. Assumes that neither object is a smi.
  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
                                                    Register object2,
                                                    Register scratch1,
                                                    Register scratch2,
                                                    Label* failure);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_flat_one_byte_strings);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);

  void EmitSeqStringSetCharCheck(Register string, Register index,
                                 Register value, uint32_t encoding_mask);

  // ---------------------------------------------------------------------------
  // Patching helpers.

  void ClampUint8(Register output_reg, Register input_reg);

  // Saturate a value into an 8-bit unsigned integer.
  //   if input_value < 0, output_value is 0
  //   if input_value > 255, output_value is 255
  //   otherwise output_value is the (int)input_value (round to nearest)
  void ClampDoubleToUint8(Register result_reg, DoubleRegister input_reg,
                          DoubleRegister temp_double_reg);

  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  void LoadAccessor(Register dst, Register holder, int accessor_index,
                    AccessorComponent accessor);

  template <typename Field>
  void DecodeField(Register dst, Register src) {
    ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
  }

  template <typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }

  template <typename Field>
  void DecodeFieldToSmi(Register dst, Register src) {
    // TODO(joransiu): Optimize into single instruction
    DecodeField<Field>(dst, src);
    SmiTag(dst);
  }

  template <typename Field>
  void DecodeFieldToSmi(Register reg) {
    DecodeFieldToSmi<Field>(reg, reg);
  }
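
  // Illustrative usage sketch (not part of the original header; the map
  // register is hypothetical): Field is any BitField-style encoding exposing
  // kShift and kSize, e.g. Map::ElementsKindBits (assumed to exist in this
  // V8 version).
  //
  //   LoadlB(r3, FieldMemOperand(map, Map::kBitField2Offset));
  //   DecodeField<Map::ElementsKindBits>(r3);  // r3 = decoded elements kind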

  // Load the type feedback vector from a JavaScript frame.
  void EmitLoadFeedbackVector(Register vector);

  // Activation support.
  void EnterFrame(StackFrame::Type type,
                  bool load_constant_pool_pointer_reg = false);
  // Returns the pc offset at which the frame ends.
  int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);

  void EnterBuiltinFrame(Register context, Register target, Register argc);
  void LeaveBuiltinFrame(Register context, Register target, Register argc);

  // Expects object in r2 and returns map with validated enum cache
  // in r2.  Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated
  // AllocationMemento object that can be checked for in order to pretransition
  // to another type.
  // On entry, receiver_reg should point to the array object.
  // scratch_reg gets clobbered.
  // If allocation info is present, condition flags are set to eq.
  void TestJSArrayForAllocationMemento(Register receiver_reg,
                                       Register scratch_reg,
                                       Register scratch2_reg,
                                       Label* no_memento_found);

 private:
  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;

  void CallCFunctionHelper(Register function, int num_reg_arguments,
                           int num_double_arguments);

  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
            CRegister cr = cr7);

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual, Label* done,
                      bool* definitely_mismatches, InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object, Register scratch,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);

  // Helper for finding the mark bits for an address.  Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // register holds the position of the first bit.  Leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
                          Register mask_reg);

  static const RegList kSafepointSavedRegisters;
  static const int kNumSafepointSavedRegisters;

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);
  MemOperand SafepointRegisterSlot(Register reg);
  MemOperand SafepointRegistersAndDoublesSlot(Register reg);

  bool generating_stub_;
  bool has_frame_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};

// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  enum FlushICache { FLUSH, DONT_FLUSH };

  CodePatcher(Isolate* isolate, byte* address, int instructions,
              FlushICache flush_cache = FLUSH);
  ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;            // The address of the code being patched.
  int size_;                 // Number of bytes of the expected patch size.
  MacroAssembler masm_;      // Macro assembler used to generate the code.
  FlushICache flush_cache_;  // Whether to flush the I cache after patching.
};
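
// Illustrative usage sketch (not part of the original header; the patched
// address and size are hypothetical, and the emitted code must match the
// requested size exactly): the destructor flushes the instruction cache
// unless DONT_FLUSH was requested.
//
//   CodePatcher patcher(isolate, pc_address, 2);
//   patcher.masm()->LoadRR(r3, r4);  // emit exactly the patched instructions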

// -----------------------------------------------------------------------------
// Static helper functions.

inline MemOperand ContextMemOperand(Register context, int index = 0) {
  return MemOperand(context, Context::SlotOffset(index));
}

inline MemOperand NativeContextMemOperand() {
  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
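
// Illustrative usage sketch (not part of the original header; r3 is an example
// destination register and the usual `#define __ ACCESS_MASM(masm)` shorthand
// is assumed): loading the global proxy out of the native context.
//
//   __ LoadP(r3, NativeContextMemOperand());
//   __ LoadP(r3, ContextMemOperand(r3, Context::GLOBAL_PROXY_INDEX));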

#define ACCESS_MASM(masm) masm->

}  // namespace internal
}  // namespace v8

#endif  // V8_S390_MACRO_ASSEMBLER_S390_H_