1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_S390_MACRO_ASSEMBLER_S390_H_
6 #define V8_S390_MACRO_ASSEMBLER_S390_H_
7 
8 #include "src/assembler.h"
9 #include "src/bailout-reason.h"
10 #include "src/frames.h"
11 #include "src/globals.h"
12 
13 namespace v8 {
14 namespace internal {
15 
16 // Give alias names to registers for calling conventions.
17 const Register kReturnRegister0 = {Register::kCode_r2};
18 const Register kReturnRegister1 = {Register::kCode_r3};
19 const Register kReturnRegister2 = {Register::kCode_r4};
20 const Register kJSFunctionRegister = {Register::kCode_r3};
21 const Register kContextRegister = {Register::kCode_r13};
22 const Register kAllocateSizeRegister = {Register::kCode_r3};
23 const Register kInterpreterAccumulatorRegister = {Register::kCode_r2};
24 const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r6};
25 const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r7};
26 const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
27 const Register kJavaScriptCallArgCountRegister = {Register::kCode_r2};
28 const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r5};
29 const Register kRuntimeCallFunctionRegister = {Register::kCode_r3};
30 const Register kRuntimeCallArgCountRegister = {Register::kCode_r2};
31 
32 // ----------------------------------------------------------------------------
33 // Static helper functions
34 
35 // Generate a MemOperand for loading a field from an object.
36 inline MemOperand FieldMemOperand(Register object, int offset) {
37   return MemOperand(object, offset - kHeapObjectTag);
38 }
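// Illustrative use (register names are placeholders): the object register
// holds a tagged pointer, so FieldMemOperand subtracts kHeapObjectTag from the
// field offset to form the real displacement, e.g.
//   LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));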
39 
40 // Generate a MemOperand for loading an indexed field from an object.
41 inline MemOperand FieldMemOperand(Register object, Register index, int offset) {
42   return MemOperand(object, index, offset - kHeapObjectTag);
43 }
44 
45 // Generate a MemOperand for loading an entry off the root register.
46 inline MemOperand RootMemOperand(Heap::RootListIndex index) {
47   return MemOperand(kRootRegister, index << kPointerSizeLog2);
48 }
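// A small sketch (destination register and root index are illustrative):
// entries can be read straight off kRootRegister; LoadRoot, declared below,
// is the usual wrapper for this pattern.
//   LoadP(dst, RootMemOperand(Heap::kUndefinedValueRootIndex));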
49 
50 // Flags used for AllocateHeapNumber
51 enum TaggingMode {
52   // Tag the result.
53   TAG_RESULT,
54   // Don't tag
55   DONT_TAG_RESULT
56 };
57 
58 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
59 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
60 enum PointersToHereCheck {
61   kPointersToHereMaybeInteresting,
62   kPointersToHereAreAlwaysInteresting
63 };
64 enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
65 
66 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
67                                    Register reg3 = no_reg,
68                                    Register reg4 = no_reg,
69                                    Register reg5 = no_reg,
70                                    Register reg6 = no_reg);
71 
72 #ifdef DEBUG
73 bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
74                 Register reg4 = no_reg, Register reg5 = no_reg,
75                 Register reg6 = no_reg, Register reg7 = no_reg,
76                 Register reg8 = no_reg, Register reg9 = no_reg,
77                 Register reg10 = no_reg);
78 #endif
79 
80 // These exist to provide portability between 32-bit and 64-bit builds.
81 #if V8_TARGET_ARCH_S390X
82 #define Div divd
83 
84 // The length of the arithmetic operation is the length
85 // of the register.
86 
87 // Length:
88 // H = halfword
89 // W = word
90 
91 // Arithmetic and bitwise operations
92 #define AddMI agsi
93 #define AddRR agr
94 #define SubRR sgr
95 #define AndRR ngr
96 #define OrRR ogr
97 #define XorRR xgr
98 #define LoadComplementRR lcgr
99 #define LoadNegativeRR lngr
100 
101 // Distinct Operands
102 #define AddP_RRR agrk
103 #define AddPImm_RRI aghik
104 #define AddLogicalP_RRR algrk
105 #define SubP_RRR sgrk
106 #define SubLogicalP_RRR slgrk
107 #define AndP_RRR ngrk
108 #define OrP_RRR ogrk
109 #define XorP_RRR xgrk
110 
111 // Load / Store
112 #define LoadRR lgr
113 #define LoadAndTestRR ltgr
114 #define LoadImmP lghi
115 #define LoadLogicalHalfWordP llgh
116 
117 // Compare
118 #define CmpPH cghi
119 #define CmpLogicalPW clgfi
120 
121 // Shifts
122 #define ShiftLeftP sllg
123 #define ShiftRightP srlg
124 #define ShiftLeftArithP slag
125 #define ShiftRightArithP srag
126 #else
127 
128 // Arithmetic and bitwise operations
129 // Reg2Reg
130 #define AddMI asi
131 #define AddRR ar
132 #define SubRR sr
133 #define AndRR nr
134 #define OrRR or_z
135 #define XorRR xr
136 #define LoadComplementRR lcr
137 #define LoadNegativeRR lnr
138 
139 // Distinct Operands
140 #define AddP_RRR ark
141 #define AddPImm_RRI ahik
142 #define AddLogicalP_RRR alrk
143 #define SubP_RRR srk
144 #define SubLogicalP_RRR slrk
145 #define AndP_RRR nrk
146 #define OrP_RRR ork
147 #define XorP_RRR xrk
148 
149 // Load / Store
150 #define LoadRR lr
151 #define LoadAndTestRR ltr
152 #define LoadImmP lhi
153 #define LoadLogicalHalfWordP llh
154 
155 // Compare
156 #define CmpPH chi
157 #define CmpLogicalPW clfi
158 
159 // Shifts
160 #define ShiftLeftP ShiftLeft
161 #define ShiftRightP ShiftRight
162 #define ShiftLeftArithP ShiftLeftArith
163 #define ShiftRightArithP ShiftRightArith
164 
165 #endif
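// A minimal illustration of the aliases above: the same macro-assembler source
// expands to the doubleword form on 64-bit and the word form on 31-bit, e.g.
//   LoadRR(r1, r2);        // lgr r1,r2 on S390X, lr r1,r2 otherwise
//   AddP_RRR(r1, r2, r3);  // agrk r1,r2,r3 on S390X, ark r1,r2,r3 otherwise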
166 
167 // MacroAssembler implements a collection of frequently used macros.
168 class MacroAssembler : public Assembler {
169  public:
170   MacroAssembler(Isolate* isolate, void* buffer, int size,
171                  CodeObjectRequired create_code_object);
172 
173   // Returns the size of a call in instructions.
174   static int CallSize(Register target);
175   int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
176   static int CallSizeNotPredictableCodeSize(Address target,
177                                             RelocInfo::Mode rmode,
178                                             Condition cond = al);
179 
180   // Jump, Call, and Ret pseudo instructions implementing inter-working.
181   void Jump(Register target);
182   void JumpToJSEntry(Register target);
183   void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
184             CRegister cr = cr7);
185   void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
186   void Call(Register target);
187   void CallJSEntry(Register target);
188   void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
189   int CallSize(Handle<Code> code,
190                RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
191                TypeFeedbackId ast_id = TypeFeedbackId::None(),
192                Condition cond = al);
193   void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
194             TypeFeedbackId ast_id = TypeFeedbackId::None(),
195             Condition cond = al);
196   void Ret() { b(r14); }
197   void Ret(Condition cond) { b(cond, r14); }
198 
199   // Emit code to discard a non-negative number of pointer-sized elements
200   // from the stack, clobbering only the sp register.
201   void Drop(int count);
202   void Drop(Register count, Register scratch = r0);
203 
204   void Ret(int drop) {
205     Drop(drop);
206     Ret();
207   }
208 
209   void Call(Label* target);
210 
211   // Register move. May do nothing if the registers are identical.
212   void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
213   void Move(Register dst, Handle<Object> value);
214   void Move(Register dst, Register src, Condition cond = al);
215   void Move(DoubleRegister dst, DoubleRegister src);
216 
217   void MultiPush(RegList regs, Register location = sp);
218   void MultiPop(RegList regs, Register location = sp);
219 
220   void MultiPushDoubles(RegList dregs, Register location = sp);
221   void MultiPopDoubles(RegList dregs, Register location = sp);
222 
223   // Load an object from the root table.
224   void LoadRoot(Register destination, Heap::RootListIndex index,
225                 Condition cond = al);
226   // Store an object to the root table.
227   void StoreRoot(Register source, Heap::RootListIndex index,
228                  Condition cond = al);
229 
230   //--------------------------------------------------------------------------
231   // S390 Macro Assemblers for Instructions
232   //--------------------------------------------------------------------------
233 
234   // Arithmetic Operations
235 
236   // Add (Register - Immediate)
237   void Add32(Register dst, const Operand& imm);
238   void AddP(Register dst, const Operand& imm);
239   void Add32(Register dst, Register src, const Operand& imm);
240   void AddP(Register dst, Register src, const Operand& imm);
241 
242   // Add (Register - Register)
243   void Add32(Register dst, Register src);
244   void AddP(Register dst, Register src);
245   void AddP_ExtendSrc(Register dst, Register src);
246   void Add32(Register dst, Register src1, Register src2);
247   void AddP(Register dst, Register src1, Register src2);
248   void AddP_ExtendSrc(Register dst, Register src1, Register src2);
249 
250   // Add (Register - Mem)
251   void Add32(Register dst, const MemOperand& opnd);
252   void AddP(Register dst, const MemOperand& opnd);
253   void AddP_ExtendSrc(Register dst, const MemOperand& opnd);
254 
255   // Add (Mem - Immediate)
256   void Add32(const MemOperand& opnd, const Operand& imm);
257   void AddP(const MemOperand& opnd, const Operand& imm);
258 
259   // Add Logical (Register - Register)
260   void AddLogical32(Register dst, Register src1, Register src2);
261 
262   // Add Logical With Carry (Register - Register)
263   void AddLogicalWithCarry32(Register dst, Register src1, Register src2);
264 
265   // Add Logical (Register - Immediate)
266   void AddLogical(Register dst, const Operand& imm);
267   void AddLogicalP(Register dst, const Operand& imm);
268 
269   // Add Logical (Register - Mem)
270   void AddLogical(Register dst, const MemOperand& opnd);
271   void AddLogicalP(Register dst, const MemOperand& opnd);
272 
273   // Subtract (Register - Immediate)
274   void Sub32(Register dst, const Operand& imm);
275   void SubP(Register dst, const Operand& imm);
276   void Sub32(Register dst, Register src, const Operand& imm);
277   void SubP(Register dst, Register src, const Operand& imm);
278 
279   // Subtract (Register - Register)
280   void Sub32(Register dst, Register src);
281   void SubP(Register dst, Register src);
282   void SubP_ExtendSrc(Register dst, Register src);
283   void Sub32(Register dst, Register src1, Register src2);
284   void SubP(Register dst, Register src1, Register src2);
285   void SubP_ExtendSrc(Register dst, Register src1, Register src2);
286 
287   // Subtract (Register - Mem)
288   void Sub32(Register dst, const MemOperand& opnd);
289   void SubP(Register dst, const MemOperand& opnd);
290   void SubP_ExtendSrc(Register dst, const MemOperand& opnd);
291 
292   // Subtract Logical (Register - Mem)
293   void SubLogical(Register dst, const MemOperand& opnd);
294   void SubLogicalP(Register dst, const MemOperand& opnd);
295   void SubLogicalP_ExtendSrc(Register dst, const MemOperand& opnd);
296   // Subtract Logical 32-bit
297   void SubLogical32(Register dst, Register src1, Register src2);
298   // Subtract Logical With Borrow 32-bit
299   void SubLogicalWithBorrow32(Register dst, Register src1, Register src2);
300 
301   // Multiply
302   void MulP(Register dst, const Operand& opnd);
303   void MulP(Register dst, Register src);
304   void MulP(Register dst, const MemOperand& opnd);
305   void Mul(Register dst, Register src1, Register src2);
306 
307   // Divide
308   void DivP(Register dividend, Register divider);
309 
310   // Compare
311   void Cmp32(Register src1, Register src2);
312   void CmpP(Register src1, Register src2);
313   void Cmp32(Register dst, const Operand& opnd);
314   void CmpP(Register dst, const Operand& opnd);
315   void Cmp32(Register dst, const MemOperand& opnd);
316   void CmpP(Register dst, const MemOperand& opnd);
317 
318   // Compare Logical
319   void CmpLogical32(Register src1, Register src2);
320   void CmpLogicalP(Register src1, Register src2);
321   void CmpLogical32(Register src1, const Operand& opnd);
322   void CmpLogicalP(Register src1, const Operand& opnd);
323   void CmpLogical32(Register dst, const MemOperand& opnd);
324   void CmpLogicalP(Register dst, const MemOperand& opnd);
325 
326   // Compare Logical Byte (CLI/CLIY)
327   void CmpLogicalByte(const MemOperand& mem, const Operand& imm);
328 
329   // Load 32bit
330   void Load(Register dst, const MemOperand& opnd);
331   void Load(Register dst, const Operand& opnd);
332   void LoadW(Register dst, const MemOperand& opnd, Register scratch = no_reg);
333   void LoadW(Register dst, Register src);
334   void LoadlW(Register dst, const MemOperand& opnd, Register scratch = no_reg);
335   void LoadlW(Register dst, Register src);
336   void LoadB(Register dst, const MemOperand& opnd);
337   void LoadB(Register dst, Register src);
338   void LoadlB(Register dst, const MemOperand& opnd);
339 
340   // Load And Test
341   void LoadAndTest32(Register dst, Register src);
342   void LoadAndTestP_ExtendSrc(Register dst, Register src);
343   void LoadAndTestP(Register dst, Register src);
344 
345   void LoadAndTest32(Register dst, const MemOperand& opnd);
346   void LoadAndTestP(Register dst, const MemOperand& opnd);
347 
348   // Load Floating Point
349   void LoadDouble(DoubleRegister dst, const MemOperand& opnd);
350   void LoadFloat32(DoubleRegister dst, const MemOperand& opnd);
351   void LoadFloat32ConvertToDouble(DoubleRegister dst, const MemOperand& mem);
352 
353   // Store Floating Point
354   void StoreDouble(DoubleRegister dst, const MemOperand& opnd);
355   void StoreFloat32(DoubleRegister dst, const MemOperand& opnd);
356   void StoreDoubleAsFloat32(DoubleRegister src, const MemOperand& mem,
357                             DoubleRegister scratch);
358 
359   void Branch(Condition c, const Operand& opnd);
360   void BranchOnCount(Register r1, Label* l);
361 
362   // Shifts
363   void ShiftLeft(Register dst, Register src, Register val);
364   void ShiftLeft(Register dst, Register src, const Operand& val);
365   void ShiftRight(Register dst, Register src, Register val);
366   void ShiftRight(Register dst, Register src, const Operand& val);
367   void ShiftLeftArith(Register dst, Register src, Register shift);
368   void ShiftLeftArith(Register dst, Register src, const Operand& val);
369   void ShiftRightArith(Register dst, Register src, Register shift);
370   void ShiftRightArith(Register dst, Register src, const Operand& val);
371 
372   void ClearRightImm(Register dst, Register src, const Operand& val);
373 
374   // Bitwise operations
375   void And(Register dst, Register src);
376   void AndP(Register dst, Register src);
377   void And(Register dst, Register src1, Register src2);
378   void AndP(Register dst, Register src1, Register src2);
379   void And(Register dst, const MemOperand& opnd);
380   void AndP(Register dst, const MemOperand& opnd);
381   void And(Register dst, const Operand& opnd);
382   void AndP(Register dst, const Operand& opnd);
383   void And(Register dst, Register src, const Operand& opnd);
384   void AndP(Register dst, Register src, const Operand& opnd);
385   void Or(Register dst, Register src);
386   void OrP(Register dst, Register src);
387   void Or(Register dst, Register src1, Register src2);
388   void OrP(Register dst, Register src1, Register src2);
389   void Or(Register dst, const MemOperand& opnd);
390   void OrP(Register dst, const MemOperand& opnd);
391   void Or(Register dst, const Operand& opnd);
392   void OrP(Register dst, const Operand& opnd);
393   void Or(Register dst, Register src, const Operand& opnd);
394   void OrP(Register dst, Register src, const Operand& opnd);
395   void Xor(Register dst, Register src);
396   void XorP(Register dst, Register src);
397   void Xor(Register dst, Register src1, Register src2);
398   void XorP(Register dst, Register src1, Register src2);
399   void Xor(Register dst, const MemOperand& opnd);
400   void XorP(Register dst, const MemOperand& opnd);
401   void Xor(Register dst, const Operand& opnd);
402   void XorP(Register dst, const Operand& opnd);
403   void Xor(Register dst, Register src, const Operand& opnd);
404   void XorP(Register dst, Register src, const Operand& opnd);
405   void Popcnt32(Register dst, Register src);
406 
407 #ifdef V8_TARGET_ARCH_S390X
408   void Popcnt64(Register dst, Register src);
409 #endif
410 
411   void NotP(Register dst);
412 
413   void mov(Register dst, const Operand& src);
414 
415   void CleanUInt32(Register x) {
416 #ifdef V8_TARGET_ARCH_S390X
417     llgfr(x, x);
418 #endif
419   }
420 
421   // ---------------------------------------------------------------------------
422   // GC Support
423 
424   void IncrementalMarkingRecordWriteHelper(Register object, Register value,
425                                            Register address);
426 
427   enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
428 
429   // Record in the remembered set the fact that we have a pointer to new space
430   // at the address pointed to by the addr register.  Only works if addr is not
431   // in new space.
432   void RememberedSetHelper(Register object,  // Used for debug code.
433                            Register addr, Register scratch,
434                            SaveFPRegsMode save_fp,
435                            RememberedSetFinalAction and_then);
436 
437   void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
438                      Label* condition_met);
439 
440   // Check if object is in new space.  Jumps if the object is not in new space.
441   // The register scratch can be object itself, but scratch will be clobbered.
442   void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
443     InNewSpace(object, scratch, eq, branch);
444   }
445 
446   // Check if object is in new space.  Jumps if the object is in new space.
447   // The register scratch can be object itself, but it will be clobbered.
448   void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
449     InNewSpace(object, scratch, ne, branch);
450   }
451 
452   // Check if an object has a given incremental marking color.
453   void HasColor(Register object, Register scratch0, Register scratch1,
454                 Label* has_color, int first_bit, int second_bit);
455 
456   void JumpIfBlack(Register object, Register scratch0, Register scratch1,
457                    Label* on_black);
458 
459   // Checks the color of an object.  If the object is white we jump to the
460   // incremental marker.
461   void JumpIfWhite(Register value, Register scratch1, Register scratch2,
462                    Register scratch3, Label* value_is_white);
463 
464   // Notify the garbage collector that we wrote a pointer into an object.
465   // |object| is the object being stored into, |value| is the object being
466   // stored.  value and scratch registers are clobbered by the operation.
467   // The offset is the offset from the start of the object, not the offset from
468   // the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
469   void RecordWriteField(
470       Register object, int offset, Register value, Register scratch,
471       LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
472       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
473       SmiCheck smi_check = INLINE_SMI_CHECK,
474       PointersToHereCheck pointers_to_here_check_for_value =
475           kPointersToHereMaybeInteresting);
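  // A sketch of how RecordWriteField is intended to be paired with the store
  // (the offset and registers are placeholders): perform the store through the
  // tagged pointer first, then emit the barrier for the same field so the
  // write is visible to the incremental marker and the remembered set.
  //   StoreP(value, FieldMemOperand(object, offset));
  //   RecordWriteField(object, offset, value, scratch,
  //                    kLRHasNotBeenSaved, kDontSaveFPRegs);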
476 
477   // As above, but the offset has the tag presubtracted.  For use with
478   // MemOperand(reg, off).
479   inline void RecordWriteContextSlot(
480       Register context, int offset, Register value, Register scratch,
481       LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
482       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
483       SmiCheck smi_check = INLINE_SMI_CHECK,
484       PointersToHereCheck pointers_to_here_check_for_value =
485           kPointersToHereMaybeInteresting) {
486     RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
487                      lr_status, save_fp, remembered_set_action, smi_check,
488                      pointers_to_here_check_for_value);
489   }
490 
491   // Notify the garbage collector that we wrote a code entry into a
492   // JSFunction. Only scratch is clobbered by the operation.
493   void RecordWriteCodeEntryField(Register js_function, Register code_entry,
494                                  Register scratch);
495 
496   void RecordWriteForMap(Register object, Register map, Register dst,
497                          LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
498 
499   // For a given |object| notify the garbage collector that the slot |address|
500   // has been written.  |value| is the object being stored. The value and
501   // address registers are clobbered by the operation.
502   void RecordWrite(
503       Register object, Register address, Register value,
504       LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
505       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
506       SmiCheck smi_check = INLINE_SMI_CHECK,
507       PointersToHereCheck pointers_to_here_check_for_value =
508           kPointersToHereMaybeInteresting);
509 
510   void push(Register src) {
511     lay(sp, MemOperand(sp, -kPointerSize));
512     StoreP(src, MemOperand(sp));
513   }
514 
515   void pop(Register dst) {
516     LoadP(dst, MemOperand(sp));
517     la(sp, MemOperand(sp, kPointerSize));
518   }
519 
520   void pop() { la(sp, MemOperand(sp, kPointerSize)); }
521 
522   void Push(Register src) { push(src); }
523 
524   // Push a handle.
525   void Push(Handle<Object> handle);
526   void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
527 
528   // Push two registers.  Pushes leftmost register first (to highest address).
529   void Push(Register src1, Register src2) {
530     lay(sp, MemOperand(sp, -kPointerSize * 2));
531     StoreP(src1, MemOperand(sp, kPointerSize));
532     StoreP(src2, MemOperand(sp, 0));
533   }
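  // Illustration: Push(r3, r4) stores r3 at [sp + kPointerSize] and r4 at
  // [sp], i.e. it behaves like push(r3) followed by push(r4).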
534 
535   // Push three registers.  Pushes leftmost register first (to highest address).
536   void Push(Register src1, Register src2, Register src3) {
537     lay(sp, MemOperand(sp, -kPointerSize * 3));
538     StoreP(src1, MemOperand(sp, kPointerSize * 2));
539     StoreP(src2, MemOperand(sp, kPointerSize));
540     StoreP(src3, MemOperand(sp, 0));
541   }
542 
543   // Push four registers.  Pushes leftmost register first (to highest address).
544   void Push(Register src1, Register src2, Register src3, Register src4) {
545     lay(sp, MemOperand(sp, -kPointerSize * 4));
546     StoreP(src1, MemOperand(sp, kPointerSize * 3));
547     StoreP(src2, MemOperand(sp, kPointerSize * 2));
548     StoreP(src3, MemOperand(sp, kPointerSize));
549     StoreP(src4, MemOperand(sp, 0));
550   }
551 
552   // Push five registers.  Pushes leftmost register first (to highest address).
553   void Push(Register src1, Register src2, Register src3, Register src4,
554             Register src5) {
555     DCHECK(!src1.is(src2));
556     DCHECK(!src1.is(src3));
557     DCHECK(!src2.is(src3));
558     DCHECK(!src1.is(src4));
559     DCHECK(!src2.is(src4));
560     DCHECK(!src3.is(src4));
561     DCHECK(!src1.is(src5));
562     DCHECK(!src2.is(src5));
563     DCHECK(!src3.is(src5));
564     DCHECK(!src4.is(src5));
565 
566     lay(sp, MemOperand(sp, -kPointerSize * 5));
567     StoreP(src1, MemOperand(sp, kPointerSize * 4));
568     StoreP(src2, MemOperand(sp, kPointerSize * 3));
569     StoreP(src3, MemOperand(sp, kPointerSize * 2));
570     StoreP(src4, MemOperand(sp, kPointerSize));
571     StoreP(src5, MemOperand(sp, 0));
572   }
573 
574   void Pop(Register dst) { pop(dst); }
575 
576   // Pop two registers. Pops rightmost register first (from lower address).
577   void Pop(Register src1, Register src2) {
578     LoadP(src2, MemOperand(sp, 0));
579     LoadP(src1, MemOperand(sp, kPointerSize));
580     la(sp, MemOperand(sp, 2 * kPointerSize));
581   }
582 
583   // Pop three registers.  Pops rightmost register first (from lower address).
584   void Pop(Register src1, Register src2, Register src3) {
585     LoadP(src3, MemOperand(sp, 0));
586     LoadP(src2, MemOperand(sp, kPointerSize));
587     LoadP(src1, MemOperand(sp, 2 * kPointerSize));
588     la(sp, MemOperand(sp, 3 * kPointerSize));
589   }
590 
591   // Pop four registers.  Pops rightmost register first (from lower address).
592   void Pop(Register src1, Register src2, Register src3, Register src4) {
593     LoadP(src4, MemOperand(sp, 0));
594     LoadP(src3, MemOperand(sp, kPointerSize));
595     LoadP(src2, MemOperand(sp, 2 * kPointerSize));
596     LoadP(src1, MemOperand(sp, 3 * kPointerSize));
597     la(sp, MemOperand(sp, 4 * kPointerSize));
598   }
599 
600   // Pop five registers.  Pops rightmost register first (from lower address).
601   void Pop(Register src1, Register src2, Register src3, Register src4,
602            Register src5) {
603     LoadP(src5, MemOperand(sp, 0));
604     LoadP(src4, MemOperand(sp, kPointerSize));
605     LoadP(src3, MemOperand(sp, 2 * kPointerSize));
606     LoadP(src2, MemOperand(sp, 3 * kPointerSize));
607     LoadP(src1, MemOperand(sp, 4 * kPointerSize));
608     la(sp, MemOperand(sp, 5 * kPointerSize));
609   }
610 
611   // Push a fixed frame, consisting of lr, fp, constant pool.
612   void PushCommonFrame(Register marker_reg = no_reg);
613 
614   // Push a standard frame, consisting of lr, fp, constant pool,
615   // context and JS function
616   void PushStandardFrame(Register function_reg);
617 
618   void PopCommonFrame(Register marker_reg = no_reg);
619 
620   // Restore caller's frame pointer and return address prior to being
621   // overwritten by tail call stack preparation.
622   void RestoreFrameStateForTailCall();
623 
624   // Push and pop the registers that can hold pointers, as defined by the
625   // RegList constant kSafepointSavedRegisters.
626   void PushSafepointRegisters();
627   void PopSafepointRegisters();
628   // Store value in register src in the safepoint stack slot for
629   // register dst.
630   void StoreToSafepointRegisterSlot(Register src, Register dst);
631   // Load the value of the src register from its safepoint stack slot
632   // into register dst.
633   void LoadFromSafepointRegisterSlot(Register dst, Register src);
634 
635   // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
636   // from C.
637   // Does not handle errors.
638   void FlushICache(Register address, size_t size, Register scratch);
639 
640   // If the value is a NaN, canonicalize the value; otherwise, do nothing.
641   void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
642   void CanonicalizeNaN(const DoubleRegister value) {
643     CanonicalizeNaN(value, value);
644   }
645 
646   // Converts the integer (untagged smi) in |src| to a double, storing
647   // the result to |dst|
648   void ConvertIntToDouble(Register src, DoubleRegister dst);
649 
650   // Converts the unsigned integer (untagged smi) in |src| to
651   // a double, storing the result to |dst|
652   void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst);
653 
654   // Converts the integer (untagged smi) in |src| to
655   // a float, storing the result in |dst|
656   void ConvertIntToFloat(Register src, DoubleRegister dst);
657 
658   // Converts the unsigned integer (untagged smi) in |src| to
659   // a float, storing the result in |dst|
660   void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);
661 
662 #if V8_TARGET_ARCH_S390X
663   void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
664   void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
665   void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
666   void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
667 #endif
668 
669   void MovIntToFloat(DoubleRegister dst, Register src);
670   void MovFloatToInt(Register dst, DoubleRegister src);
671   void MovDoubleToInt64(Register dst, DoubleRegister src);
672   void MovInt64ToDouble(DoubleRegister dst, Register src);
673   // Converts the double_input to an integer.  Note that, upon return,
674   // the contents of double_dst will also hold the fixed point representation.
675   void ConvertFloat32ToInt64(const DoubleRegister double_input,
676 #if !V8_TARGET_ARCH_S390X
677                              const Register dst_hi,
678 #endif
679                              const Register dst,
680                              const DoubleRegister double_dst,
681                              FPRoundingMode rounding_mode = kRoundToZero);
682 
683   // Converts the double_input to an integer.  Note that, upon return,
684   // the contents of double_dst will also hold the fixed point representation.
685   void ConvertDoubleToInt64(const DoubleRegister double_input,
686 #if !V8_TARGET_ARCH_S390X
687                             const Register dst_hi,
688 #endif
689                             const Register dst, const DoubleRegister double_dst,
690                             FPRoundingMode rounding_mode = kRoundToZero);
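  // Usage differs by target word size because the 31-bit build also returns
  // the high word (a sketch; register choices are arbitrary):
  // #if V8_TARGET_ARCH_S390X
  //   ConvertDoubleToInt64(d0, r2, d1);      // 64-bit result in r2
  // #else
  //   ConvertDoubleToInt64(d0, r3, r2, d1);  // high word in r3, low word in r2
  // #endif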
691 
692   void ConvertFloat32ToInt32(const DoubleRegister double_input,
693                              const Register dst,
694                              const DoubleRegister double_dst,
695                              FPRoundingMode rounding_mode = kRoundToZero);
696   void ConvertFloat32ToUnsignedInt32(
697       const DoubleRegister double_input, const Register dst,
698       const DoubleRegister double_dst,
699       FPRoundingMode rounding_mode = kRoundToZero);
700 #if V8_TARGET_ARCH_S390X
701   // Converts the double_input to an unsigned integer.  Note that, upon return,
702   // the contents of double_dst will also hold the fixed point representation.
703   void ConvertDoubleToUnsignedInt64(
704       const DoubleRegister double_input, const Register dst,
705       const DoubleRegister double_dst,
706       FPRoundingMode rounding_mode = kRoundToZero);
707   void ConvertFloat32ToUnsignedInt64(
708       const DoubleRegister double_input, const Register dst,
709       const DoubleRegister double_dst,
710       FPRoundingMode rounding_mode = kRoundToZero);
711 #endif
712 
713 #if !V8_TARGET_ARCH_S390X
714   void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
715                      Register src_high, Register scratch, Register shift);
716   void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
717                      Register src_high, uint32_t shift);
718   void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
719                       Register src_high, Register scratch, Register shift);
720   void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
721                       Register src_high, uint32_t shift);
722   void ShiftRightArithPair(Register dst_low, Register dst_high,
723                            Register src_low, Register src_high,
724                            Register scratch, Register shift);
725   void ShiftRightArithPair(Register dst_low, Register dst_high,
726                            Register src_low, Register src_high, uint32_t shift);
727 #endif
728 
729   // Generates function and stub prologue code.
730   void StubPrologue(StackFrame::Type type, Register base = no_reg,
731                     int prologue_offset = 0);
732   void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
733 
734   // Enter exit frame.
735   // stack_space - extra stack space, used for parameters before call to C.
736   // At least one slot (for the return address) should be provided.
737   void EnterExitFrame(bool save_doubles, int stack_space = 1);
738 
739   // Leave the current exit frame. Expects the return value in r0.
740   // Expects the number of values to remove (pushed prior to the exit frame)
741   // in a register (or no_reg, if there is nothing to remove).
742   void LeaveExitFrame(bool save_doubles, Register argument_count,
743                       bool restore_context,
744                       bool argument_count_is_length = false);
745 
746   // Get the actual activation frame alignment for target environment.
747   static int ActivationFrameAlignment();
748 
749   void LoadContext(Register dst, int context_chain_length);
750 
751   // Load the global object from the current context.
752   void LoadGlobalObject(Register dst) {
753     LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
754   }
755 
756   // Load the global proxy from the current context.
757   void LoadGlobalProxy(Register dst) {
758     LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
759   }
760 
761   // Conditionally load the cached Array transitioned map of type
762   // transitioned_kind from the native context if the map in register
763   // map_in_out is the cached Array map in the native context of
764   // expected_kind.
765   void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
766                                            ElementsKind transitioned_kind,
767                                            Register map_in_out,
768                                            Register scratch,
769                                            Label* no_map_match);
770 
771   void LoadNativeContextSlot(int index, Register dst);
772 
773   // Load the initial map from the global function. The registers
774   // function and map can be the same, function is then overwritten.
775   void LoadGlobalFunctionInitialMap(Register function, Register map,
776                                     Register scratch);
777 
778   void InitializeRootRegister() {
779     ExternalReference roots_array_start =
780         ExternalReference::roots_array_start(isolate());
781     mov(kRootRegister, Operand(roots_array_start));
782   }
783 
784   // ----------------------------------------------------------------
785   // new S390 macro-assembler interfaces that are slightly higher level
786   // than assembler-s390 and may generate variable length sequences
787 
788   // load a literal signed int value <value> to GPR <dst>
789   void LoadIntLiteral(Register dst, int value);
790 
791   // load an SMI value <value> to GPR <dst>
792   void LoadSmiLiteral(Register dst, Smi* smi);
793 
794   // load a literal double value <value> to FPR <result>
795   void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);
796   void LoadDoubleLiteral(DoubleRegister result, uint64_t value,
797                          Register scratch);
798 
799   void LoadFloat32Literal(DoubleRegister result, float value, Register scratch);
800 
801   void StoreW(Register src, const MemOperand& mem, Register scratch = no_reg);
802 
803   void LoadHalfWordP(Register dst, const MemOperand& mem,
804                      Register scratch = no_reg);
805 
806   void StoreHalfWord(Register src, const MemOperand& mem,
807                      Register scratch = r0);
808   void StoreByte(Register src, const MemOperand& mem, Register scratch = r0);
809 
810   void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
811                           Register scratch = no_reg);
812   void StoreRepresentation(Register src, const MemOperand& mem,
813                            Representation r, Register scratch = no_reg);
814 
815   void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
816   void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
817   void CmpSmiLiteral(Register src1, Smi* smi, Register scratch);
818   void CmpLogicalSmiLiteral(Register src1, Smi* smi, Register scratch);
819   void AndSmiLiteral(Register dst, Register src, Smi* smi);
820 
821   // Set new rounding mode RN to FPSCR
822   void SetRoundingMode(FPRoundingMode RN);
823 
824   // reset rounding mode to default (kRoundToNearest)
825   void ResetRoundingMode();
826 
827   // These exist to provide portability between 32-bit and 64-bit builds.
828   void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
829   void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
830   void StoreP(const MemOperand& mem, const Operand& opnd,
831               Register scratch = no_reg);
832   void LoadMultipleP(Register dst1, Register dst2, const MemOperand& mem);
833   void StoreMultipleP(Register dst1, Register dst2, const MemOperand& mem);
834   void LoadMultipleW(Register dst1, Register dst2, const MemOperand& mem);
835   void StoreMultipleW(Register dst1, Register dst2, const MemOperand& mem);
836 
837   // Cleanse a pointer address on 31-bit by zeroing out the top bit.
838   // This is a NOP on 64-bit.
839   void CleanseP(Register src) {
840 #if (V8_HOST_ARCH_S390 && !(V8_TARGET_ARCH_S390X))
841     nilh(src, Operand(0x7FFF));
842 #endif
843   }
844 
845   // ---------------------------------------------------------------------------
846   // JavaScript invokes
847 
848   // Set up call kind marking in ecx. The method takes ecx as an
849   // explicit first parameter to make the code more readable at the
850   // call sites.
851   // void SetCallKind(Register dst, CallKind kind);
852 
853   // Removes current frame and its arguments from the stack preserving
854   // the arguments and a return address pushed to the stack for the next call.
855   // Both |callee_args_count| and |caller_args_count_reg| do not include
856   // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
857   // is trashed.
858   void PrepareForTailCall(const ParameterCount& callee_args_count,
859                           Register caller_args_count_reg, Register scratch0,
860                           Register scratch1);
861 
862   // Invoke the JavaScript function code by either calling or jumping.
863   void InvokeFunctionCode(Register function, Register new_target,
864                           const ParameterCount& expected,
865                           const ParameterCount& actual, InvokeFlag flag,
866                           const CallWrapper& call_wrapper);
867 
868   void FloodFunctionIfStepping(Register fun, Register new_target,
869                                const ParameterCount& expected,
870                                const ParameterCount& actual);
871 
872   // Invoke the JavaScript function in the given register. Changes the
873   // current context to the context in the function before invoking.
874   void InvokeFunction(Register function, Register new_target,
875                       const ParameterCount& actual, InvokeFlag flag,
876                       const CallWrapper& call_wrapper);
877 
878   void InvokeFunction(Register function, const ParameterCount& expected,
879                       const ParameterCount& actual, InvokeFlag flag,
880                       const CallWrapper& call_wrapper);
881 
882   void InvokeFunction(Handle<JSFunction> function,
883                       const ParameterCount& expected,
884                       const ParameterCount& actual, InvokeFlag flag,
885                       const CallWrapper& call_wrapper);
886 
887   void IsObjectJSStringType(Register object, Register scratch, Label* fail);
888 
889   void IsObjectNameType(Register object, Register scratch, Label* fail);
890 
891   // ---------------------------------------------------------------------------
892   // Debugger Support
893 
894   void DebugBreak();
895 
896   // ---------------------------------------------------------------------------
897   // Exception handling
898 
899   // Push a new stack handler and link into stack handler chain.
900   void PushStackHandler();
901 
902   // Unlink the stack handler on top of the stack from the stack handler chain.
903   // Must preserve the result register.
904   void PopStackHandler();
905 
906   // ---------------------------------------------------------------------------
907   // Inline caching support
908 
909   // Generate code for checking access rights - used for security checks
910   // on access to global objects across environments. The holder register
911   // is left untouched, whereas both scratch registers are clobbered.
912   void CheckAccessGlobalProxy(Register holder_reg, Register scratch,
913                               Label* miss);
914 
915   void GetNumberHash(Register t0, Register scratch);
916 
917   void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
918                                 Register result, Register t0, Register t1,
919                                 Register t2);
920 
921   inline void MarkCode(NopMarkerTypes type) { nop(type); }
922 
923   // Check if the given instruction is a 'type' marker.
924   // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type))
925   // These instructions are generated to mark special location in the code,
926   // like some special IC code.
927   static inline bool IsMarkedCode(Instr instr, int type) {
928     DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
929     return IsNop(instr, type);
930   }
931 
932   static inline int GetCodeMarker(Instr instr) {
933     int dst_reg_offset = 12;
934     int dst_mask = 0xf << dst_reg_offset;
935     int src_mask = 0xf;
936     int dst_reg = (instr & dst_mask) >> dst_reg_offset;
937     int src_reg = instr & src_mask;
938     uint32_t non_register_mask = ~(dst_mask | src_mask);
939     uint32_t mov_mask = al | 13 << 21;
940 
941     // Return <n> if we have a mov rn rn, else return -1.
942     int type = ((instr & non_register_mask) == mov_mask) &&
943                        (dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) &&
944                        (dst_reg < LAST_CODE_MARKER)
945                    ? src_reg
946                    : -1;
947     DCHECK((type == -1) ||
948            ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
949     return type;
950   }
951 
952   // ---------------------------------------------------------------------------
953   // Allocation support
954 
955   // Allocate an object in new space or old pointer space. The object_size is
956   // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
957   // is passed. If the space is exhausted control continues at the gc_required
958   // label. The allocated object is returned in result. If the flag
959   // tag_allocated_object is true the result is tagged as a heap object.
960   // All registers are clobbered also when control continues at the gc_required
961   // label.
962   void Allocate(int object_size, Register result, Register scratch1,
963                 Register scratch2, Label* gc_required, AllocationFlags flags);
964 
965   void Allocate(Register object_size, Register result, Register result_end,
966                 Register scratch, Label* gc_required, AllocationFlags flags);
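  // A hedged sketch of the fixed-size form (flag choice is illustrative):
  //   Allocate(HeapNumber::kSize, result, scratch1, scratch2,
  //            &gc_required, NO_ALLOCATION_FLAGS);
  // falls through with the new object in |result|, or jumps to |gc_required|
  // when the space is exhausted.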
967 
968   // FastAllocate is right now only used for folded allocations. It just
969   // increments the top pointer without checking against limit. This can only
970   // be done if it was proved earlier that the allocation will succeed.
971   void FastAllocate(int object_size, Register result, Register scratch1,
972                     Register scratch2, AllocationFlags flags);
973 
974   void FastAllocate(Register object_size, Register result, Register result_end,
975                     Register scratch, AllocationFlags flags);
976 
977   void AllocateTwoByteString(Register result, Register length,
978                              Register scratch1, Register scratch2,
979                              Register scratch3, Label* gc_required);
980   void AllocateOneByteString(Register result, Register length,
981                              Register scratch1, Register scratch2,
982                              Register scratch3, Label* gc_required);
983   void AllocateTwoByteConsString(Register result, Register length,
984                                  Register scratch1, Register scratch2,
985                                  Label* gc_required);
986   void AllocateOneByteConsString(Register result, Register length,
987                                  Register scratch1, Register scratch2,
988                                  Label* gc_required);
989   void AllocateTwoByteSlicedString(Register result, Register length,
990                                    Register scratch1, Register scratch2,
991                                    Label* gc_required);
992   void AllocateOneByteSlicedString(Register result, Register length,
993                                    Register scratch1, Register scratch2,
994                                    Label* gc_required);
995 
996   // Allocates a heap number or jumps to the gc_required label if the young
997   // space is full and a scavenge is needed. All registers are clobbered also
998   // when control continues at the gc_required label.
999   void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
1000                           Register heap_number_map, Label* gc_required,
1001                           MutableMode mode = IMMUTABLE);
1002   void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
1003                                    Register scratch1, Register scratch2,
1004                                    Register heap_number_map,
1005                                    Label* gc_required);
1006 
1007   // Allocate and initialize a JSValue wrapper with the specified {constructor}
1008   // and {value}.
1009   void AllocateJSValue(Register result, Register constructor, Register value,
1010                        Register scratch1, Register scratch2,
1011                        Label* gc_required);
1012 
1013   // Copies a number of bytes from src to dst. All registers are clobbered. On
1014   // exit src and dst will point to the place just after where the last byte was
1015   // read or written and length will be zero.
1016   void CopyBytes(Register src, Register dst, Register length, Register scratch);
1017 
1018   // Initialize fields with filler values.  |count| fields starting at
1019   // |current_address| are overwritten with the value in |filler|.  At the end
1020   // of the loop, |current_address| points at the next uninitialized field.
1021   // |count| is assumed to be non-zero.
1022   void InitializeNFieldsWithFiller(Register current_address, Register count,
1023                                    Register filler);
1024 
1025   // Initialize fields with filler values.  Fields starting at |current_address|
1026   // not including |end_address| are overwritten with the value in |filler|.  At
1027   // the end of the loop, |current_address| takes the value of |end_address|.
1028   void InitializeFieldsWithFiller(Register current_address,
1029                                   Register end_address, Register filler);
1030 
1031   // ---------------------------------------------------------------------------
1032   // Support functions.
1033 
1034   // Machine code version of Map::GetConstructor().
1035   // |temp| holds |result|'s map when done, and |temp2| its instance type.
1036   void GetMapConstructor(Register result, Register map, Register temp,
1037                          Register temp2);
1038 
1039   // Try to get the function prototype of a function and put the value in
1040   // the result register. Checks that the function really is a
1041   // function and jumps to the miss label if the fast checks fail. The
1042   // function register will be untouched; the other registers may be
1043   // clobbered.
1044   void TryGetFunctionPrototype(Register function, Register result,
1045                                Register scratch, Label* miss);
1046 
1047   // Compare object type for heap object.  heap_object contains a non-Smi
1048   // whose object type should be compared with the given type.  This both
1049   // sets the flags and leaves the object type in the type_reg register.
1050   // It leaves the map in the map register (unless the type_reg and map register
1051   // are the same register).  It leaves the heap object in the heap_object
1052   // register unless the heap_object register is the same register as one of the
1053   // other registers.
1054   // Type_reg can be no_reg. In that case ip is used.
1055   void CompareObjectType(Register heap_object, Register map, Register type_reg,
1056                          InstanceType type);
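  // Example of the flag usage described above (label, registers, and type are
  // illustrative):
  //   CompareObjectType(receiver, map, type_reg, FIRST_JS_RECEIVER_TYPE);
  //   blt(&not_a_receiver);  // instance type < FIRST_JS_RECEIVER_TYPE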
1057 
1058   // Compare instance type in a map.  map contains a valid map object whose
1059   // object type should be compared with the given type.  This both
1060   // sets the flags and leaves the object type in the type_reg register.
1061   void CompareInstanceType(Register map, Register type_reg, InstanceType type);
1062 
1063   // Check if a map for a JSObject indicates that the object has fast elements.
1064   // Jump to the specified label if it does not.
1065   void CheckFastElements(Register map, Register scratch, Label* fail);
1066 
1067   // Check if a map for a JSObject indicates that the object can have both smi
1068   // and HeapObject elements.  Jump to the specified label if it does not.
1069   void CheckFastObjectElements(Register map, Register scratch, Label* fail);
1070 
1071   // Check if a map for a JSObject indicates that the object has fast smi only
1072   // elements.  Jump to the specified label if it does not.
1073   void CheckFastSmiElements(Register map, Register scratch, Label* fail);
1074 
1075   // Check to see if maybe_number can be stored as a double in
1076   // FastDoubleElements. If it can, store it at the index specified by key in
1077   // the FastDoubleElements array elements. Otherwise jump to fail.
1078   void StoreNumberToDoubleElements(Register value_reg, Register key_reg,
1079                                    Register elements_reg, Register scratch1,
1080                                    DoubleRegister double_scratch, Label* fail,
1081                                    int elements_offset = 0);
1082 
1083   // Compare an object's map with the specified map and its transitioned
1084   // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
1085   // set with result of map compare. If multiple map compares are required, the
1086   // compare sequences branches to early_success.
1087   void CompareMap(Register obj, Register scratch, Handle<Map> map,
1088                   Label* early_success);
1089 
1090   // As above, but the map of the object is already loaded into the register
1091   // which is preserved by the code generated.
1092   void CompareMap(Register obj_map, Handle<Map> map, Label* early_success);
1093 
1094   // Check if the map of an object is equal to a specified map and branch to
1095   // label if not. Skip the smi check if not required (object is known to be a
1096   // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
1097   // against maps that are ElementsKind transition maps of the specified map.
1098   void CheckMap(Register obj, Register scratch, Handle<Map> map, Label* fail,
1099                 SmiCheckType smi_check_type);
1100 
1101   void CheckMap(Register obj, Register scratch, Heap::RootListIndex index,
1102                 Label* fail, SmiCheckType smi_check_type);
1103 
1104   // Check if the map of an object is equal to a specified weak map and branch
1105   // to a specified target if equal. Skip the smi check if not required
1106   // (object is known to be a heap object)
1107   void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
1108                        Handle<WeakCell> cell, Handle<Code> success,
1109                        SmiCheckType smi_check_type);
1110 
1111   // Compare the given value and the value of weak cell.
1112   void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch,
1113                     CRegister cr = cr7);
1114 
1115   void GetWeakValue(Register value, Handle<WeakCell> cell);
1116 
1117   // Load the value of the weak cell in the value register. Branch to the given
1118   // miss label if the weak cell was cleared.
1119   void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
1120 
1121   // Compare the object in a register to a value from the root list.
1122   // Uses the ip register as scratch.
1123   void CompareRoot(Register obj, Heap::RootListIndex index);
1124   void PushRoot(Heap::RootListIndex index) {
1125     LoadRoot(r0, index);
1126     Push(r0);
1127   }
1128 
1129   // Compare the object in a register to a value and jump if they are equal.
1130   void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
1131     CompareRoot(with, index);
1132     beq(if_equal);
1133   }
1134 
1135   // Compare the object in a register to a value and jump if they are not equal.
1136   void JumpIfNotRoot(Register with, Heap::RootListIndex index,
1137                      Label* if_not_equal) {
1138     CompareRoot(with, index);
1139     bne(if_not_equal);
1140   }
1141 
1142   // Load and check the instance type of an object for being a string.
1143   // Loads the type into the second argument register.
1144   // Returns a condition that will be enabled if the object was a string.
1145   Condition IsObjectStringType(Register obj, Register type) {
1146     LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset));
1147     LoadlB(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
1148     mov(r0, Operand(kIsNotStringMask));
1149     AndP(r0, type);
1150     DCHECK_EQ(0u, kStringTag);
1151     return eq;
1152   }
1153 
1154   // Picks out an array index from the hash field.
1155   // Register use:
1156   //   hash - holds the index's hash. Clobbered.
1157   //   index - holds the overwritten index on exit.
1158   void IndexFromHash(Register hash, Register index);
1159 
1160   // Get the number of least significant bits from a register
1161   void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
1162   void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
1163 
1164   // Load the value of a smi object into a FP double register. The register
1165   // scratch1 can be the same register as smi in which case smi will hold the
1166   // untagged value afterwards.
1167   void SmiToDouble(DoubleRegister value, Register smi);
1168 
1169   // Check if a double can be exactly represented as a signed 32-bit integer.
1170   // CR_EQ in cr7 is set if true.
1171   void TestDoubleIsInt32(DoubleRegister double_input, Register scratch1,
1172                          Register scratch2, DoubleRegister double_scratch);
1173 
1174   // Check if a double is equal to -0.0.
1175   // CR_EQ in cr7 holds the result.
1176   void TestDoubleIsMinusZero(DoubleRegister input, Register scratch1,
1177                              Register scratch2);
1178 
1179   // Check the sign of a double.
1180   // CR_LT in cr7 holds the result.
1181   void TestDoubleSign(DoubleRegister input, Register scratch);
1182   void TestHeapNumberSign(Register input, Register scratch);
1183 
1184   // Try to convert a double to a signed 32-bit integer.
1185   // CR_EQ in cr7 is set and result assigned if the conversion is exact.
1186   void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
1187                              Register scratch, DoubleRegister double_scratch);
1188 
1189   // Floor a double and write the value to the result register.
1190   // Go to 'exact' if the conversion is exact (to be able to test -0),
1191   // fall through to the calling code if an overflow occurred, else go to
1192   // 'done'. On return, input_high is loaded with the high bits of the input.
1193   void TryInt32Floor(Register result, DoubleRegister double_input,
1194                      Register input_high, Register scratch,
1195                      DoubleRegister double_scratch, Label* done, Label* exact);
1196 
1197   // Performs a truncating conversion of a floating point number as used by
1198   // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
1199   // succeeds, otherwise falls through if result is saturated. On return
1200   // 'result' either holds answer, or is clobbered on fall through.
1201   //
1202   // Only public for the test code in test-code-stubs-arm.cc.
1203   void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
1204                                   Label* done);
1205 
1206   // Performs a truncating conversion of a floating point number as used by
1207   // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
1208   // Exits with 'result' holding the answer.
1209   void TruncateDoubleToI(Register result, DoubleRegister double_input);
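  // Usage sketch (illustrative; r2 and d1 are hypothetical register choices):
  // unlike TryInlineTruncateDoubleToI above, this always exits with the
  // answer in 'result', so no fall-through handling is needed.
  //
  //   TruncateDoubleToI(r2, d1);   // r2 := ToInt32(value in d1)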
1210 
1211   // Performs a truncating conversion of a heap number as used by
1212   // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'object'
1213   // must be different registers.  Exits with 'result' holding the answer.
1214   void TruncateHeapNumberToI(Register result, Register object);
1215 
1216   // Converts the smi or heap number in object to an int32 using the rules
1217   // for ToInt32 as described in ECMAScript 9.5: the value is truncated
1218   // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'object' must be
1219   // different registers.
1220   void TruncateNumberToI(Register object, Register result,
1221                          Register heap_number_map, Register scratch1,
1222                          Label* not_int32);
1223 
1224   // Overflow handling functions.
1225   // Usage: call the appropriate arithmetic function and then call one of the
1226   // flow control functions with the corresponding label.
1227 
1228   // Compute dst = left + right, setting condition codes. dst may be the same
1229   // as either left or right (or a unique register). left and right must not
1230   // be the same register.
1231   void AddAndCheckForOverflow(Register dst, Register left, Register right,
1232                               Register overflow_dst, Register scratch = r0);
1233   void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
1234                               Register overflow_dst, Register scratch = r0);
1235 
1236   // Compute dst = left - right, setting condition codes. dst may be the same
1237   // as either left or right (or a unique register). left and right must not
1238   // be the same register.
1239   void SubAndCheckForOverflow(Register dst, Register left, Register right,
1240                               Register overflow_dst, Register scratch = r0);
1241 
1242   void BranchOnOverflow(Label* label) { blt(label /*, cr0*/); }
1243 
1244   void BranchOnNoOverflow(Label* label) { bge(label /*, cr0*/); }
1245 
1246   void RetOnOverflow(void) {
1247     Label label;
1248 
1249     blt(&label /*, cr0*/);
1250     Ret();
1251     bind(&label);
1252   }
1253 
1254   void RetOnNoOverflow(void) {
1255     Label label;
1256 
1257     bge(&label /*, cr0*/);
1258     Ret();
1259     bind(&label);
1260   }
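  // Usage sketch for the overflow helpers above (illustrative; register and
  // label names are hypothetical):
  //
  //   Label overflow;
  //   AddAndCheckForOverflow(r2, r3, r4, r5 /* overflow_dst */);
  //   BranchOnOverflow(&overflow);
  //   // ... r2 holds r3 + r4; no overflow occurred ...
  //   bind(&overflow);
  //   // ... handle the overflowing addition ...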
1261 
1262   // ---------------------------------------------------------------------------
1263   // Runtime calls
1264 
1265   // Call a code stub.
1266   void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None(),
1267                 Condition cond = al);
1268 
1269   // Tail call a code stub (jump).
1270   void TailCallStub(CodeStub* stub, Condition cond = al);
1271 
1272   // Call a runtime routine.
1273   void CallRuntime(const Runtime::Function* f, int num_arguments,
1274                    SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1275   void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
1276     const Runtime::Function* function = Runtime::FunctionForId(fid);
1277     CallRuntime(function, function->nargs, kSaveFPRegs);
1278   }
1279 
1280   // Convenience function: Same as above, but takes the fid instead.
1281   void CallRuntime(Runtime::FunctionId fid,
1282                    SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1283     const Runtime::Function* function = Runtime::FunctionForId(fid);
1284     CallRuntime(function, function->nargs, save_doubles);
1285   }
1286 
1287   // Convenience function: Same as above, but takes the fid instead.
1288   void CallRuntime(Runtime::FunctionId fid, int num_arguments,
1289                    SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1290     CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
1291   }
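  // Usage sketch (illustrative; Runtime::kSomeFunction is a placeholder id).
  // Arguments are passed on the stack following the usual runtime calling
  // convention, so they are pushed before the call is emitted:
  //
  //   // ... push the arguments ...
  //   CallRuntime(Runtime::kSomeFunction);      // nargs taken from the id
  //   CallRuntime(Runtime::kSomeFunction, 2);   // explicit argument count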
1292 
1293   // Convenience function: call an external reference.
1294   void CallExternalReference(const ExternalReference& ext, int num_arguments);
1295 
1296   // Convenience function: tail call a runtime routine (jump).
1297   void TailCallRuntime(Runtime::FunctionId fid);
1298 
1299   int CalculateStackPassedWords(int num_reg_arguments,
1300                                 int num_double_arguments);
1301 
1302   // Before calling a C-function from generated code, align arguments on stack.
1303   // After aligning the frame, non-register arguments must be stored in
1304   // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
1305   // are word sized. If double arguments are used, this function assumes that
1306   // all double arguments are stored before core registers; otherwise the
1307   // correct alignment of the double values is not guaranteed.
1308   // Some compilers/platforms require the stack to be aligned when calling
1309   // C++ code.
1310   // Needs a scratch register to do some arithmetic. This register will be
1311   // trashed.
1312   void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
1313                             Register scratch);
1314   void PrepareCallCFunction(int num_reg_arguments, Register scratch);
1315 
1316   // There are two ways of passing double arguments, depending on whether a
1317   // soft or hard floating point ABI is used. These functions
1318   // abstract parameter passing for the three different ways we call
1319   // C functions from generated code.
1320   void MovToFloatParameter(DoubleRegister src);
1321   void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
1322   void MovToFloatResult(DoubleRegister src);
1323 
1324   // Calls a C function and cleans up the space for arguments allocated
1325   // by PrepareCallCFunction. The called function is not allowed to trigger a
1326   // garbage collection, since that might move the code and invalidate the
1327   // return address (unless this is somehow accounted for by the called
1328   // function).
1329   void CallCFunction(ExternalReference function, int num_arguments);
1330   void CallCFunction(Register function, int num_arguments);
1331   void CallCFunction(ExternalReference function, int num_reg_arguments,
1332                      int num_double_arguments);
1333   void CallCFunction(Register function, int num_reg_arguments,
1334                      int num_double_arguments);
1335 
1336   void MovFromFloatParameter(DoubleRegister dst);
1337   void MovFromFloatResult(DoubleRegister dst);
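  // Usage sketch for a call taking and returning one double (illustrative;
  // the ExternalReference and the d0 register name are placeholders):
  //
  //   PrepareCallCFunction(0 /* reg args */, 1 /* double args */, r0);
  //   MovToFloatParameter(d0);
  //   CallCFunction(ExternalReference(...), 0, 1);
  //   MovFromFloatResult(d0);   // fetch the double result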
1338 
1339   // Jump to a runtime routine.
1340   void JumpToExternalReference(const ExternalReference& builtin);
1341 
1342   Handle<Object> CodeObject() {
1343     DCHECK(!code_object_.is_null());
1344     return code_object_;
1345   }
1346 
1347   // Emit code for a truncating division by a constant. The dividend register is
1348   // unchanged and ip gets clobbered. Dividend and result must be different.
1349   void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1350 
1351   // ---------------------------------------------------------------------------
1352   // StatsCounter support
1353 
1354   void SetCounter(StatsCounter* counter, int value, Register scratch1,
1355                   Register scratch2);
1356   void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
1357                         Register scratch2);
1358   void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
1359                         Register scratch2);
1360 
1361   // ---------------------------------------------------------------------------
1362   // Debugging
1363 
1364   // Calls Abort(msg) if the condition cond is not satisfied.
1365   // Use --debug_code to enable.
1366   void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
1367   void AssertFastElements(Register elements);
1368 
1369   // Like Assert(), but always enabled.
1370   void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
1371 
1372   // Print a message to stdout and abort execution.
1373   void Abort(BailoutReason reason);
1374 
1375   // Verify restrictions about code generated in stubs.
1376   void set_generating_stub(bool value) { generating_stub_ = value; }
1377   bool generating_stub() { return generating_stub_; }
1378   void set_has_frame(bool value) { has_frame_ = value; }
1379   bool has_frame() { return has_frame_; }
1380   inline bool AllowThisStubCall(CodeStub* stub);
1381 
1382   // ---------------------------------------------------------------------------
1383   // Number utilities
1384 
1385   // Check whether the value of reg is a power of two and not zero. If not,
1386   // control jumps to the label not_power_of_two_or_zero. If reg is a power of two,
1387   // the register scratch contains the value of (reg - 1) when control falls
1388   // through.
1389   void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch,
1390                                  Label* not_power_of_two_or_zero);
1391   // Check whether the value of reg is a power of two and not zero.
1392   // Control falls through if it is, with scratch containing the mask
1393   // value (reg - 1).
1394   // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
1395   // zero or negative, or jumps to the 'not_power_of_two' label if the value is
1396   // strictly positive but not a power of two.
1397   void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch,
1398                                        Label* zero_and_neg,
1399                                        Label* not_power_of_two);
1400 
1401   // ---------------------------------------------------------------------------
1402   // Bit testing/extraction
1403   //
1404   // Bit numbering is such that the least significant bit is bit 0
1405   // (for consistency between 32/64-bit).
1406 
1407   // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
1408   // and place them into the least significant bits of dst.
1409   inline void ExtractBitRange(Register dst, Register src, int rangeStart,
1410                               int rangeEnd) {
1411     DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
1412 
1413     // Try to use RISBG if possible.
1414     if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1415       int shiftAmount = (64 - rangeEnd) % 64;  // Convert to shift left.
1416       int endBit = 63;  // End is always LSB after shifting.
1417       int startBit = 63 - rangeStart + rangeEnd;
1418       risbg(dst, src, Operand(startBit), Operand(endBit), Operand(shiftAmount),
1419             true);
1420     } else {
1421       if (rangeEnd > 0)  // Don't need to shift if rangeEnd is zero.
1422         ShiftRightP(dst, src, Operand(rangeEnd));
1423       else if (!dst.is(src))  // If we didn't shift, we might need to copy
1424         LoadRR(dst, src);
1425       int width = rangeStart - rangeEnd + 1;
1426 #if V8_TARGET_ARCH_S390X
1427       uint64_t mask = (static_cast<uint64_t>(1) << width) - 1;
1428       nihf(dst, Operand(mask >> 32));
1429       nilf(dst, Operand(mask & 0xFFFFFFFF));
1430       ltgr(dst, dst);
1431 #else
1432       uint32_t mask = (1 << width) - 1;
1433       AndP(dst, Operand(mask));
1434 #endif
1435     }
1436   }
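  // Example (illustrative; the registers are hypothetical): extract bits 7..3
  // of r4 into the least significant bits of r3.
  //
  //   ExtractBitRange(r3, r4, 7, 3);   // r3 := (r4 >> 3) & 0x1f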
1437 
1438   inline void ExtractBit(Register dst, Register src, uint32_t bitNumber) {
1439     ExtractBitRange(dst, src, bitNumber, bitNumber);
1440   }
1441 
1442   // Extract consecutive bits (defined by mask) from src and place them
1443   // into the least significant bits of dst.
1444   inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
1445                              RCBit rc = LeaveRC) {
1446     int start = kBitsPerPointer - 1;
1447     int end;
1448     uintptr_t bit = (1L << start);
1449 
1450     while (bit && (mask & bit) == 0) {
1451       start--;
1452       bit >>= 1;
1453     }
1454     end = start;
1455     bit >>= 1;
1456 
1457     while (bit && (mask & bit)) {
1458       end--;
1459       bit >>= 1;
1460     }
1461 
1462     // 1-bits in mask must be contiguous
1463     DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
1464 
1465     ExtractBitRange(dst, src, start, end);
1466   }
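  // Example (illustrative; the registers are hypothetical): the 1-bits in the
  // mask must be contiguous; they are converted into the start/end pair that
  // ExtractBitRange expects.
  //
  //   ExtractBitMask(r3, r4, 0x000000F0);   // r3 := (r4 >> 4) & 0xf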
1467 
1468   // Test single bit in value.
1469   inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
1470     ExtractBitRange(scratch, value, bitNumber, bitNumber);
1471   }
1472 
1473   // Test consecutive bit range in value.  Range is defined by
1474   // rangeStart - rangeEnd.
1475   inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
1476                            Register scratch = r0) {
1477     ExtractBitRange(scratch, value, rangeStart, rangeEnd);
1478   }
1479 
1480   // Test consecutive bit range in value.  Range is defined by mask.
1481   inline void TestBitMask(Register value, uintptr_t mask,
1482                           Register scratch = r0) {
1483     ExtractBitMask(scratch, value, mask, SetRC);
1484   }
1485 
1486   // ---------------------------------------------------------------------------
1487   // Smi utilities
1488 
1489   // Shift left by kSmiShift
1490   void SmiTag(Register reg) { SmiTag(reg, reg); }
1491   void SmiTag(Register dst, Register src) {
1492     ShiftLeftP(dst, src, Operand(kSmiShift));
1493   }
1494 
1495 #if !V8_TARGET_ARCH_S390X
1496   // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
1497   void SmiTagCheckOverflow(Register reg, Register overflow);
1498   void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
1499 
1500   inline void JumpIfNotSmiCandidate(Register value, Register scratch,
1501                                     Label* not_smi_label) {
1502     // High bits must be identical to fit into an Smi
1503     STATIC_ASSERT(kSmiShift == 1);
1504     AddP(scratch, value, Operand(0x40000000u));
1505     CmpP(scratch, Operand::Zero());
1506     blt(not_smi_label);
1507   }
1508 #endif
1509   inline void TestUnsignedSmiCandidate(Register value, Register scratch) {
1510     // The test is different for unsigned int values. Since we need
1511     // the value to be in the range of a positive smi, we can't
1512     // handle any of the high bits being set in the value.
1513     TestBitRange(value, kBitsPerPointer - 1, kBitsPerPointer - 1 - kSmiShift,
1514                  scratch);
1515   }
1516   inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch,
1517                                             Label* not_smi_label) {
1518     TestUnsignedSmiCandidate(value, scratch);
1519     bne(not_smi_label /*, cr0*/);
1520   }
1521 
1522   void SmiUntag(Register reg) { SmiUntag(reg, reg); }
1523 
1524   void SmiUntag(Register dst, Register src) {
1525     ShiftRightArithP(dst, src, Operand(kSmiShift));
1526   }
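  // Example (illustrative; the registers are hypothetical): tagging shifts
  // left by kSmiShift, untagging shifts back arithmetically.
  //
  //   SmiTag(r3, r4);     // r3 := r4 << kSmiShift
  //   SmiUntag(r4, r3);   // r4 := r3 >> kSmiShift (arithmetic)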
1527 
1528   void SmiToPtrArrayOffset(Register dst, Register src) {
1529 #if V8_TARGET_ARCH_S390X
1530     STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
1531     ShiftRightArithP(dst, src, Operand(kSmiShift - kPointerSizeLog2));
1532 #else
1533     STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
1534     ShiftLeftP(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
1535 #endif
1536   }
1537 
1538   void SmiToByteArrayOffset(Register dst, Register src) { SmiUntag(dst, src); }
1539 
1540   void SmiToShortArrayOffset(Register dst, Register src) {
1541 #if V8_TARGET_ARCH_S390X
1542     STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1);
1543     ShiftRightArithP(dst, src, Operand(kSmiShift - 1));
1544 #else
1545     STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1);
1546     if (!dst.is(src)) {
1547       LoadRR(dst, src);
1548     }
1549 #endif
1550   }
1551 
1552   void SmiToIntArrayOffset(Register dst, Register src) {
1553 #if V8_TARGET_ARCH_S390X
1554     STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2);
1555     ShiftRightArithP(dst, src, Operand(kSmiShift - 2));
1556 #else
1557     STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2);
1558     ShiftLeftP(dst, src, Operand(2 - kSmiShift));
1559 #endif
1560   }
1561 
1562 #define SmiToFloatArrayOffset SmiToIntArrayOffset
1563 
1564   void SmiToDoubleArrayOffset(Register dst, Register src) {
1565 #if V8_TARGET_ARCH_S390X
1566     STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2);
1567     ShiftRightArithP(dst, src, Operand(kSmiShift - kDoubleSizeLog2));
1568 #else
1569     STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kDoubleSizeLog2);
1570     ShiftLeftP(dst, src, Operand(kDoubleSizeLog2 - kSmiShift));
1571 #endif
1572   }
1573 
1574   void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) {
1575     if (kSmiShift < elementSizeLog2) {
1576       ShiftLeftP(dst, src, Operand(elementSizeLog2 - kSmiShift));
1577     } else if (kSmiShift > elementSizeLog2) {
1578       ShiftRightArithP(dst, src, Operand(kSmiShift - elementSizeLog2));
1579     } else if (!dst.is(src)) {
1580       LoadRR(dst, src);
1581     }
1582   }
1583 
1584   void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2,
1585                           bool isSmi, bool keyMaybeNegative) {
1586     if (isSmi) {
1587       SmiToArrayOffset(dst, src, elementSizeLog2);
1588     } else if (keyMaybeNegative ||
1589           !CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1590 #if V8_TARGET_ARCH_S390X
1591       // If the array access is dehoisted, the key, being an int32, can
1592       // contain a negative value and needs to be sign-extended to 64 bits
1593       // for the memory access.
1594       //
1595       // src (key) is a 32-bit integer.  Sign extension ensures the
1596       // upper 32 bits do not contain garbage before being used to
1597       // reference memory.
1598       lgfr(src, src);
1599 #endif
1600       ShiftLeftP(dst, src, Operand(elementSizeLog2));
1601     } else {
1602       // Small optimization to reduce path length.  After the bounds check,
1603       // the key is guaranteed to be non-negative.  Leverage RISBG,
1604       // which also performs zero-extension.
1605       risbg(dst, src, Operand(32 - elementSizeLog2),
1606             Operand(63 - elementSizeLog2), Operand(elementSizeLog2),
1607             true);
1608     }
1609   }
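  // Example (illustrative; the registers are hypothetical): converting a smi
  // index into a byte offset suitable for indexing a pointer-sized element
  // array.
  //
  //   SmiToPtrArrayOffset(r3, r4);   // r3 := untagged(r4) * kPointerSize
  //   // r3 can then be used as the index register of a MemOperand into the
  //   // backing store.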
1610 
1611   // Untag the source value into destination and jump if source is a smi.
1612   // Source and destination can be the same register.
1613   void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1614 
1615   // Untag the source value into destination and jump if source is not a smi.
1616   // Source and destination can be the same register.
1617   void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
1618 
1619   inline void TestIfSmi(Register value) { tmll(value, Operand(1)); }
1620 
1621   inline void TestIfPositiveSmi(Register value, Register scratch) {
1622     STATIC_ASSERT((kSmiTagMask | kSmiSignMask) ==
1623                   (intptr_t)(1UL << (kBitsPerPointer - 1) | 1));
1624     mov(scratch, Operand(kIntptrSignBit | kSmiTagMask));
1625     AndP(scratch, value);
1626   }
1627 
1628   // Jump if the register contains a smi.
1629   inline void JumpIfSmi(Register value, Label* smi_label) {
1630     TestIfSmi(value);
1631     beq(smi_label /*, cr0*/);  // branch if SMI
1632   }
1633   // Jump if the register contains a non-smi.
1634   inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
1635     TestIfSmi(value);
1636     bne(not_smi_label /*, cr0*/);
1637   }
1638   // Jump if either of the registers contains a non-smi.
1639   void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1640   // Jump if either of the registers contains a smi.
1641   void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1642 
1643   // Abort execution if argument is a number, enabled via --debug-code.
1644   void AssertNotNumber(Register object);
1645 
1646   // Abort execution if argument is a smi, enabled via --debug-code.
1647   void AssertNotSmi(Register object);
1648   void AssertSmi(Register object);
1649 
1650 #if V8_TARGET_ARCH_S390X
1651   inline void TestIfInt32(Register value, Register scratch) {
1652     // High bits must be identical to fit into a 32-bit integer.
1653     lgfr(scratch, value);
1654     CmpP(scratch, value);
1655   }
1656 #else
1657   inline void TestIfInt32(Register hi_word, Register lo_word,
1658                           Register scratch) {
1659     // High bits must be identical to fit into a 32-bit integer.
1660     ShiftRightArith(scratch, lo_word, Operand(31));
1661     CmpP(scratch, hi_word);
1662   }
1663 #endif
1664 
1665 #if V8_TARGET_ARCH_S390X
1666   // Ensure it is permissible to read/write the int value directly from
1667   // upper half of the smi.
1668   STATIC_ASSERT(kSmiTag == 0);
1669   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
1670 #endif
1671 #if V8_TARGET_LITTLE_ENDIAN
1672 #define SmiWordOffset(offset) (offset + kPointerSize / 2)
1673 #else
1674 #define SmiWordOffset(offset) offset
1675 #endif
1676 
1677   // Abort execution if argument is not a string, enabled via --debug-code.
1678   void AssertString(Register object);
1679 
1680   // Abort execution if argument is not a name, enabled via --debug-code.
1681   void AssertName(Register object);
1682 
1683   void AssertFunction(Register object);
1684 
1685   // Abort execution if argument is not a JSBoundFunction,
1686   // enabled via --debug-code.
1687   void AssertBoundFunction(Register object);
1688 
1689   // Abort execution if argument is not a JSGeneratorObject,
1690   // enabled via --debug-code.
1691   void AssertGeneratorObject(Register object);
1692 
1693   // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
1694   void AssertReceiver(Register object);
1695 
1696   // Abort execution if argument is not undefined or an AllocationSite, enabled
1697   // via --debug-code.
1698   void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1699 
1700   // Abort execution if reg is not the root value with the given index,
1701   // enabled via --debug-code.
1702   void AssertIsRoot(Register reg, Heap::RootListIndex index);
1703 
1704   // ---------------------------------------------------------------------------
1705   // HeapNumber utilities
1706 
1707   void JumpIfNotHeapNumber(Register object, Register heap_number_map,
1708                            Register scratch, Label* on_not_heap_number);
1709 
1710   // ---------------------------------------------------------------------------
1711   // String utilities
1712 
1713   // Checks if both objects are sequential one-byte strings and jumps to label
1714   // if either is not. Assumes that neither object is a smi.
1715   void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
1716                                                     Register object2,
1717                                                     Register scratch1,
1718                                                     Register scratch2,
1719                                                     Label* failure);
1720 
1721   // Checks if both objects are sequential one-byte strings and jumps to label
1722   // if either is not.
1723   void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
1724                                              Register scratch1,
1725                                              Register scratch2,
1726                                              Label* not_flat_one_byte_strings);
1727 
1728   // Checks if both instance types are sequential one-byte strings and jumps to
1729   // label if either is not.
1730   void JumpIfBothInstanceTypesAreNotSequentialOneByte(
1731       Register first_object_instance_type, Register second_object_instance_type,
1732       Register scratch1, Register scratch2, Label* failure);
1733 
1734   // Check if instance type is sequential one-byte string and jump to label if
1735   // it is not.
1736   void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
1737                                                 Label* failure);
1738 
1739   void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
1740 
1741   void EmitSeqStringSetCharCheck(Register string, Register index,
1742                                  Register value, uint32_t encoding_mask);
1743 
1744   // ---------------------------------------------------------------------------
1745   // Patching helpers.
1746 
1747   void ClampUint8(Register output_reg, Register input_reg);
1748 
1749   // Saturate a value into 8-bit unsigned integer
1750   //   if input_value < 0, output_value is 0
1751   //   if input_value > 255, output_value is 255
1752   //   otherwise output_value is the (int)input_value (round to nearest)
1753   void ClampDoubleToUint8(Register result_reg, DoubleRegister input_reg,
1754                           DoubleRegister temp_double_reg);
1755 
1756   void LoadInstanceDescriptors(Register map, Register descriptors);
1757   void EnumLength(Register dst, Register map);
1758   void NumberOfOwnDescriptors(Register dst, Register map);
1759   void LoadAccessor(Register dst, Register holder, int accessor_index,
1760                     AccessorComponent accessor);
1761 
1762   template <typename Field>
1763   void DecodeField(Register dst, Register src) {
1764     ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
1765   }
1766 
1767   template <typename Field>
1768   void DecodeField(Register reg) {
1769     DecodeField<Field>(reg, reg);
1770   }
1771 
1772   template <typename Field>
1773   void DecodeFieldToSmi(Register dst, Register src) {
1774     // TODO(joransiu): Optimize into single instruction
1775     DecodeField<Field>(dst, src);
1776     SmiTag(dst);
1777   }
1778 
1779   template <typename Field>
1780   void DecodeFieldToSmi(Register reg) {
1781     DecodeFieldToSmi<Field>(reg, reg);
1782   }
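  // Example (illustrative): Field is any BitField-style class exposing
  // kShift and kSize; SomeBitField below is a placeholder name, not a
  // reference to an actual V8 bit field.
  //
  //   DecodeField<SomeBitField>(r3, r4);       // r3 := raw field value
  //   DecodeFieldToSmi<SomeBitField>(r3, r4);  // r3 := field value as a smi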
1783 
1784   // Load the type feedback vector from a JavaScript frame.
1785   void EmitLoadTypeFeedbackVector(Register vector);
1786 
1787   // Activation support.
1788   void EnterFrame(StackFrame::Type type,
1789                   bool load_constant_pool_pointer_reg = false);
1790   // Returns the pc offset at which the frame ends.
1791   int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
1792 
1793   // Expects object in r2 and returns map with validated enum cache
1794   // in r2.  Assumes that any other register can be used as a scratch.
1795   void CheckEnumCache(Label* call_runtime);
1796 
1797   // AllocationMemento support. Arrays may have an associated
1798   // AllocationMemento object that can be checked for in order to pretransition
1799   // to another type.
1800   // On entry, receiver_reg should point to the array object.
1801   // scratch_reg gets clobbered.
1802   // If allocation info is present, condition flags are set to eq.
1803   void TestJSArrayForAllocationMemento(Register receiver_reg,
1804                                        Register scratch_reg,
1805                                        Register scratch2_reg,
1806                                        Label* no_memento_found);
1807 
1808   void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1809                                          Register scratch_reg,
1810                                          Register scratch2_reg,
1811                                          Label* memento_found) {
1812     Label no_memento_found;
1813     TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, scratch2_reg,
1814                                     &no_memento_found);
1815     beq(memento_found);
1816     bind(&no_memento_found);
1817   }
1818 
1819   // Jumps to found label if a prototype map has dictionary elements.
1820   void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1821                                         Register scratch1, Label* found);
1822 
1823  private:
1824   static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
1825 
1826   void CallCFunctionHelper(Register function, int num_reg_arguments,
1827                            int num_double_arguments);
1828 
1829   void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
1830             CRegister cr = cr7);
1831 
1832   // Helper functions for generating invokes.
1833   void InvokePrologue(const ParameterCount& expected,
1834                       const ParameterCount& actual, Label* done,
1835                       bool* definitely_mismatches, InvokeFlag flag,
1836                       const CallWrapper& call_wrapper);
1837 
1838   void InitializeNewString(Register string, Register length,
1839                            Heap::RootListIndex map_index, Register scratch1,
1840                            Register scratch2);
1841 
1842   // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1843   void InNewSpace(Register object, Register scratch,
1844                   Condition cond,  // eq for new space, ne otherwise.
1845                   Label* branch);
1846 
1847   // Helper for finding the mark bits for an address.  Afterwards, the
1848   // bitmap register points at the word with the mark bits and the mask
1849   // register holds the position of the first bit.  Leaves addr_reg unchanged.
1850   inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
1851                           Register mask_reg);
1852 
1853   static const RegList kSafepointSavedRegisters;
1854   static const int kNumSafepointSavedRegisters;
1855 
1856   // Compute memory operands for safepoint stack slots.
1857   static int SafepointRegisterStackIndex(int reg_code);
1858   MemOperand SafepointRegisterSlot(Register reg);
1859   MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1860 
1861   bool generating_stub_;
1862   bool has_frame_;
1863   // This handle will be patched with the code object on installation.
1864   Handle<Object> code_object_;
1865 
1866   // Needs access to SafepointRegisterStackIndex for compiled frame
1867   // traversal.
1868   friend class StandardFrame;
1869 };
1870 
1871 // The code patcher is used to patch (typically) small parts of code e.g. for
1872 // debugging and other types of instrumentation. When using the code patcher
1873 // the exact number of bytes specified must be emitted. It is not legal to emit
1874   // relocation information. If any of these constraints are violated, it causes
1875 // an assertion to fail.
1876 class CodePatcher {
1877  public:
1878   enum FlushICache { FLUSH, DONT_FLUSH };
1879 
1880   CodePatcher(Isolate* isolate, byte* address, int instructions,
1881               FlushICache flush_cache = FLUSH);
1882   ~CodePatcher();
1883 
1884   // Macro assembler to emit code.
1885   MacroAssembler* masm() { return &masm_; }
1886 
1887  private:
1888   byte* address_;            // The address of the code being patched.
1889   int size_;                 // Number of bytes of the expected patch size.
1890   MacroAssembler masm_;      // Macro assembler used to generate the code.
1891   FlushICache flush_cache_;  // Whether to flush the I cache after patching.
1892 };
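// Usage sketch (illustrative; the address and instruction count are
// placeholders): the patcher wraps a small MacroAssembler over existing code;
// exactly the declared amount of code must be emitted before it is destroyed,
// and the instruction cache is flushed afterwards unless DONT_FLUSH is passed.
//
//   CodePatcher patcher(isolate, pc, 2);
//   patcher.masm()->...;   // emit the replacement instructions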
1893 
1894 // -----------------------------------------------------------------------------
1895 // Static helper functions.
1896 
1897 inline MemOperand ContextMemOperand(Register context, int index = 0) {
1898   return MemOperand(context, Context::SlotOffset(index));
1899 }
1900 
1901 inline MemOperand NativeContextMemOperand() {
1902   return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
1903 }
1904 
1905 #ifdef GENERATED_CODE_COVERAGE
1906 #define CODE_COVERAGE_STRINGIFY(x) #x
1907 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1908 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1909 #define ACCESS_MASM(masm)    \
1910   masm->stop(__FILE_LINE__); \
1911   masm->
1912 #else
1913 #define ACCESS_MASM(masm) masm->
1914 #endif
1915 }  // namespace internal
1916 }  // namespace v8
1917 
1918 #endif  // V8_S390_MACRO_ASSEMBLER_S390_H_
1919