1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <assert.h>  // For assert
6 #include <limits.h>  // For LONG_MIN, LONG_MAX.
7 
8 #if V8_TARGET_ARCH_S390
9 
10 #include "src/base/bits.h"
11 #include "src/base/division-by-constant.h"
12 #include "src/bootstrapper.h"
13 #include "src/codegen.h"
14 #include "src/debug/debug.h"
15 #include "src/register-configuration.h"
16 #include "src/runtime/runtime.h"
17 
18 #include "src/s390/macro-assembler-s390.h"
19 
20 namespace v8 {
21 namespace internal {
22 
23 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
24                                CodeObjectRequired create_code_object)
25     : Assembler(arg_isolate, buffer, size),
26       generating_stub_(false),
27       has_frame_(false) {
28   if (create_code_object == CodeObjectRequired::kYes) {
29     code_object_ =
30         Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
31   }
32 }
33 
34 void MacroAssembler::Jump(Register target) { b(target); }
35 
36 void MacroAssembler::JumpToJSEntry(Register target) {
37   Move(ip, target);
38   Jump(ip);
39 }
40 
41 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
42                           Condition cond, CRegister) {
43   Label skip;
44 
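  // When cond is not al, branch around the absolute jump on the negated
  // condition, since the mov/branch-via-ip sequence below is unconditional.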
45   if (cond != al) b(NegateCondition(cond), &skip);
46 
47   DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
48 
49   mov(ip, Operand(target, rmode));
50   b(ip);
51 
52   bind(&skip);
53 }
54 
55 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
56                           CRegister cr) {
57   DCHECK(!RelocInfo::IsCodeTarget(rmode));
58   Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
59 }
60 
61 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
62                           Condition cond) {
63   DCHECK(RelocInfo::IsCodeTarget(rmode));
64   jump(code, rmode, cond);
65 }
66 
67 int MacroAssembler::CallSize(Register target) { return 2; }  // BASR
68 
69 void MacroAssembler::Call(Register target) {
70   Label start;
71   bind(&start);
72 
73   // Branch to target via indirect branch
74   basr(r14, target);
75 
76   DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
77 }
78 
79 void MacroAssembler::CallJSEntry(Register target) {
80   DCHECK(target.is(ip));
81   Call(target);
82 }
83 
84 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
85                              Condition cond) {
86   // S390 Assembler::move sequence is IILF / IIHF
87   int size;
88 #if V8_TARGET_ARCH_S390X
89   size = 14;  // IILF + IIHF + BASR
90 #else
91   size = 8;  // IILF + BASR
92 #endif
93   return size;
94 }
95 
96 int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
97                                                    RelocInfo::Mode rmode,
98                                                    Condition cond) {
99   // S390 Assembler::move sequence is IILF / IIHF
100   int size;
101 #if V8_TARGET_ARCH_S390X
102   size = 14;  // IILF + IIHF + BASR
103 #else
104   size = 8;  // IILF + BASR
105 #endif
106   return size;
107 }
108 
109 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
110                           Condition cond) {
111   DCHECK(cond == al);
112 
113 #ifdef DEBUG
114   // Check the expected size before generating code to ensure we assume the same
115   // constant pool availability (e.g., whether constant pool is full or not).
116   int expected_size = CallSize(target, rmode, cond);
117   Label start;
118   bind(&start);
119 #endif
120 
121   mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode));
122   basr(r14, ip);
123 
124   DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
125 }
126 
127 int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
128                              TypeFeedbackId ast_id, Condition cond) {
129   return 6;  // BRASL
130 }
131 
132 void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
133                           TypeFeedbackId ast_id, Condition cond) {
134   DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);
135 
136 #ifdef DEBUG
137   // Check the expected size before generating code to ensure we assume the same
138   // constant pool availability (e.g., whether constant pool is full or not).
139   int expected_size = CallSize(code, rmode, ast_id, cond);
140   Label start;
141   bind(&start);
142 #endif
143   call(code, rmode, ast_id);
144   DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
145 }
146 
147 void MacroAssembler::Drop(int count) {
148   if (count > 0) {
149     int total = count * kPointerSize;
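    // Use the shortest form that can encode the adjustment: LA takes an
    // unsigned 12-bit displacement, LAY a signed 20-bit one; otherwise fall
    // back to an explicit add.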
150     if (is_uint12(total)) {
151       la(sp, MemOperand(sp, total));
152     } else if (is_int20(total)) {
153       lay(sp, MemOperand(sp, total));
154     } else {
155       AddP(sp, Operand(total));
156     }
157   }
158 }
159 
160 void MacroAssembler::Drop(Register count, Register scratch) {
161   ShiftLeftP(scratch, count, Operand(kPointerSizeLog2));
162   AddP(sp, sp, scratch);
163 }
164 
165 void MacroAssembler::Call(Label* target) { b(r14, target); }
166 
167 void MacroAssembler::Push(Handle<Object> handle) {
168   mov(r0, Operand(handle));
169   push(r0);
170 }
171 
172 void MacroAssembler::Move(Register dst, Handle<Object> value) {
173   AllowDeferredHandleDereference smi_check;
174   if (value->IsSmi()) {
175     LoadSmiLiteral(dst, reinterpret_cast<Smi*>(*value));
176   } else {
177     DCHECK(value->IsHeapObject());
178     if (isolate()->heap()->InNewSpace(*value)) {
179       Handle<Cell> cell = isolate()->factory()->NewCell(value);
180       mov(dst, Operand(cell));
181       LoadP(dst, FieldMemOperand(dst, Cell::kValueOffset));
182     } else {
183       mov(dst, Operand(value));
184     }
185   }
186 }
187 
188 void MacroAssembler::Move(Register dst, Register src, Condition cond) {
189   if (!dst.is(src)) {
190     LoadRR(dst, src);
191   }
192 }
193 
194 void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
195   if (!dst.is(src)) {
196     ldr(dst, src);
197   }
198 }
199 
200 void MacroAssembler::MultiPush(RegList regs, Register location) {
201   int16_t num_to_push = NumberOfBitsSet(regs);
202   int16_t stack_offset = num_to_push * kPointerSize;
203 
204   SubP(location, location, Operand(stack_offset));
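  // Store from the highest register encoding downwards so that the
  // lowest-numbered register ends up closest to the stack pointer.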
205   for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
206     if ((regs & (1 << i)) != 0) {
207       stack_offset -= kPointerSize;
208       StoreP(ToRegister(i), MemOperand(location, stack_offset));
209     }
210   }
211 }
212 
213 void MacroAssembler::MultiPop(RegList regs, Register location) {
214   int16_t stack_offset = 0;
215 
216   for (int16_t i = 0; i < Register::kNumRegisters; i++) {
217     if ((regs & (1 << i)) != 0) {
218       LoadP(ToRegister(i), MemOperand(location, stack_offset));
219       stack_offset += kPointerSize;
220     }
221   }
222   AddP(location, location, Operand(stack_offset));
223 }
224 
225 void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) {
226   int16_t num_to_push = NumberOfBitsSet(dregs);
227   int16_t stack_offset = num_to_push * kDoubleSize;
228 
229   SubP(location, location, Operand(stack_offset));
230   for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
231     if ((dregs & (1 << i)) != 0) {
232       DoubleRegister dreg = DoubleRegister::from_code(i);
233       stack_offset -= kDoubleSize;
234       StoreDouble(dreg, MemOperand(location, stack_offset));
235     }
236   }
237 }
238 
239 void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) {
240   int16_t stack_offset = 0;
241 
242   for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
243     if ((dregs & (1 << i)) != 0) {
244       DoubleRegister dreg = DoubleRegister::from_code(i);
245       LoadDouble(dreg, MemOperand(location, stack_offset));
246       stack_offset += kDoubleSize;
247     }
248   }
249   AddP(location, location, Operand(stack_offset));
250 }
251 
252 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
253                               Condition) {
254   LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
255 }
256 
257 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
258                                Condition) {
259   DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
260   StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2));
261 }
262 
263 void MacroAssembler::InNewSpace(Register object, Register scratch,
264                                 Condition cond, Label* branch) {
265   DCHECK(cond == eq || cond == ne);
266   // TODO(joransiu): check if we can merge mov Operand into AndP.
267   const int mask =
268       (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
269   CheckPageFlag(object, scratch, mask, cond, branch);
270 }
271 
272 void MacroAssembler::RecordWriteField(
273     Register object, int offset, Register value, Register dst,
274     LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
275     RememberedSetAction remembered_set_action, SmiCheck smi_check,
276     PointersToHereCheck pointers_to_here_check_for_value) {
277   // First, check if a write barrier is even needed. The tests below
278   // catch stores of Smis.
279   Label done;
280 
281   // Skip barrier if writing a smi.
282   if (smi_check == INLINE_SMI_CHECK) {
283     JumpIfSmi(value, &done);
284   }
285 
286   // Although the object register is tagged, the offset is relative to the start
287   // of the object, so the offset must be a multiple of kPointerSize.
288   DCHECK(IsAligned(offset, kPointerSize));
289 
290   lay(dst, MemOperand(object, offset - kHeapObjectTag));
291   if (emit_debug_code()) {
292     Label ok;
293     AndP(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
294     beq(&ok, Label::kNear);
295     stop("Unaligned cell in write barrier");
296     bind(&ok);
297   }
298 
299   RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
300               OMIT_SMI_CHECK, pointers_to_here_check_for_value);
301 
302   bind(&done);
303 
304   // Clobber clobbered input registers when running with the debug-code flag
305   // turned on to provoke errors.
306   if (emit_debug_code()) {
307     mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
308     mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
309   }
310 }
311 
312 // Will clobber 4 registers: object, map, dst, ip.  The
313 // register 'object' contains a heap object pointer.
314 void MacroAssembler::RecordWriteForMap(Register object, Register map,
315                                        Register dst,
316                                        LinkRegisterStatus lr_status,
317                                        SaveFPRegsMode fp_mode) {
318   if (emit_debug_code()) {
319     LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset));
320     CmpP(dst, Operand(isolate()->factory()->meta_map()));
321     Check(eq, kWrongAddressOrValuePassedToRecordWrite);
322   }
323 
324   if (!FLAG_incremental_marking) {
325     return;
326   }
327 
328   if (emit_debug_code()) {
329     CmpP(map, FieldMemOperand(object, HeapObject::kMapOffset));
330     Check(eq, kWrongAddressOrValuePassedToRecordWrite);
331   }
332 
333   Label done;
334 
335   // A single check of the map's page's interesting flag suffices, since it is
336   // only set during incremental collection, and then it's also guaranteed that
337   // the from object's page's interesting flag is also set.  This optimization
338   // relies on the fact that maps can never be in new space.
339   CheckPageFlag(map,
340                 map,  // Used as scratch.
341                 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
342 
343   lay(dst, MemOperand(object, HeapObject::kMapOffset - kHeapObjectTag));
344   if (emit_debug_code()) {
345     Label ok;
346     AndP(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
347     beq(&ok, Label::kNear);
348     stop("Unaligned cell in write barrier");
349     bind(&ok);
350   }
351 
352   // Record the actual write.
353   if (lr_status == kLRHasNotBeenSaved) {
354     push(r14);
355   }
356   RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
357                        fp_mode);
358   CallStub(&stub);
359   if (lr_status == kLRHasNotBeenSaved) {
360     pop(r14);
361   }
362 
363   bind(&done);
364 
365   // Count number of write barriers in generated code.
366   isolate()->counters()->write_barriers_static()->Increment();
367   IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
368 
369   // Clobber clobbered registers when running with the debug-code flag
370   // turned on to provoke errors.
371   if (emit_debug_code()) {
372     mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 12)));
373     mov(map, Operand(bit_cast<intptr_t>(kZapValue + 16)));
374   }
375 }
376 
377 // Will clobber 4 registers: object, address, scratch, ip.  The
378 // register 'object' contains a heap object pointer.  The heap object
379 // tag is shifted away.
380 void MacroAssembler::RecordWrite(
381     Register object, Register address, Register value,
382     LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode,
383     RememberedSetAction remembered_set_action, SmiCheck smi_check,
384     PointersToHereCheck pointers_to_here_check_for_value) {
385   DCHECK(!object.is(value));
386   if (emit_debug_code()) {
387     CmpP(value, MemOperand(address));
388     Check(eq, kWrongAddressOrValuePassedToRecordWrite);
389   }
390 
391   if (remembered_set_action == OMIT_REMEMBERED_SET &&
392       !FLAG_incremental_marking) {
393     return;
394   }
395   // First, check if a write barrier is even needed. The tests below
396   // catch stores of smis and stores into the young generation.
397   Label done;
398 
399   if (smi_check == INLINE_SMI_CHECK) {
400     JumpIfSmi(value, &done);
401   }
402 
403   if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
404     CheckPageFlag(value,
405                   value,  // Used as scratch.
406                   MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
407   }
408   CheckPageFlag(object,
409                 value,  // Used as scratch.
410                 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
411 
412   // Record the actual write.
413   if (lr_status == kLRHasNotBeenSaved) {
414     push(r14);
415   }
416   RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
417                        fp_mode);
418   CallStub(&stub);
419   if (lr_status == kLRHasNotBeenSaved) {
420     pop(r14);
421   }
422 
423   bind(&done);
424 
425   // Count number of write barriers in generated code.
426   isolate()->counters()->write_barriers_static()->Increment();
427   IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
428                    value);
429 
430   // Clobber clobbered registers when running with the debug-code flag
431   // turned on to provoke errors.
432   if (emit_debug_code()) {
433     mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
434     mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
435   }
436 }
437 
438 void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
439                                                Register code_entry,
440                                                Register scratch) {
441   const int offset = JSFunction::kCodeEntryOffset;
442 
443   // Since a code entry (value) is always in old space, we don't need to update
444   // remembered set. If incremental marking is off, there is nothing for us to
445   // do.
446   if (!FLAG_incremental_marking) return;
447 
448   DCHECK(js_function.is(r3));
449   DCHECK(code_entry.is(r6));
450   DCHECK(scratch.is(r7));
451   AssertNotSmi(js_function);
452 
453   if (emit_debug_code()) {
454     AddP(scratch, js_function, Operand(offset - kHeapObjectTag));
455     LoadP(ip, MemOperand(scratch));
456     CmpP(ip, code_entry);
457     Check(eq, kWrongAddressOrValuePassedToRecordWrite);
458   }
459 
460   // First, check if a write barrier is even needed. The tests below
461   // catch stores of Smis and stores into young gen.
462   Label done;
463 
464   CheckPageFlag(code_entry, scratch,
465                 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
466   CheckPageFlag(js_function, scratch,
467                 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
468 
469   const Register dst = scratch;
470   AddP(dst, js_function, Operand(offset - kHeapObjectTag));
471 
472   // Save caller-saved registers.  js_function and code_entry are in the
473   // caller-saved register list.
474   DCHECK(kJSCallerSaved & js_function.bit());
475   // DCHECK(kJSCallerSaved & code_entry.bit());
476   MultiPush(kJSCallerSaved | code_entry.bit() | r14.bit());
477 
478   int argument_count = 3;
479   PrepareCallCFunction(argument_count, code_entry);
480 
481   LoadRR(r2, js_function);
482   LoadRR(r3, dst);
483   mov(r4, Operand(ExternalReference::isolate_address(isolate())));
484 
485   {
486     AllowExternalCallThatCantCauseGC scope(this);
487     CallCFunction(
488         ExternalReference::incremental_marking_record_write_code_entry_function(
489             isolate()),
490         argument_count);
491   }
492 
493   // Restore caller-saved registers (including js_function and code_entry).
494   MultiPop(kJSCallerSaved | code_entry.bit() | r14.bit());
495 
496   bind(&done);
497 }
498 
499 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
500                                          Register address, Register scratch,
501                                          SaveFPRegsMode fp_mode,
502                                          RememberedSetFinalAction and_then) {
503   Label done;
504   if (emit_debug_code()) {
505     Label ok;
506     JumpIfNotInNewSpace(object, scratch, &ok);
507     stop("Remembered set pointer is in new space");
508     bind(&ok);
509   }
510   // Load store buffer top.
511   ExternalReference store_buffer =
512       ExternalReference::store_buffer_top(isolate());
513   mov(ip, Operand(store_buffer));
514   LoadP(scratch, MemOperand(ip));
515   // Store pointer to buffer and increment buffer top.
516   StoreP(address, MemOperand(scratch));
517   AddP(scratch, Operand(kPointerSize));
518   // Write back new top of buffer.
519   StoreP(scratch, MemOperand(ip));
520   // Call stub on end of buffer.
521   // Check for end of buffer.
522   AndP(scratch, Operand(StoreBuffer::kStoreBufferMask));
523 
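  // The bne below skips the overflow stub when the masked top is non-zero;
  // a zero result means the new top has wrapped to a store-buffer boundary
  // (the buffer is full), so we fall through to the stub.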
524   if (and_then == kFallThroughAtEnd) {
525     bne(&done, Label::kNear);
526   } else {
527     DCHECK(and_then == kReturnAtEnd);
528     bne(&done, Label::kNear);
529   }
530   push(r14);
531   StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
532   CallStub(&store_buffer_overflow);
533   pop(r14);
534   bind(&done);
535   if (and_then == kReturnAtEnd) {
536     Ret();
537   }
538 }
539 
540 void MacroAssembler::PushCommonFrame(Register marker_reg) {
541   int fp_delta = 0;
542   CleanseP(r14);
543   if (marker_reg.is_valid()) {
544     Push(r14, fp, marker_reg);
545     fp_delta = 1;
546   } else {
547     Push(r14, fp);
548     fp_delta = 0;
549   }
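  // Point the new fp at the slot holding the saved caller fp.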
550   la(fp, MemOperand(sp, fp_delta * kPointerSize));
551 }
552 
553 void MacroAssembler::PopCommonFrame(Register marker_reg) {
554   if (marker_reg.is_valid()) {
555     Pop(r14, fp, marker_reg);
556   } else {
557     Pop(r14, fp);
558   }
559 }
560 
561 void MacroAssembler::PushStandardFrame(Register function_reg) {
562   int fp_delta = 0;
563   CleanseP(r14);
564   if (function_reg.is_valid()) {
565     Push(r14, fp, cp, function_reg);
566     fp_delta = 2;
567   } else {
568     Push(r14, fp, cp);
569     fp_delta = 1;
570   }
571   la(fp, MemOperand(sp, fp_delta * kPointerSize));
572 }
573 
574 void MacroAssembler::RestoreFrameStateForTailCall() {
575   // if (FLAG_enable_embedded_constant_pool) {
576   //   LoadP(kConstantPoolRegister,
577   //         MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
578   //   set_constant_pool_available(false);
579   // }
580   DCHECK(!FLAG_enable_embedded_constant_pool);
581   LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
582   LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
583 }
584 
585 const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable;
586 const int MacroAssembler::kNumSafepointSavedRegisters =
587     Register::kNumAllocatable;
588 
589 // Push and pop all registers that can hold pointers.
590 void MacroAssembler::PushSafepointRegisters() {
591   // Safepoints expect a block of kNumSafepointRegisters values on the
592   // stack, so adjust the stack for unsaved registers.
593   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
594   DCHECK(num_unsaved >= 0);
595   if (num_unsaved > 0) {
596     lay(sp, MemOperand(sp, -(num_unsaved * kPointerSize)));
597   }
598   MultiPush(kSafepointSavedRegisters);
599 }
600 
601 void MacroAssembler::PopSafepointRegisters() {
602   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
603   MultiPop(kSafepointSavedRegisters);
604   if (num_unsaved > 0) {
605     la(sp, MemOperand(sp, num_unsaved * kPointerSize));
606   }
607 }
608 
609 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
610   StoreP(src, SafepointRegisterSlot(dst));
611 }
612 
613 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
614   LoadP(dst, SafepointRegisterSlot(src));
615 }
616 
617 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
618   // The registers are pushed starting with the highest encoding,
619   // which means that lowest encodings are closest to the stack pointer.
620   RegList regs = kSafepointSavedRegisters;
621   int index = 0;
622 
623   DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
624 
625   for (int16_t i = 0; i < reg_code; i++) {
626     if ((regs & (1 << i)) != 0) {
627       index++;
628     }
629   }
630 
631   return index;
632 }
633 
634 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
635   return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
636 }
637 
638 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
639   // General purpose registers are pushed last on the stack.
640   const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
641   int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
642   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
643   return MemOperand(sp, doubles_size + register_offset);
644 }
645 
646 void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
647                                      const DoubleRegister src) {
648   // Turn potential sNaN into qNaN
649   if (!dst.is(src)) ldr(dst, src);
650   lzdr(kDoubleRegZero);
651   sdbr(dst, kDoubleRegZero);
652 }
653 
654 void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
655   cdfbr(dst, src);
656 }
657 
658 void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
659                                                 DoubleRegister dst) {
660   if (CpuFeatures::IsSupported(FLOATING_POINT_EXT)) {
661     cdlfbr(Condition(5), Condition(0), dst, src);
662   } else {
663     // zero-extend src
664     llgfr(src, src);
665     // convert to double
666     cdgbr(dst, src);
667   }
668 }
669 
670 void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
671   cefbr(dst, src);
672 }
673 
674 void MacroAssembler::ConvertUnsignedIntToFloat(Register src,
675                                                DoubleRegister dst) {
676   celfbr(Condition(0), Condition(0), dst, src);
677 }
678 
679 #if V8_TARGET_ARCH_S390X
680 void MacroAssembler::ConvertInt64ToDouble(Register src,
681                                           DoubleRegister double_dst) {
682   cdgbr(double_dst, src);
683 }
684 
685 void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src,
686                                                  DoubleRegister double_dst) {
687   celgbr(Condition(0), Condition(0), double_dst, src);
688 }
689 
690 void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src,
691                                                   DoubleRegister double_dst) {
692   cdlgbr(Condition(0), Condition(0), double_dst, src);
693 }
694 
695 void MacroAssembler::ConvertInt64ToFloat(Register src,
696                                          DoubleRegister double_dst) {
697   cegbr(double_dst, src);
698 }
699 #endif
700 
701 void MacroAssembler::ConvertFloat32ToInt64(const DoubleRegister double_input,
702 #if !V8_TARGET_ARCH_S390X
703                                            const Register dst_hi,
704 #endif
705                                            const Register dst,
706                                            const DoubleRegister double_dst,
707                                            FPRoundingMode rounding_mode) {
708   Condition m = Condition(0);
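  // The Condition operand is the conversion instruction's rounding-mode mask
  // (M3): 5 = round toward 0, 6 = round toward +infinity, 7 = toward -infinity.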
709   switch (rounding_mode) {
710     case kRoundToZero:
711       m = Condition(5);
712       break;
713     case kRoundToNearest:
714       UNIMPLEMENTED();
715       break;
716     case kRoundToPlusInf:
717       m = Condition(6);
718       break;
719     case kRoundToMinusInf:
720       m = Condition(7);
721       break;
722     default:
723       UNIMPLEMENTED();
724       break;
725   }
726   cgebr(m, dst, double_input);
727   ldgr(double_dst, dst);
728 #if !V8_TARGET_ARCH_S390X
729   srlg(dst_hi, dst, Operand(32));
730 #endif
731 }
732 
733 void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
734 #if !V8_TARGET_ARCH_S390X
735                                           const Register dst_hi,
736 #endif
737                                           const Register dst,
738                                           const DoubleRegister double_dst,
739                                           FPRoundingMode rounding_mode) {
740   Condition m = Condition(0);
741   switch (rounding_mode) {
742     case kRoundToZero:
743       m = Condition(5);
744       break;
745     case kRoundToNearest:
746       UNIMPLEMENTED();
747       break;
748     case kRoundToPlusInf:
749       m = Condition(6);
750       break;
751     case kRoundToMinusInf:
752       m = Condition(7);
753       break;
754     default:
755       UNIMPLEMENTED();
756       break;
757   }
758   cgdbr(m, dst, double_input);
759   ldgr(double_dst, dst);
760 #if !V8_TARGET_ARCH_S390X
761   srlg(dst_hi, dst, Operand(32));
762 #endif
763 }
764 
765 void MacroAssembler::ConvertFloat32ToInt32(const DoubleRegister double_input,
766                                            const Register dst,
767                                            const DoubleRegister double_dst,
768                                            FPRoundingMode rounding_mode) {
769   Condition m = Condition(0);
770   switch (rounding_mode) {
771     case kRoundToZero:
772       m = Condition(5);
773       break;
774     case kRoundToNearest:
775       UNIMPLEMENTED();
776       break;
777     case kRoundToPlusInf:
778       m = Condition(6);
779       break;
780     case kRoundToMinusInf:
781       m = Condition(7);
782       break;
783     default:
784       UNIMPLEMENTED();
785       break;
786   }
787   cfebr(m, dst, double_input);
788   ldgr(double_dst, dst);
789 }
790 
791 void MacroAssembler::ConvertFloat32ToUnsignedInt32(
792     const DoubleRegister double_input, const Register dst,
793     const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
794   Condition m = Condition(0);
795   switch (rounding_mode) {
796     case kRoundToZero:
797       m = Condition(5);
798       break;
799     case kRoundToNearest:
800       UNIMPLEMENTED();
801       break;
802     case kRoundToPlusInf:
803       m = Condition(6);
804       break;
805     case kRoundToMinusInf:
806       m = Condition(7);
807       break;
808     default:
809       UNIMPLEMENTED();
810       break;
811   }
812   clfebr(m, Condition(0), dst, double_input);
813   ldgr(double_dst, dst);
814 }
815 
816 #if V8_TARGET_ARCH_S390X
817 void MacroAssembler::ConvertFloat32ToUnsignedInt64(
818     const DoubleRegister double_input, const Register dst,
819     const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
820   Condition m = Condition(0);
821   switch (rounding_mode) {
822     case kRoundToZero:
823       m = Condition(5);
824       break;
825     case kRoundToNearest:
826       UNIMPLEMENTED();
827       break;
828     case kRoundToPlusInf:
829       m = Condition(6);
830       break;
831     case kRoundToMinusInf:
832       m = Condition(7);
833       break;
834     default:
835       UNIMPLEMENTED();
836       break;
837   }
838   clgebr(m, Condition(0), dst, double_input);
839   ldgr(double_dst, dst);
840 }
841 
842 void MacroAssembler::ConvertDoubleToUnsignedInt64(
843     const DoubleRegister double_input, const Register dst,
844     const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
845   Condition m = Condition(0);
846   switch (rounding_mode) {
847     case kRoundToZero:
848       m = Condition(5);
849       break;
850     case kRoundToNearest:
851       UNIMPLEMENTED();
852       break;
853     case kRoundToPlusInf:
854       m = Condition(6);
855       break;
856     case kRoundToMinusInf:
857       m = Condition(7);
858       break;
859     default:
860       UNIMPLEMENTED();
861       break;
862   }
863   clgdbr(m, Condition(0), dst, double_input);
864   ldgr(double_dst, dst);
865 }
866 
867 #endif
868 
869 #if !V8_TARGET_ARCH_S390X
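// The pair shifts below stage the 64-bit value in the even/odd register pair
// r0/r1 (high word in r0), shift it with the doubleword shift instructions,
// and copy the result back to the destination pair.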
870 void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
871                                    Register src_low, Register src_high,
872                                    Register scratch, Register shift) {
873   LoadRR(r0, src_high);
874   LoadRR(r1, src_low);
875   sldl(r0, shift, Operand::Zero());
876   LoadRR(dst_high, r0);
877   LoadRR(dst_low, r1);
878 }
879 
880 void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
881                                    Register src_low, Register src_high,
882                                    uint32_t shift) {
883   LoadRR(r0, src_high);
884   LoadRR(r1, src_low);
885   sldl(r0, r0, Operand(shift));
886   LoadRR(dst_high, r0);
887   LoadRR(dst_low, r1);
888 }
889 
890 void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
891                                     Register src_low, Register src_high,
892                                     Register scratch, Register shift) {
893   LoadRR(r0, src_high);
894   LoadRR(r1, src_low);
895   srdl(r0, shift, Operand::Zero());
896   LoadRR(dst_high, r0);
897   LoadRR(dst_low, r1);
898 }
899 
900 void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
901                                     Register src_low, Register src_high,
902                                     uint32_t shift) {
903   LoadRR(r0, src_high);
904   LoadRR(r1, src_low);
905   srdl(r0, r0, Operand(shift));
906   LoadRR(dst_high, r0);
907   LoadRR(dst_low, r1);
908 }
909 
910 void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
911                                          Register src_low, Register src_high,
912                                          Register scratch, Register shift) {
913   LoadRR(r0, src_high);
914   LoadRR(r1, src_low);
915   srda(r0, shift, Operand::Zero());
916   LoadRR(dst_high, r0);
917   LoadRR(dst_low, r1);
918 }
919 
920 void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
921                                          Register src_low, Register src_high,
922                                          uint32_t shift) {
923   LoadRR(r0, src_high);
924   LoadRR(r1, src_low);
925   srda(r0, r0, Operand(shift));
926   LoadRR(dst_high, r0);
927   LoadRR(dst_low, r1);
928 }
929 #endif
930 
931 void MacroAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
932   lgdr(dst, src);
933 }
934 
935 void MacroAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) {
936   ldgr(dst, src);
937 }
938 
939 void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
940                                   int prologue_offset) {
941   {
942     ConstantPoolUnavailableScope constant_pool_unavailable(this);
943     LoadSmiLiteral(r1, Smi::FromInt(type));
944     PushCommonFrame(r1);
945   }
946 }
947 
948 void MacroAssembler::Prologue(bool code_pre_aging, Register base,
949                               int prologue_offset) {
950   DCHECK(!base.is(no_reg));
951   {
952     PredictableCodeSizeScope predictible_code_size_scope(
953         this, kNoCodeAgeSequenceLength);
954     // The following instructions must remain together and unmodified
955     // for code aging to work properly.
956     if (code_pre_aging) {
957       // Pre-age the code.
958       // This matches the code found in PatchPlatformCodeAge()
959       Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
960       intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
961       nop();
962       CleanseP(r14);
963       Push(r14);
964       mov(r2, Operand(target));
965       Call(r2);
966       for (int i = 0; i < kNoCodeAgeSequenceLength - kCodeAgingSequenceLength;
967            i += 2) {
968         // TODO(joransiu): Create nop function to pad
969         //         (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes.
970         nop();  // 2-byte nops().
971       }
972     } else {
973       // This matches the code found in GetNoCodeAgeSequence()
974       PushStandardFrame(r3);
975     }
976   }
977 }
978 
979 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
980   LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
981   LoadP(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
982   LoadP(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
983 }
984 
985 void MacroAssembler::EnterFrame(StackFrame::Type type,
986                                 bool load_constant_pool_pointer_reg) {
987   // We create a stack frame with:
988   //    Return Addr <-- old sp
989   //    Old FP      <-- new fp
990   //    CP
991   //    type
992   //    CodeObject  <-- new sp
993 
994   LoadSmiLiteral(ip, Smi::FromInt(type));
995   PushCommonFrame(ip);
996 
997   if (type == StackFrame::INTERNAL) {
998     mov(r0, Operand(CodeObject()));
999     push(r0);
1000   }
1001 }
1002 
1003 int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
1004   // Drop the execution stack down to the frame pointer and restore
1005   // the caller frame pointer, return address and constant pool pointer.
1006   LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
1007   lay(r1, MemOperand(
1008               fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment));
1009   LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1010   LoadRR(sp, r1);
1011   int frame_ends = pc_offset();
1012   return frame_ends;
1013 }
1014 
1015 // ExitFrame layout (probably wrongish.. needs updating)
1016 //
1017 //  SP -> previousSP
1018 //        LK reserved
1019 //        code
1020 //        sp_on_exit (for debug?)
1021 // oldSP->prev SP
1022 //        LK
1023 //        <parameters on stack>
1024 
1025 // Prior to calling EnterExitFrame, we've got a bunch of parameters
1026 // on the stack that we need to wrap a real frame around.. so first
1027 // we reserve a slot for LK and push the previous SP which is captured
1028 // in the fp register (r11)
1029 // Then - we buy a new frame
1030 
1031 // r14
1032 // oldFP <- newFP
1033 // SP
1034 // Code
1035 // Floats
1036 // gaps
1037 // Args
1038 // ABIRes <- newSP
1039 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
1040   // Set up the frame structure on the stack.
1041   DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
1042   DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
1043   DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
1044   DCHECK(stack_space > 0);
1045 
1046   // This is an opportunity to build a frame to wrap
1047   // all of the pushes that have happened inside of V8
1048   // since we were called from C code
1049   CleanseP(r14);
1050   LoadSmiLiteral(r1, Smi::FromInt(StackFrame::EXIT));
1051   PushCommonFrame(r1);
1052   // Reserve room for saved entry sp and code object.
1053   lay(sp, MemOperand(fp, -ExitFrameConstants::kFixedFrameSizeFromFp));
1054 
1055   if (emit_debug_code()) {
1056     StoreP(MemOperand(fp, ExitFrameConstants::kSPOffset), Operand::Zero(), r1);
1057   }
1058   mov(r1, Operand(CodeObject()));
1059   StoreP(r1, MemOperand(fp, ExitFrameConstants::kCodeOffset));
1060 
1061   // Save the frame pointer and the context in top.
1062   mov(r1, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1063   StoreP(fp, MemOperand(r1));
1064   mov(r1, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1065   StoreP(cp, MemOperand(r1));
1066 
1067   // Optionally save all volatile double registers.
1068   if (save_doubles) {
1069     MultiPushDoubles(kCallerSavedDoubles);
1070     // Note that d0 will be accessible at
1071     //   fp - ExitFrameConstants::kFrameSize -
1072     //   kNumCallerSavedDoubles * kDoubleSize,
1073     // since the sp slot and code slot were pushed after the fp.
1074   }
1075 
1076   lay(sp, MemOperand(sp, -stack_space * kPointerSize));
1077 
1078   // Allocate and align the frame preparing for calling the runtime
1079   // function.
1080   const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1081   if (frame_alignment > 0) {
1082     DCHECK(frame_alignment == 8);
1083     ClearRightImm(sp, sp, Operand(3));  // equivalent to &= -8
1084   }
1085 
1086   lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
1087   StoreP(MemOperand(sp), Operand::Zero(), r0);
1088   // Set the exit frame sp value to point just before the return address
1089   // location.
1090   lay(r1, MemOperand(sp, kStackFrameSPSlot * kPointerSize));
1091   StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
1092 }
1093 
1094 void MacroAssembler::InitializeNewString(Register string, Register length,
1095                                          Heap::RootListIndex map_index,
1096                                          Register scratch1, Register scratch2) {
1097   SmiTag(scratch1, length);
1098   LoadRoot(scratch2, map_index);
1099   StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset));
1100   StoreP(FieldMemOperand(string, String::kHashFieldSlot),
1101          Operand(String::kEmptyHashField), scratch1);
1102   StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
1103 }
1104 
1105 int MacroAssembler::ActivationFrameAlignment() {
1106 #if !defined(USE_SIMULATOR)
1107   // Running on the real platform. Use the alignment as mandated by the local
1108   // environment.
1109   // Note: This will break if we ever start generating snapshots on one S390
1110   // platform for another S390 platform with a different alignment.
1111   return base::OS::ActivationFrameAlignment();
1112 #else  // Simulated
1113   // If we are using the simulator then we should always align to the expected
1114   // alignment. As the simulator is used to generate snapshots we do not know
1115   // if the target platform will need alignment, so this is controlled from a
1116   // flag.
1117   return FLAG_sim_stack_alignment;
1118 #endif
1119 }
1120 
1121 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
1122                                     bool restore_context,
1123                                     bool argument_count_is_length) {
1124   // Optionally restore all double registers.
1125   if (save_doubles) {
1126     // Calculate the stack location of the saved doubles and restore them.
1127     const int kNumRegs = kNumCallerSavedDoubles;
1128     lay(r5, MemOperand(fp, -(ExitFrameConstants::kFixedFrameSizeFromFp +
1129                              kNumRegs * kDoubleSize)));
1130     MultiPopDoubles(kCallerSavedDoubles, r5);
1131   }
1132 
1133   // Clear top frame.
1134   mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1135   StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0);
1136 
1137   // Restore current context from top and clear it in debug mode.
1138   if (restore_context) {
1139     mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1140     LoadP(cp, MemOperand(ip));
1141   }
1142 #ifdef DEBUG
1143   mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1144   StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0);
1145 #endif
1146 
1147   // Tear down the exit frame, pop the arguments, and return.
1148   LeaveFrame(StackFrame::EXIT);
1149 
1150   if (argument_count.is_valid()) {
1151     if (!argument_count_is_length) {
1152       ShiftLeftP(argument_count, argument_count, Operand(kPointerSizeLog2));
1153     }
1154     la(sp, MemOperand(sp, argument_count));
1155   }
1156 }
1157 
1158 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
1159   Move(dst, d0);
1160 }
1161 
1162 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
1163   Move(dst, d0);
1164 }
1165 
1166 void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
1167                                         Register caller_args_count_reg,
1168                                         Register scratch0, Register scratch1) {
1169 #if DEBUG
1170   if (callee_args_count.is_reg()) {
1171     DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
1172                        scratch1));
1173   } else {
1174     DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
1175   }
1176 #endif
1177 
1178   // Calculate the end of destination area where we will put the arguments
1179   // after we drop current frame. We AddP kPointerSize to count the receiver
1180   // argument which is not included into formal parameters count.
1181   Register dst_reg = scratch0;
1182   ShiftLeftP(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
1183   AddP(dst_reg, fp, dst_reg);
1184   AddP(dst_reg, dst_reg,
1185        Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
1186 
1187   Register src_reg = caller_args_count_reg;
1188   // Calculate the end of source area. +kPointerSize is for the receiver.
1189   if (callee_args_count.is_reg()) {
1190     ShiftLeftP(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
1191     AddP(src_reg, sp, src_reg);
1192     AddP(src_reg, src_reg, Operand(kPointerSize));
1193   } else {
1194     mov(src_reg, Operand((callee_args_count.immediate() + 1) * kPointerSize));
1195     AddP(src_reg, src_reg, sp);
1196   }
1197 
1198   if (FLAG_debug_code) {
1199     CmpLogicalP(src_reg, dst_reg);
1200     Check(lt, kStackAccessBelowStackPointer);
1201   }
1202 
1203   // Restore caller's frame pointer and return address now as they will be
1204   // overwritten by the copying loop.
1205   RestoreFrameStateForTailCall();
1206 
1207   // Now copy callee arguments to the caller frame going backwards to avoid
1208   // callee arguments corruption (source and destination areas could overlap).
1209 
1210   // Both src_reg and dst_reg are pointing to the word after the one to copy,
1211   // so they must be pre-decremented in the loop.
1212   Register tmp_reg = scratch1;
1213   Label loop;
1214   if (callee_args_count.is_reg()) {
1215     AddP(tmp_reg, callee_args_count.reg(), Operand(1));  // +1 for receiver
1216   } else {
1217     mov(tmp_reg, Operand(callee_args_count.immediate() + 1));
1218   }
1219   LoadRR(r1, tmp_reg);
1220   bind(&loop);
1221   LoadP(tmp_reg, MemOperand(src_reg, -kPointerSize));
1222   StoreP(tmp_reg, MemOperand(dst_reg, -kPointerSize));
1223   lay(src_reg, MemOperand(src_reg, -kPointerSize));
1224   lay(dst_reg, MemOperand(dst_reg, -kPointerSize));
1225   BranchOnCount(r1, &loop);
1226 
1227   // Leave current frame.
1228   LoadRR(sp, dst_reg);
1229 }
1230 
1231 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1232                                     const ParameterCount& actual, Label* done,
1233                                     bool* definitely_mismatches,
1234                                     InvokeFlag flag,
1235                                     const CallWrapper& call_wrapper) {
1236   bool definitely_matches = false;
1237   *definitely_mismatches = false;
1238   Label regular_invoke;
1239 
1240   // Check whether the expected and actual arguments count match. If not,
1241   // setup registers according to contract with ArgumentsAdaptorTrampoline:
1242   //  r2: actual arguments count
1243   //  r3: function (passed through to callee)
1244   //  r4: expected arguments count
1245 
1246   // The code below is made a lot easier because the calling code already sets
1247   // up actual and expected registers according to the contract if values are
1248   // passed in registers.
1249 
1250   // ARM has some sanity checks as per below; consider adding them for S390.
1251   //  DCHECK(actual.is_immediate() || actual.reg().is(r2));
1252   //  DCHECK(expected.is_immediate() || expected.reg().is(r4));
1253 
1254   if (expected.is_immediate()) {
1255     DCHECK(actual.is_immediate());
1256     mov(r2, Operand(actual.immediate()));
1257     if (expected.immediate() == actual.immediate()) {
1258       definitely_matches = true;
1259     } else {
1260       const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1261       if (expected.immediate() == sentinel) {
1262         // Don't worry about adapting arguments for builtins that
1263         // don't want that done. Skip adaptation code by making it look
1264         // like we have a match between expected and actual number of
1265         // arguments.
1266         definitely_matches = true;
1267       } else {
1268         *definitely_mismatches = true;
1269         mov(r4, Operand(expected.immediate()));
1270       }
1271     }
1272   } else {
1273     if (actual.is_immediate()) {
1274       mov(r2, Operand(actual.immediate()));
1275       CmpPH(expected.reg(), Operand(actual.immediate()));
1276       beq(&regular_invoke);
1277     } else {
1278       CmpP(expected.reg(), actual.reg());
1279       beq(&regular_invoke);
1280     }
1281   }
1282 
1283   if (!definitely_matches) {
1284     Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
1285     if (flag == CALL_FUNCTION) {
1286       call_wrapper.BeforeCall(CallSize(adaptor));
1287       Call(adaptor);
1288       call_wrapper.AfterCall();
1289       if (!*definitely_mismatches) {
1290         b(done);
1291       }
1292     } else {
1293       Jump(adaptor, RelocInfo::CODE_TARGET);
1294     }
1295     bind(&regular_invoke);
1296   }
1297 }
1298 
1299 void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
1300                                              const ParameterCount& expected,
1301                                              const ParameterCount& actual) {
1302   Label skip_flooding;
1303   ExternalReference last_step_action =
1304       ExternalReference::debug_last_step_action_address(isolate());
1305   STATIC_ASSERT(StepFrame > StepIn);
1306   mov(r6, Operand(last_step_action));
1307   LoadB(r6, MemOperand(r6));
1308   CmpP(r6, Operand(StepIn));
1309   blt(&skip_flooding);
1310   {
1311     FrameScope frame(this,
1312                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1313     if (expected.is_reg()) {
1314       SmiTag(expected.reg());
1315       Push(expected.reg());
1316     }
1317     if (actual.is_reg()) {
1318       SmiTag(actual.reg());
1319       Push(actual.reg());
1320     }
1321     if (new_target.is_valid()) {
1322       Push(new_target);
1323     }
1324     Push(fun, fun);
1325     CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
1326     Pop(fun);
1327     if (new_target.is_valid()) {
1328       Pop(new_target);
1329     }
1330     if (actual.is_reg()) {
1331       Pop(actual.reg());
1332       SmiUntag(actual.reg());
1333     }
1334     if (expected.is_reg()) {
1335       Pop(expected.reg());
1336       SmiUntag(expected.reg());
1337     }
1338   }
1339   bind(&skip_flooding);
1340 }
1341 
1342 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1343                                         const ParameterCount& expected,
1344                                         const ParameterCount& actual,
1345                                         InvokeFlag flag,
1346                                         const CallWrapper& call_wrapper) {
1347   // You can't call a function without a valid frame.
1348   DCHECK(flag == JUMP_FUNCTION || has_frame());
1349 
1350   DCHECK(function.is(r3));
1351   DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r5));
1352 
1353   if (call_wrapper.NeedsDebugStepCheck()) {
1354     FloodFunctionIfStepping(function, new_target, expected, actual);
1355   }
1356 
1357   // Clear the new.target register if not given.
1358   if (!new_target.is_valid()) {
1359     LoadRoot(r5, Heap::kUndefinedValueRootIndex);
1360   }
1361 
1362   Label done;
1363   bool definitely_mismatches = false;
1364   InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
1365                  call_wrapper);
1366   if (!definitely_mismatches) {
1367     // We call indirectly through the code field in the function to
1368     // allow recompilation to take effect without changing any of the
1369     // call sites.
1370     Register code = ip;
1371     LoadP(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
1372     if (flag == CALL_FUNCTION) {
1373       call_wrapper.BeforeCall(CallSize(code));
1374       CallJSEntry(code);
1375       call_wrapper.AfterCall();
1376     } else {
1377       DCHECK(flag == JUMP_FUNCTION);
1378       JumpToJSEntry(code);
1379     }
1380 
1381     // Continue here if InvokePrologue does handle the invocation due to
1382     // mismatched parameter counts.
1383     bind(&done);
1384   }
1385 }
1386 
1387 void MacroAssembler::InvokeFunction(Register fun, Register new_target,
1388                                     const ParameterCount& actual,
1389                                     InvokeFlag flag,
1390                                     const CallWrapper& call_wrapper) {
1391   // You can't call a function without a valid frame.
1392   DCHECK(flag == JUMP_FUNCTION || has_frame());
1393 
1394   // Contract with called JS functions requires that function is passed in r3.
1395   DCHECK(fun.is(r3));
1396 
1397   Register expected_reg = r4;
1398   Register temp_reg = r6;
1399   LoadP(temp_reg, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
1400   LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
1401   LoadW(expected_reg,
1402         FieldMemOperand(temp_reg,
1403                         SharedFunctionInfo::kFormalParameterCountOffset));
1404 #if !defined(V8_TARGET_ARCH_S390X)
1405   SmiUntag(expected_reg);
1406 #endif
1407 
1408   ParameterCount expected(expected_reg);
1409   InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
1410 }
1411 
1412 void MacroAssembler::InvokeFunction(Register function,
1413                                     const ParameterCount& expected,
1414                                     const ParameterCount& actual,
1415                                     InvokeFlag flag,
1416                                     const CallWrapper& call_wrapper) {
1417   // You can't call a function without a valid frame.
1418   DCHECK(flag == JUMP_FUNCTION || has_frame());
1419 
1420   // Contract with called JS functions requires that function is passed in r3.
1421   DCHECK(function.is(r3));
1422 
1423   // Get the function and setup the context.
1424   LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
1425 
1426   InvokeFunctionCode(r3, no_reg, expected, actual, flag, call_wrapper);
1427 }
1428 
1429 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1430                                     const ParameterCount& expected,
1431                                     const ParameterCount& actual,
1432                                     InvokeFlag flag,
1433                                     const CallWrapper& call_wrapper) {
1434   Move(r3, function);
1435   InvokeFunction(r3, expected, actual, flag, call_wrapper);
1436 }
1437 
1438 void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
1439                                           Label* fail) {
1440   DCHECK(kNotStringTag != 0);
1441 
1442   LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1443   LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1444   mov(r0, Operand(kIsNotStringMask));
1445   AndP(r0, scratch);
1446   bne(fail);
1447 }
1448 
1449 void MacroAssembler::IsObjectNameType(Register object, Register scratch,
1450                                       Label* fail) {
1451   LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1452   LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1453   CmpP(scratch, Operand(LAST_NAME_TYPE));
1454   bgt(fail);
1455 }
1456 
1457 void MacroAssembler::DebugBreak() {
1458   LoadImmP(r2, Operand::Zero());
1459   mov(r3,
1460       Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
1461   CEntryStub ces(isolate(), 1);
1462   DCHECK(AllowThisStubCall(&ces));
1463   Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
1464 }
1465 
1466 void MacroAssembler::PushStackHandler() {
1467   // Adjust this code if the handler layout asserted below changes.
1468   STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
1469   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1470 
1471   // Link the current handler as the next handler.
1472   mov(r7, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1473 
1474   // Reserve stack space for the handler.
1475   lay(sp, MemOperand(sp, -StackHandlerConstants::kSize));
1476 
1477   // Copy the old handler into the next handler slot.
1478   mvc(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7),
1479       kPointerSize);
1480   // Set this new handler as the current one.
1481   StoreP(sp, MemOperand(r7));
1482 }
1483 
1484 void MacroAssembler::PopStackHandler() {
1485   STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
1486   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1487 
1488   // Pop the Next Handler into r3 and store it into Handler Address reference.
1489   Pop(r3);
1490   mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1491 
1492   StoreP(r3, MemOperand(ip));
1493 }
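
// The two helpers above maintain the isolate's try-handler chain: a singly
// linked list whose links live in stack slots, with the list head stored at
// Isolate::kHandlerAddress. A minimal C++ sketch of the same push/pop logic
// (hypothetical names; the real slot layout is given by StackHandlerConstants):
//
//   struct StackHandler { StackHandler* next; };  // kSize == 1 * kPointerSize
//
//   void PushHandler(StackHandler** head, StackHandler* h) {
//     h->next = *head;  // link the current handler as the next handler
//     *head = h;        // set this new handler as the current one
//   }
//
//   void PopHandler(StackHandler** head) {
//     *head = (*head)->next;  // unlink the top handler
//   }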
1494 
1495 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1496                                             Register scratch, Label* miss) {
1497   Label same_contexts;
1498 
1499   DCHECK(!holder_reg.is(scratch));
1500   DCHECK(!holder_reg.is(ip));
1501   DCHECK(!scratch.is(ip));
1502 
1503   // Load current lexical context from the active StandardFrame, which
1504   // may require crawling past STUB frames.
1505   Label load_context;
1506   Label has_context;
1507   DCHECK(!ip.is(scratch));
1508   LoadRR(ip, fp);
1509   bind(&load_context);
1510   LoadP(scratch,
1511         MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
1512   JumpIfNotSmi(scratch, &has_context);
1513   LoadP(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
1514   b(&load_context);
1515   bind(&has_context);
1516 
1517 // In debug mode, make sure the lexical context is set.
1518 #ifdef DEBUG
1519   CmpP(scratch, Operand::Zero());
1520   Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
1521 #endif
1522 
1523   // Load the native context of the current context.
1524   LoadP(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
1525 
1526   // Check the context is a native context.
1527   if (emit_debug_code()) {
1528     // Cannot use ip as a temporary in this verification code, because ip
1529     // is clobbered as part of cmp with an object Operand.
1530     push(holder_reg);  // Temporarily save holder on the stack.
1531     // Read the first word and compare to the native_context_map.
1532     LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1533     CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
1534     Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1535     pop(holder_reg);  // Restore holder.
1536   }
1537 
1538   // Check if both contexts are the same.
1539   LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1540   CmpP(scratch, ip);
1541   beq(&same_contexts, Label::kNear);
1542 
1543   // Check the context is a native context.
1544   if (emit_debug_code()) {
1545     // TODO(119): avoid push(holder_reg)/pop(holder_reg)
1546     // Cannot use ip as a temporary in this verification code, because ip
1547     // is clobbered as part of cmp with an object Operand.
1548     push(holder_reg);        // Temporarily save holder on the stack.
1549     LoadRR(holder_reg, ip);  // Move ip to its holding place.
1550     CompareRoot(holder_reg, Heap::kNullValueRootIndex);
1551     Check(ne, kJSGlobalProxyContextShouldNotBeNull);
1552 
1553     LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1554     CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
1555     Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1556     // Restoring ip is not needed here; it is reloaded below.
1557     pop(holder_reg);  // Restore holder.
1558     // Restore ip to holder's context.
1559     LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1560   }
1561 
1562   // Check that the security token in the calling global object is
1563   // compatible with the security token in the receiving global
1564   // object.
1565   int token_offset =
1566       Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
1567 
1568   LoadP(scratch, FieldMemOperand(scratch, token_offset));
1569   LoadP(ip, FieldMemOperand(ip, token_offset));
1570   CmpP(scratch, ip);
1571   bne(miss);
1572 
1573   bind(&same_contexts);
1574 }
1575 
1576 // Compute the hash code from the untagged key.  This must be kept in sync with
1577 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
1578 // code-stubs-hydrogen.cc
1579 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1580   // First of all we assign the hash seed to scratch.
1581   LoadRoot(scratch, Heap::kHashSeedRootIndex);
1582   SmiUntag(scratch);
1583 
1584   // Xor original key with a seed.
1585   XorP(t0, scratch);
1586 
1587   // Compute the hash code from the untagged key.  This must be kept in sync
1588   // with ComputeIntegerHash in utils.h.
1589   //
1590   // hash = ~hash + (hash << 15);
1591   LoadRR(scratch, t0);
1592   NotP(scratch);
1593   sll(t0, Operand(15));
1594   AddP(t0, scratch, t0);
1595   // hash = hash ^ (hash >> 12);
1596   ShiftRight(scratch, t0, Operand(12));
1597   XorP(t0, scratch);
1598   // hash = hash + (hash << 2);
1599   ShiftLeft(scratch, t0, Operand(2));
1600   AddP(t0, t0, scratch);
1601   // hash = hash ^ (hash >> 4);
1602   ShiftRight(scratch, t0, Operand(4));
1603   XorP(t0, scratch);
1604   // hash = hash * 2057;
1605   LoadRR(r0, t0);
1606   ShiftLeft(scratch, t0, Operand(3));
1607   AddP(t0, t0, scratch);
1608   ShiftLeft(scratch, r0, Operand(11));
1609   AddP(t0, t0, scratch);
1610   // hash = hash ^ (hash >> 16);
1611   ShiftRight(scratch, t0, Operand(16));
1612   XorP(t0, scratch);
1613   // hash & 0x3fffffff
1614   ExtractBitRange(t0, t0, 29, 0);
1615 }
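
// For reference, a plain C++ rendering of the seeded integer hash computed by
// the instruction sequence above (a sketch mirroring the step-by-step comments;
// see ComputeIntegerHash in utils.h for the canonical version):
//
//   uint32_t IntegerHashSketch(uint32_t key, uint32_t seed) {
//     uint32_t hash = key ^ seed;
//     hash = ~hash + (hash << 15);
//     hash = hash ^ (hash >> 12);
//     hash = hash + (hash << 2);
//     hash = hash ^ (hash >> 4);
//     hash = hash * 2057;         // == hash + (hash << 3) + (hash << 11)
//     hash = hash ^ (hash >> 16);
//     return hash & 0x3fffffff;   // ExtractBitRange(t0, t0, 29, 0)
//   }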
1616 
1617 void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
1618                                               Register key, Register result,
1619                                               Register t0, Register t1,
1620                                               Register t2) {
1621   // Register use:
1622   //
1623   // elements - holds the slow-case elements of the receiver on entry.
1624   //            Unchanged unless 'result' is the same register.
1625   //
1626   // key      - holds the smi key on entry.
1627   //            Unchanged unless 'result' is the same register.
1628   //
1629   // result   - holds the result on exit if the load succeeded.
1630   //            Allowed to be the same as 'key' or 'result'.
1631   //            Unchanged on bailout so 'key' or 'result' can be used
1632   //            in further computation.
1633   //
1634   // Scratch registers:
1635   //
1636   // t0 - holds the untagged key on entry and holds the hash once computed.
1637   //
1638   // t1 - used to hold the capacity mask of the dictionary
1639   //
1640   // t2 - used for the index into the dictionary.
1641   Label done;
1642 
1643   GetNumberHash(t0, t1);
1644 
1645   // Compute the capacity mask.
1646   LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
1647   SmiUntag(t1);
1648   SubP(t1, Operand(1));
1649 
1650   // Generate an unrolled loop that performs a few probes before giving up.
1651   for (int i = 0; i < kNumberDictionaryProbes; i++) {
1652     // Use t2 for index calculations and keep the hash intact in t0.
1653     LoadRR(t2, t0);
1654     // Compute the masked index: (hash + i + i * i) & mask.
1655     if (i > 0) {
1656       AddP(t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
1657     }
1658     AndP(t2, t1);
1659 
1660     // Scale the index by multiplying by the element size.
1661     DCHECK(SeededNumberDictionary::kEntrySize == 3);
1662     LoadRR(ip, t2);
1663     sll(ip, Operand(1));
1664     AddP(t2, ip);  // t2 = t2 * 3
1665 
1666     // Check if the key is identical to the name.
1667     sll(t2, Operand(kPointerSizeLog2));
1668     AddP(t2, elements);
1669     LoadP(ip,
1670           FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
1671     CmpP(key, ip);
1672     if (i != kNumberDictionaryProbes - 1) {
1673       beq(&done, Label::kNear);
1674     } else {
1675       bne(miss);
1676     }
1677   }
1678 
1679   bind(&done);
1680   // Check that the value is a field property.
1681   // t2: elements + (index * kPointerSize)
1682   const int kDetailsOffset =
1683       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1684   LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
1685   LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
1686   DCHECK_EQ(DATA, 0);
1687   AndP(r0, ip, t1);
1688   bne(miss);
1689 
1690   // Get the value at the masked, scaled index and return.
1691   const int kValueOffset =
1692       SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1693   LoadP(result, FieldMemOperand(t2, kValueOffset));
1694 }
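
// A compact sketch of the lookup the unrolled probe loop above performs on the
// seeded number dictionary (KeyAt is a hypothetical accessor; entries are
// kEntrySize == 3 words: key, value, details):
//
//   int FindNumberDictionaryEntrySketch(uint32_t hash, uint32_t capacity,
//                                       uint32_t key) {
//     uint32_t mask = capacity - 1;  // capacity is a power of two
//     for (int i = 0; i < kNumberDictionaryProbes; i++) {
//       // Compute the masked index: (hash + i + i * i) & mask.
//       uint32_t index = (hash + SeededNumberDictionary::GetProbeOffset(i)) & mask;
//       int entry = index * 3;
//       if (KeyAt(entry) == key) return entry;  // hit
//     }
//     return -1;  // miss
//   }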
1695 
1696 void MacroAssembler::Allocate(int object_size, Register result,
1697                               Register scratch1, Register scratch2,
1698                               Label* gc_required, AllocationFlags flags) {
1699   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
1700   DCHECK((flags & ALLOCATION_FOLDED) == 0);
1701   if (!FLAG_inline_new) {
1702     if (emit_debug_code()) {
1703       // Trash the registers to simulate an allocation failure.
1704       LoadImmP(result, Operand(0x7091));
1705       LoadImmP(scratch1, Operand(0x7191));
1706       LoadImmP(scratch2, Operand(0x7291));
1707     }
1708     b(gc_required);
1709     return;
1710   }
1711 
1712   DCHECK(!AreAliased(result, scratch1, scratch2, ip));
1713 
1714   // Make object size into bytes.
1715   if ((flags & SIZE_IN_WORDS) != 0) {
1716     object_size *= kPointerSize;
1717   }
1718   DCHECK_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask));
1719 
1720   // Check relative positions of allocation top and limit addresses.
1721   ExternalReference allocation_top =
1722       AllocationUtils::GetAllocationTopReference(isolate(), flags);
1723   ExternalReference allocation_limit =
1724       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1725 
1726   intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1727   intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1728   DCHECK((limit - top) == kPointerSize);
1729 
1730   // Set up allocation top address register.
1731   Register top_address = scratch1;
1732   // This code stores a temporary value in ip. This is OK, as the code below
1733   // does not need ip for implicit literal generation.
1734   Register alloc_limit = ip;
1735   Register result_end = scratch2;
1736   mov(top_address, Operand(allocation_top));
1737 
1738   if ((flags & RESULT_CONTAINS_TOP) == 0) {
1739     // Load allocation top into result and allocation limit into ip.
1740     LoadP(result, MemOperand(top_address));
1741     LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
1742   } else {
1743     if (emit_debug_code()) {
1744       // Assert that result actually contains top on entry.
1745       LoadP(alloc_limit, MemOperand(top_address));
1746       CmpP(result, alloc_limit);
1747       Check(eq, kUnexpectedAllocationTop);
1748     }
1749     // Load allocation limit. Result already contains allocation top.
1750     LoadP(alloc_limit, MemOperand(top_address, limit - top));
1751   }
1752 
1753   if ((flags & DOUBLE_ALIGNMENT) != 0) {
1754 // Align the next allocation. Storing the filler map without checking top is
1755 // safe in new-space because the limit of the heap is aligned there.
1756 #if V8_TARGET_ARCH_S390X
1757     STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1758 #else
1759     STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1760     AndP(result_end, result, Operand(kDoubleAlignmentMask));
1761     Label aligned;
1762     beq(&aligned, Label::kNear);
1763     if ((flags & PRETENURE) != 0) {
1764       CmpLogicalP(result, alloc_limit);
1765       bge(gc_required);
1766     }
1767     mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1768     StoreW(result_end, MemOperand(result));
1769     AddP(result, result, Operand(kDoubleSize / 2));
1770     bind(&aligned);
1771 #endif
1772   }
1773 
1774   // Calculate new top and bail out if new space is exhausted. Use result
1775   // to calculate the new top.
1776   SubP(r0, alloc_limit, result);
1777   if (is_int16(object_size)) {
1778     CmpP(r0, Operand(object_size));
1779     blt(gc_required);
1780     AddP(result_end, result, Operand(object_size));
1781   } else {
1782     mov(result_end, Operand(object_size));
1783     CmpP(r0, result_end);
1784     blt(gc_required);
1785     AddP(result_end, result, result_end);
1786   }
1787 
1788   if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
1789     // The top pointer is not updated for allocation folding dominators.
1790     StoreP(result_end, MemOperand(top_address));
1791   }
1792 
1793   // Tag object.
1794   AddP(result, result, Operand(kHeapObjectTag));
1795 }
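
// The fast path above is a bump-pointer allocation into new space. A minimal
// sketch of the same logic (hypothetical names; top and limit live in two
// adjacent words, as asserted by DCHECK((limit - top) == kPointerSize);
// double-alignment and allocation-folding handling omitted):
//
//   Address AllocateRawSketch(Address* top_address, intptr_t size_in_bytes) {
//     Address top = top_address[0];
//     Address limit = top_address[1];
//     if (limit - top < size_in_bytes) return kGcRequired;  // bail to slow path
//     top_address[0] = top + size_in_bytes;                 // bump the top pointer
//     return top + kHeapObjectTag;                          // return tagged result
//   }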
1796 
1797 void MacroAssembler::Allocate(Register object_size, Register result,
1798                               Register result_end, Register scratch,
1799                               Label* gc_required, AllocationFlags flags) {
1800   DCHECK((flags & ALLOCATION_FOLDED) == 0);
1801   if (!FLAG_inline_new) {
1802     if (emit_debug_code()) {
1803       // Trash the registers to simulate an allocation failure.
1804       LoadImmP(result, Operand(0x7091));
1805       LoadImmP(scratch, Operand(0x7191));
1806       LoadImmP(result_end, Operand(0x7291));
1807     }
1808     b(gc_required);
1809     return;
1810   }
1811 
1812   // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
1813   // is not specified. Other registers must not overlap.
1814   DCHECK(!AreAliased(object_size, result, scratch, ip));
1815   DCHECK(!AreAliased(result_end, result, scratch, ip));
1816   DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
1817 
1818   // Check relative positions of allocation top and limit addresses.
1819   ExternalReference allocation_top =
1820       AllocationUtils::GetAllocationTopReference(isolate(), flags);
1821   ExternalReference allocation_limit =
1822       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1823   intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1824   intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1825   DCHECK((limit - top) == kPointerSize);
1826 
1827   // Set up allocation top address and allocation limit registers.
1828   Register top_address = scratch;
1829   // This code stores a temporary value in ip. This is OK, as the code below
1830   // does not need ip for implicit literal generation.
1831   Register alloc_limit = ip;
1832   mov(top_address, Operand(allocation_top));
1833 
1834   if ((flags & RESULT_CONTAINS_TOP) == 0) {
1835     // Load allocation top into result and allocation limit into alloc_limit.
1836     LoadP(result, MemOperand(top_address));
1837     LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
1838   } else {
1839     if (emit_debug_code()) {
1840       // Assert that result actually contains top on entry.
1841       LoadP(alloc_limit, MemOperand(top_address));
1842       CmpP(result, alloc_limit);
1843       Check(eq, kUnexpectedAllocationTop);
1844     }
1845     // Load allocation limit. Result already contains allocation top.
1846     LoadP(alloc_limit, MemOperand(top_address, limit - top));
1847   }
1848 
1849   if ((flags & DOUBLE_ALIGNMENT) != 0) {
1850 // Align the next allocation. Storing the filler map without checking top is
1851 // safe in new-space because the limit of the heap is aligned there.
1852 #if V8_TARGET_ARCH_S390X
1853     STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1854 #else
1855     STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1856     AndP(result_end, result, Operand(kDoubleAlignmentMask));
1857     Label aligned;
1858     beq(&aligned, Label::kNear);
1859     if ((flags & PRETENURE) != 0) {
1860       CmpLogicalP(result, alloc_limit);
1861       bge(gc_required);
1862     }
1863     mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1864     StoreW(result_end, MemOperand(result));
1865     AddP(result, result, Operand(kDoubleSize / 2));
1866     bind(&aligned);
1867 #endif
1868   }
1869 
1870   // Calculate new top and bail out if new space is exhausted. Use result
1871   // to calculate the new top. Object size may be in words so a shift is
1872   // required to get the number of bytes.
1873   SubP(r0, alloc_limit, result);
1874   if ((flags & SIZE_IN_WORDS) != 0) {
1875     ShiftLeftP(result_end, object_size, Operand(kPointerSizeLog2));
1876     CmpP(r0, result_end);
1877     blt(gc_required);
1878     AddP(result_end, result, result_end);
1879   } else {
1880     CmpP(r0, object_size);
1881     blt(gc_required);
1882     AddP(result_end, result, object_size);
1883   }
1884 
1885   // Update allocation top. result temporarily holds the new top.
1886   if (emit_debug_code()) {
1887     AndP(r0, result_end, Operand(kObjectAlignmentMask));
1888     Check(eq, kUnalignedAllocationInNewSpace, cr0);
1889   }
1890   if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
1891     // The top pointer is not updated for allocation folding dominators.
1892     StoreP(result_end, MemOperand(top_address));
1893   }
1894 
1895   // Tag object.
1896   AddP(result, result, Operand(kHeapObjectTag));
1897 }
1898 
1899 void MacroAssembler::FastAllocate(Register object_size, Register result,
1900                                   Register result_end, Register scratch,
1901                                   AllocationFlags flags) {
1902   // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
1903   // is not specified. Other registers must not overlap.
1904   DCHECK(!AreAliased(object_size, result, scratch, ip));
1905   DCHECK(!AreAliased(result_end, result, scratch, ip));
1906   DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
1907 
1908   ExternalReference allocation_top =
1909       AllocationUtils::GetAllocationTopReference(isolate(), flags);
1910 
1911   Register top_address = scratch;
1912   mov(top_address, Operand(allocation_top));
1913   LoadP(result, MemOperand(top_address));
1914 
1915   if ((flags & DOUBLE_ALIGNMENT) != 0) {
1916 // Align the next allocation. Storing the filler map without checking top is
1917 // safe in new-space because the limit of the heap is aligned there.
1918 #if V8_TARGET_ARCH_S390X
1919     STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1920 #else
1921     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1922     AndP(result_end, result, Operand(kDoubleAlignmentMask));
1923     Label aligned;
1924     beq(&aligned, Label::kNear);
1925     mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1926     StoreW(result_end, MemOperand(result));
1927     AddP(result, result, Operand(kDoubleSize / 2));
1928     bind(&aligned);
1929 #endif
1930   }
1931 
1932   // Calculate new top using result. Object size may be in words so a shift is
1933   // required to get the number of bytes.
1934   if ((flags & SIZE_IN_WORDS) != 0) {
1935     ShiftLeftP(result_end, object_size, Operand(kPointerSizeLog2));
1936     AddP(result_end, result, result_end);
1937   } else {
1938     AddP(result_end, result, object_size);
1939   }
1940 
1941   // Update allocation top. result temporarily holds the new top.
1942   if (emit_debug_code()) {
1943     AndP(r0, result_end, Operand(kObjectAlignmentMask));
1944     Check(eq, kUnalignedAllocationInNewSpace, cr0);
1945   }
1946   StoreP(result_end, MemOperand(top_address));
1947 
1948   // Tag object.
1949   AddP(result, result, Operand(kHeapObjectTag));
1950 }
1951 
1952 void MacroAssembler::FastAllocate(int object_size, Register result,
1953                                   Register scratch1, Register scratch2,
1954                                   AllocationFlags flags) {
1955   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
1956   DCHECK(!AreAliased(result, scratch1, scratch2, ip));
1957 
1958   // Make object size into bytes.
1959   if ((flags & SIZE_IN_WORDS) != 0) {
1960     object_size *= kPointerSize;
1961   }
1962   DCHECK_EQ(0, object_size & kObjectAlignmentMask);
1963 
1964   ExternalReference allocation_top =
1965       AllocationUtils::GetAllocationTopReference(isolate(), flags);
1966 
1967   // Set up allocation top address register.
1968   Register top_address = scratch1;
1969   Register result_end = scratch2;
1970   mov(top_address, Operand(allocation_top));
1971   LoadP(result, MemOperand(top_address));
1972 
1973   if ((flags & DOUBLE_ALIGNMENT) != 0) {
1974 // Align the next allocation. Storing the filler map without checking top is
1975 // safe in new-space because the limit of the heap is aligned there.
1976 #if V8_TARGET_ARCH_S390X
1977     STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1978 #else
1979     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1980     AndP(result_end, result, Operand(kDoubleAlignmentMask));
1981     Label aligned;
1982     beq(&aligned, Label::kNear);
1983     mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1984     StoreW(result_end, MemOperand(result));
1985     AddP(result, result, Operand(kDoubleSize / 2));
1986     bind(&aligned);
1987 #endif
1988   }
1989 
1990   // Calculate new top using result.
1991   AddP(result_end, result, Operand(object_size));
1992 
1993   // The top pointer is not updated for allocation folding dominators.
1994   StoreP(result_end, MemOperand(top_address));
1995 
1996   // Tag object.
1997   AddP(result, result, Operand(kHeapObjectTag));
1998 }
1999 
2000 void MacroAssembler::AllocateTwoByteString(Register result, Register length,
2001                                            Register scratch1, Register scratch2,
2002                                            Register scratch3,
2003                                            Label* gc_required) {
2004   // Calculate the number of bytes needed for the characters in the string while
2005   // observing object alignment.
2006   DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
2007 
2008   ShiftLeftP(scratch1, length, Operand(1));  // Length in bytes, not chars.
2009   AddP(scratch1, Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
2010 
2011   AndP(scratch1, Operand(~kObjectAlignmentMask));
2012 
2013   // Allocate two-byte string in new space.
2014   Allocate(scratch1, result, scratch2, scratch3, gc_required,
2015            NO_ALLOCATION_FLAGS);
2016 
2017   // Set the map, length and hash field.
2018   InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
2019                       scratch2);
2020 }
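
// The three instructions computing scratch1 above round the two-byte payload
// up to object alignment, i.e. (illustrative formula):
//
//   size_in_bytes = (2 * length + SeqTwoByteString::kHeaderSize
//                    + kObjectAlignmentMask) & ~kObjectAlignmentMask;
//
// AllocateOneByteString below does the same with one byte per character.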
2021 
2022 void MacroAssembler::AllocateOneByteString(Register result, Register length,
2023                                            Register scratch1, Register scratch2,
2024                                            Register scratch3,
2025                                            Label* gc_required) {
2026   // Calculate the number of bytes needed for the characters in the string while
2027   // observing object alignment.
2028   DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
2029   DCHECK(kCharSize == 1);
2030   AddP(scratch1, length,
2031        Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
2032   AndP(scratch1, Operand(~kObjectAlignmentMask));
2033 
2034   // Allocate one-byte string in new space.
2035   Allocate(scratch1, result, scratch2, scratch3, gc_required,
2036            NO_ALLOCATION_FLAGS);
2037 
2038   // Set the map, length and hash field.
2039   InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
2040                       scratch1, scratch2);
2041 }
2042 
2043 void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
2044                                                Register scratch1,
2045                                                Register scratch2,
2046                                                Label* gc_required) {
2047   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
2048            NO_ALLOCATION_FLAGS);
2049 
2050   InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
2051                       scratch2);
2052 }
2053 
2054 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
2055                                                Register scratch1,
2056                                                Register scratch2,
2057                                                Label* gc_required) {
2058   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
2059            NO_ALLOCATION_FLAGS);
2060 
2061   InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
2062                       scratch1, scratch2);
2063 }
2064 
2065 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
2066                                                  Register length,
2067                                                  Register scratch1,
2068                                                  Register scratch2,
2069                                                  Label* gc_required) {
2070   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2071            NO_ALLOCATION_FLAGS);
2072 
2073   InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
2074                       scratch2);
2075 }
2076 
2077 void MacroAssembler::AllocateOneByteSlicedString(Register result,
2078                                                  Register length,
2079                                                  Register scratch1,
2080                                                  Register scratch2,
2081                                                  Label* gc_required) {
2082   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2083            NO_ALLOCATION_FLAGS);
2084 
2085   InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
2086                       scratch1, scratch2);
2087 }
2088 
2089 void MacroAssembler::CompareObjectType(Register object, Register map,
2090                                        Register type_reg, InstanceType type) {
2091   const Register temp = type_reg.is(no_reg) ? r0 : type_reg;
2092 
2093   LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
2094   CompareInstanceType(map, temp, type);
2095 }
2096 
2097 void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
2098                                          InstanceType type) {
2099   STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
2100   STATIC_ASSERT(LAST_TYPE < 256);
2101   LoadlB(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
2102   CmpP(type_reg, Operand(type));
2103 }
2104 
2105 void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
2106   CmpP(obj, MemOperand(kRootRegister, index << kPointerSizeLog2));
2107 }
2108 
2109 void MacroAssembler::CheckFastElements(Register map, Register scratch,
2110                                        Label* fail) {
2111   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2112   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2113   STATIC_ASSERT(FAST_ELEMENTS == 2);
2114   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2115   STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000);
2116   CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
2117                  Operand(Map::kMaximumBitField2FastHoleyElementValue));
2118   bgt(fail);
2119 }
2120 
2121 void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
2122                                              Label* fail) {
2123   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2124   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2125   STATIC_ASSERT(FAST_ELEMENTS == 2);
2126   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2127   CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
2128                  Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2129   ble(fail);
2130   CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
2131                  Operand(Map::kMaximumBitField2FastHoleyElementValue));
2132   bgt(fail);
2133 }
2134 
2135 void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
2136                                           Label* fail) {
2137   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2138   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2139   CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
2140                  Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2141   bgt(fail);
2142 }
2143 
2144 void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
2145   SmiUntag(ip, smi);
2146   ConvertIntToDouble(ip, value);
2147 }
2148 void MacroAssembler::StoreNumberToDoubleElements(
2149     Register value_reg, Register key_reg, Register elements_reg,
2150     Register scratch1, DoubleRegister double_scratch, Label* fail,
2151     int elements_offset) {
2152   DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
2153   Label smi_value, store;
2154 
2155   // Handle smi values specially.
2156   JumpIfSmi(value_reg, &smi_value);
2157 
2158   // Ensure that the object is a heap number
2159   CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
2160            DONT_DO_SMI_CHECK);
2161 
2162   LoadDouble(double_scratch,
2163              FieldMemOperand(value_reg, HeapNumber::kValueOffset));
2164   // Force a canonical NaN.
2165   CanonicalizeNaN(double_scratch);
2166   b(&store);
2167 
2168   bind(&smi_value);
2169   SmiToDouble(double_scratch, value_reg);
2170 
2171   bind(&store);
2172   SmiToDoubleArrayOffset(scratch1, key_reg);
2173   StoreDouble(double_scratch,
2174               FieldMemOperand(elements_reg, scratch1,
2175                               FixedDoubleArray::kHeaderSize - elements_offset));
2176 }
2177 
2178 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
2179                                             Register right,
2180                                             Register overflow_dst,
2181                                             Register scratch) {
2182   DCHECK(!dst.is(overflow_dst));
2183   DCHECK(!dst.is(scratch));
2184   DCHECK(!overflow_dst.is(scratch));
2185   DCHECK(!overflow_dst.is(left));
2186   DCHECK(!overflow_dst.is(right));
2187 
2188   // TODO(joransiu): Optimize paths for left == right.
2189   bool left_is_right = left.is(right);
2190 
2191   // C = A+B; C overflows if A/B have same sign and C has diff sign than A
2192   if (dst.is(left)) {
2193     LoadRR(scratch, left);             // Preserve left.
2194     AddP(dst, left, right);            // Left is overwritten.
2195     XorP(overflow_dst, scratch, dst);  // Original left.
2196     if (!left_is_right) XorP(scratch, dst, right);
2197   } else if (dst.is(right)) {
2198     LoadRR(scratch, right);  // Preserve right.
2199     AddP(dst, left, right);  // Right is overwritten.
2200     XorP(overflow_dst, dst, left);
2201     if (!left_is_right) XorP(scratch, dst, scratch);
2202   } else {
2203     AddP(dst, left, right);
2204     XorP(overflow_dst, dst, left);
2205     if (!left_is_right) XorP(scratch, dst, right);
2206   }
2207   if (!left_is_right) AndP(overflow_dst, scratch, overflow_dst);
2208   LoadAndTestRR(overflow_dst, overflow_dst);
2209 }
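
// The XOR/AND sequence above is the branch-free signed-overflow test spelled
// out in the comment: for C = A + B, overflow occurred iff A and B have the
// same sign and C's sign differs from A's. An equivalent C++ sketch:
//
//   bool AddOverflowedSketch(intptr_t a, intptr_t b, intptr_t c /* = a + b */) {
//     return ((c ^ a) & (c ^ b)) < 0;  // sign bit set exactly on overflow
//   }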
2210 
2211 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
2212                                             intptr_t right,
2213                                             Register overflow_dst,
2214                                             Register scratch) {
2215   DCHECK(!dst.is(overflow_dst));
2216   DCHECK(!dst.is(scratch));
2217   DCHECK(!overflow_dst.is(scratch));
2218   DCHECK(!overflow_dst.is(left));
2219 
2220   mov(r1, Operand(right));
2221   AddAndCheckForOverflow(dst, left, r1, overflow_dst, scratch);
2222 }
2223 
2224 void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
2225                                             Register right,
2226                                             Register overflow_dst,
2227                                             Register scratch) {
2228   DCHECK(!dst.is(overflow_dst));
2229   DCHECK(!dst.is(scratch));
2230   DCHECK(!overflow_dst.is(scratch));
2231   DCHECK(!overflow_dst.is(left));
2232   DCHECK(!overflow_dst.is(right));
2233 
2234   // C = A-B; C overflows if A/B have diff signs and C has diff sign than A
2235   if (dst.is(left)) {
2236     LoadRR(scratch, left);   // Preserve left.
2237     SubP(dst, left, right);  // Left is overwritten.
2238     XorP(overflow_dst, dst, scratch);
2239     XorP(scratch, right);
2240     AndP(overflow_dst, scratch /*, SetRC*/);
2241     LoadAndTestRR(overflow_dst, overflow_dst);
2242     // Should be okay to remove rc
2243   } else if (dst.is(right)) {
2244     LoadRR(scratch, right);  // Preserve right.
2245     SubP(dst, left, right);  // Right is overwritten.
2246     XorP(overflow_dst, dst, left);
2247     XorP(scratch, left);
2248     AndP(overflow_dst, scratch /*, SetRC*/);
2249     LoadAndTestRR(overflow_dst, overflow_dst);
2250     // Should be okay to remove rc
2251   } else {
2252     SubP(dst, left, right);
2253     XorP(overflow_dst, dst, left);
2254     XorP(scratch, left, right);
2255     AndP(overflow_dst, scratch /*, SetRC*/);
2256     LoadAndTestRR(overflow_dst, overflow_dst);
2257     // Should be okay to remove rc
2258   }
2259 }
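
// Same idea for subtraction: for C = A - B, overflow occurred iff A and B have
// different signs and C's sign differs from A's. Sketch:
//
//   bool SubOverflowedSketch(intptr_t a, intptr_t b, intptr_t c /* = a - b */) {
//     return ((c ^ a) & (a ^ b)) < 0;
//   }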
2260 
2261 void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
2262                                 Label* early_success) {
2263   LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2264   CompareMap(obj, map, early_success);
2265 }
2266 
2267 void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map,
2268                                 Label* early_success) {
2269   mov(r0, Operand(map));
2270   CmpP(r0, FieldMemOperand(obj_map, HeapObject::kMapOffset));
2271 }
2272 
2273 void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map,
2274                               Label* fail, SmiCheckType smi_check_type) {
2275   if (smi_check_type == DO_SMI_CHECK) {
2276     JumpIfSmi(obj, fail);
2277   }
2278 
2279   Label success;
2280   CompareMap(obj, scratch, map, &success);
2281   bne(fail);
2282   bind(&success);
2283 }
2284 
2285 void MacroAssembler::CheckMap(Register obj, Register scratch,
2286                               Heap::RootListIndex index, Label* fail,
2287                               SmiCheckType smi_check_type) {
2288   if (smi_check_type == DO_SMI_CHECK) {
2289     JumpIfSmi(obj, fail);
2290   }
2291   LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2292   CompareRoot(scratch, index);
2293   bne(fail);
2294 }
2295 
2296 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
2297                                      Register scratch2, Handle<WeakCell> cell,
2298                                      Handle<Code> success,
2299                                      SmiCheckType smi_check_type) {
2300   Label fail;
2301   if (smi_check_type == DO_SMI_CHECK) {
2302     JumpIfSmi(obj, &fail);
2303   }
2304   LoadP(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
2305   CmpWeakValue(scratch1, cell, scratch2);
2306   Jump(success, RelocInfo::CODE_TARGET, eq);
2307   bind(&fail);
2308 }
2309 
2310 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2311                                   Register scratch, CRegister) {
2312   mov(scratch, Operand(cell));
2313   CmpP(value, FieldMemOperand(scratch, WeakCell::kValueOffset));
2314 }
2315 
2316 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2317   mov(value, Operand(cell));
2318   LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
2319 }
2320 
2321 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2322                                    Label* miss) {
2323   GetWeakValue(value, cell);
2324   JumpIfSmi(value, miss);
2325 }
2326 
2327 void MacroAssembler::GetMapConstructor(Register result, Register map,
2328                                        Register temp, Register temp2) {
2329   Label done, loop;
2330   LoadP(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
2331   bind(&loop);
2332   JumpIfSmi(result, &done);
2333   CompareObjectType(result, temp, temp2, MAP_TYPE);
2334   bne(&done);
2335   LoadP(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
2336   b(&loop);
2337   bind(&done);
2338 }
2339 
2340 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
2341                                              Register scratch, Label* miss) {
2342   // Get the prototype or initial map from the function.
2343   LoadP(result,
2344         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2345 
2346   // If the prototype or initial map is the hole, don't return it and
2347   // simply miss the cache instead. This will allow us to allocate a
2348   // prototype object on-demand in the runtime system.
2349   CompareRoot(result, Heap::kTheHoleValueRootIndex);
2350   beq(miss);
2351 
2352   // If the function does not have an initial map, we're done.
2353   Label done;
2354   CompareObjectType(result, scratch, scratch, MAP_TYPE);
2355   bne(&done, Label::kNear);
2356 
2357   // Get the prototype from the initial map.
2358   LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
2359 
2360   // All done.
2361   bind(&done);
2362 }
2363 
2364 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
2365                               Condition cond) {
2366   DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
2367   Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
2368 }
2369 
2370 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2371   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
2372 }
2373 
2374 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2375   return has_frame_ || !stub->SometimesSetsUpAFrame();
2376 }
2377 
2378 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2379   // If the hash field contains an array index, pick it out. The assert checks
2380   // that the constants for the maximum number of digits for an array index
2381   // cached in the hash field and the number of bits reserved for it do not
2382   // conflict.
2383   DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
2384          (1 << String::kArrayIndexValueBits));
2385   DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
2386 }
2387 
2388 void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
2389                                        Register scratch1, Register scratch2,
2390                                        DoubleRegister double_scratch) {
2391   TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
2392 }
2393 
2394 void MacroAssembler::TestDoubleIsMinusZero(DoubleRegister input,
2395                                            Register scratch1,
2396                                            Register scratch2) {
2397   lgdr(scratch1, input);
2398 #if V8_TARGET_ARCH_S390X
2399   llihf(scratch2, Operand(0x80000000));  // scratch2 = 0x80000000_00000000
2400   CmpP(scratch1, scratch2);
2401 #else
2402   Label done;
2403   CmpP(scratch1, Operand::Zero());
2404   bne(&done, Label::kNear);
2405 
2406   srlg(scratch1, scratch1, Operand(32));
2407   CmpP(scratch1, Operand(HeapNumber::kSignMask));
2408   bind(&done);
2409 #endif
2410 }
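
// -0.0 is the only double whose bit pattern is 0x8000000000000000, which is
// what the comparisons above test (in the 31-bit path: low word zero and high
// word equal to the sign mask). A C++ sketch of the same check:
//
//   bool IsMinusZeroSketch(double x) {
//     uint64_t bits;
//     memcpy(&bits, &x, sizeof(bits));
//     return bits == 0x8000000000000000ull;
//   }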
2411 
2412 void MacroAssembler::TestDoubleSign(DoubleRegister input, Register scratch) {
2413   lgdr(scratch, input);
2414   cgfi(scratch, Operand::Zero());
2415 }
2416 
2417 void MacroAssembler::TestHeapNumberSign(Register input, Register scratch) {
2418   LoadlW(scratch, FieldMemOperand(input, HeapNumber::kValueOffset +
2419                                              Register::kExponentOffset));
2420   Cmp32(scratch, Operand::Zero());
2421 }
2422 
2423 void MacroAssembler::TryDoubleToInt32Exact(Register result,
2424                                            DoubleRegister double_input,
2425                                            Register scratch,
2426                                            DoubleRegister double_scratch) {
2427   Label done;
2428   DCHECK(!double_input.is(double_scratch));
2429 
2430   ConvertDoubleToInt64(double_input,
2431 #if !V8_TARGET_ARCH_S390X
2432                        scratch,
2433 #endif
2434                        result, double_scratch);
2435 
2436 #if V8_TARGET_ARCH_S390X
2437   TestIfInt32(result, r0);
2438 #else
2439   TestIfInt32(scratch, result, r0);
2440 #endif
2441   bne(&done);
2442 
2443   // convert back and compare
2444   lgdr(scratch, double_scratch);
2445   cdfbr(double_scratch, scratch);
2446   cdbr(double_scratch, double_input);
2447   bind(&done);
2448 }
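
// The tail of the routine above is the standard "convert, convert back,
// compare" test: the double is an exact int32 iff the converted value fits in
// 32 bits and converting it back reproduces the input. Illustrative sketch
// (rounding-mode details of ConvertDoubleToInt64 elided):
//
//   bool DoubleIsInt32ExactSketch(double d) {
//     int64_t i = static_cast<int64_t>(d);
//     return i >= INT32_MIN && i <= INT32_MAX && static_cast<double>(i) == d;
//   }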
2449 
2450 void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
2451                                    Register input_high, Register scratch,
2452                                    DoubleRegister double_scratch, Label* done,
2453                                    Label* exact) {
2454   DCHECK(!result.is(input_high));
2455   DCHECK(!double_input.is(double_scratch));
2456   Label exception;
2457 
2458   // Move high word into input_high
2459   lay(sp, MemOperand(sp, -kDoubleSize));
2460   StoreDouble(double_input, MemOperand(sp));
2461   LoadlW(input_high, MemOperand(sp, Register::kExponentOffset));
2462   la(sp, MemOperand(sp, kDoubleSize));
2463 
2464   // Test for NaN/Inf
2465   ExtractBitMask(result, input_high, HeapNumber::kExponentMask);
2466   CmpLogicalP(result, Operand(0x7ff));
2467   beq(&exception);
2468 
2469   // Convert (rounding to -Inf)
2470   ConvertDoubleToInt64(double_input,
2471 #if !V8_TARGET_ARCH_S390X
2472                        scratch,
2473 #endif
2474                        result, double_scratch, kRoundToMinusInf);
2475 
2476 // Test for overflow
2477 #if V8_TARGET_ARCH_S390X
2478   TestIfInt32(result, r0);
2479 #else
2480   TestIfInt32(scratch, result, r0);
2481 #endif
2482   bne(&exception);
2483 
2484   // Test for exactness
2485   lgdr(scratch, double_scratch);
2486   cdfbr(double_scratch, scratch);
2487   cdbr(double_scratch, double_input);
2488   beq(exact);
2489   b(done);
2490 
2491   bind(&exception);
2492 }
2493 
2494 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2495                                                 DoubleRegister double_input,
2496                                                 Label* done) {
2497   DoubleRegister double_scratch = kScratchDoubleReg;
2498 #if !V8_TARGET_ARCH_S390X
2499   Register scratch = ip;
2500 #endif
2501 
2502   ConvertDoubleToInt64(double_input,
2503 #if !V8_TARGET_ARCH_S390X
2504                        scratch,
2505 #endif
2506                        result, double_scratch);
2507 
2508 // Test for overflow
2509 #if V8_TARGET_ARCH_S390X
2510   TestIfInt32(result, r0);
2511 #else
2512   TestIfInt32(scratch, result, r0);
2513 #endif
2514   beq(done);
2515 }
2516 
2517 void MacroAssembler::TruncateDoubleToI(Register result,
2518                                        DoubleRegister double_input) {
2519   Label done;
2520 
2521   TryInlineTruncateDoubleToI(result, double_input, &done);
2522 
2523   // If we fell through, the inline version didn't succeed - call the stub instead.
2524   push(r14);
2525   // Put input on stack.
2526   lay(sp, MemOperand(sp, -kDoubleSize));
2527   StoreDouble(double_input, MemOperand(sp));
2528 
2529   DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2530   CallStub(&stub);
2531 
2532   la(sp, MemOperand(sp, kDoubleSize));
2533   pop(r14);
2534 
2535   bind(&done);
2536 }
2537 
2538 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2539   Label done;
2540   DoubleRegister double_scratch = kScratchDoubleReg;
2541   DCHECK(!result.is(object));
2542 
2543   LoadDouble(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
2544   TryInlineTruncateDoubleToI(result, double_scratch, &done);
2545 
2546   // If we fell through, the inline version didn't succeed - call the stub instead.
2547   push(r14);
2548   DoubleToIStub stub(isolate(), object, result,
2549                      HeapNumber::kValueOffset - kHeapObjectTag, true, true);
2550   CallStub(&stub);
2551   pop(r14);
2552 
2553   bind(&done);
2554 }
2555 
2556 void MacroAssembler::TruncateNumberToI(Register object, Register result,
2557                                        Register heap_number_map,
2558                                        Register scratch1, Label* not_number) {
2559   Label done;
2560   DCHECK(!result.is(object));
2561 
2562   UntagAndJumpIfSmi(result, object, &done);
2563   JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2564   TruncateHeapNumberToI(result, object);
2565 
2566   bind(&done);
2567 }
2568 
2569 void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
2570                                          int num_least_bits) {
2571   if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
2572     // We rotate by kSmiShift amount, and extract the num_least_bits
2573     risbg(dst, src, Operand(64 - num_least_bits), Operand(63),
2574           Operand(64 - kSmiShift), true);
2575   } else {
2576     SmiUntag(dst, src);
2577     AndP(dst, Operand((1 << num_least_bits) - 1));
2578   }
2579 }
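
// Both paths above compute the same thing: the low num_least_bits of the
// untagged smi value, i.e. (illustrative formula)
//
//   dst = (src >> kSmiShift) & ((1 << num_least_bits) - 1);
//
// The risbg form does the rotate-and-extract in a single instruction; the
// fallback untags first and then masks.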
2580 
2581 void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
2582                                            int num_least_bits) {
2583   AndP(dst, src, Operand((1 << num_least_bits) - 1));
2584 }
2585 
2586 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
2587                                  SaveFPRegsMode save_doubles) {
2588   // All parameters are on the stack.  r2 has the return value after call.
2589 
2590   // If the expected number of arguments of the runtime function is
2591   // constant, we check that the actual number of arguments match the
2592   // expectation.
2593   CHECK(f->nargs < 0 || f->nargs == num_arguments);
2594 
2595   // TODO(1236192): Most runtime routines don't need the number of
2596   // arguments passed in because it is constant. At some point we
2597   // should remove this need and make the runtime routine entry code
2598   // smarter.
2599   mov(r2, Operand(num_arguments));
2600   mov(r3, Operand(ExternalReference(f, isolate())));
2601   CEntryStub stub(isolate(),
2602 #if V8_TARGET_ARCH_S390X
2603                   f->result_size,
2604 #else
2605                   1,
2606 #endif
2607                   save_doubles);
2608   CallStub(&stub);
2609 }
2610 
2611 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2612                                            int num_arguments) {
2613   mov(r2, Operand(num_arguments));
2614   mov(r3, Operand(ext));
2615 
2616   CEntryStub stub(isolate(), 1);
2617   CallStub(&stub);
2618 }
2619 
2620 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
2621   const Runtime::Function* function = Runtime::FunctionForId(fid);
2622   DCHECK_EQ(1, function->result_size);
2623   if (function->nargs >= 0) {
2624     mov(r2, Operand(function->nargs));
2625   }
2626   JumpToExternalReference(ExternalReference(fid, isolate()));
2627 }
2628 
2629 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2630   mov(r3, Operand(builtin));
2631   CEntryStub stub(isolate(), 1);
2632   Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2633 }
2634 
2635 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2636                                 Register scratch1, Register scratch2) {
2637   if (FLAG_native_code_counters && counter->Enabled()) {
2638     mov(scratch1, Operand(value));
2639     mov(scratch2, Operand(ExternalReference(counter)));
2640     StoreW(scratch1, MemOperand(scratch2));
2641   }
2642 }
2643 
2644 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2645                                       Register scratch1, Register scratch2) {
2646   DCHECK(value > 0 && is_int8(value));
2647   if (FLAG_native_code_counters && counter->Enabled()) {
2648     mov(scratch1, Operand(ExternalReference(counter)));
2649     // @TODO(john.yan): can be optimized by asi()
2650     LoadW(scratch2, MemOperand(scratch1));
2651     AddP(scratch2, Operand(value));
2652     StoreW(scratch2, MemOperand(scratch1));
2653   }
2654 }
2655 
2656 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2657                                       Register scratch1, Register scratch2) {
2658   DCHECK(value > 0 && is_int8(value));
2659   if (FLAG_native_code_counters && counter->Enabled()) {
2660     mov(scratch1, Operand(ExternalReference(counter)));
2661     // @TODO(john.yan): can be optimized by asi()
2662     LoadW(scratch2, MemOperand(scratch1));
2663     AddP(scratch2, Operand(-value));
2664     StoreW(scratch2, MemOperand(scratch1));
2665   }
2666 }
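
// Sketch of the asi() optimization mentioned in the TODOs above (an
// assumption, not the current implementation): with GENERAL_INSTR_EXT the
// load/add/store triple can collapse into a single add-immediate-to-storage
// via the Add32(MemOperand, Operand) wrapper defined later in this file:
//   mov(scratch1, Operand(ExternalReference(counter)));
//   Add32(MemOperand(scratch1), Operand(value));  // emits asi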
2667 
2668 void MacroAssembler::Assert(Condition cond, BailoutReason reason,
2669                             CRegister cr) {
2670   if (emit_debug_code()) Check(cond, reason, cr);
2671 }
2672 
2673 void MacroAssembler::AssertFastElements(Register elements) {
2674   if (emit_debug_code()) {
2675     DCHECK(!elements.is(r0));
2676     Label ok;
2677     push(elements);
2678     LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2679     CompareRoot(elements, Heap::kFixedArrayMapRootIndex);
2680     beq(&ok, Label::kNear);
2681     CompareRoot(elements, Heap::kFixedDoubleArrayMapRootIndex);
2682     beq(&ok, Label::kNear);
2683     CompareRoot(elements, Heap::kFixedCOWArrayMapRootIndex);
2684     beq(&ok, Label::kNear);
2685     Abort(kJSObjectWithFastElementsMapHasSlowElements);
2686     bind(&ok);
2687     pop(elements);
2688   }
2689 }
2690 
2691 void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
2692   Label L;
2693   b(cond, &L);
2694   Abort(reason);
2695   // will not return here
2696   bind(&L);
2697 }
2698 
2699 void MacroAssembler::Abort(BailoutReason reason) {
2700   Label abort_start;
2701   bind(&abort_start);
2702 #ifdef DEBUG
2703   const char* msg = GetBailoutReason(reason);
2704   if (msg != NULL) {
2705     RecordComment("Abort message: ");
2706     RecordComment(msg);
2707   }
2708 
2709   if (FLAG_trap_on_abort) {
2710     stop(msg);
2711     return;
2712   }
2713 #endif
2714 
2715   LoadSmiLiteral(r0, Smi::FromInt(reason));
2716   push(r0);
2717   // Disable stub call restrictions to always allow calls to abort.
2718   if (!has_frame_) {
2719     // We don't actually want to generate a pile of code for this, so just
2720     // claim there is a stack frame, without generating one.
2721     FrameScope scope(this, StackFrame::NONE);
2722     CallRuntime(Runtime::kAbort);
2723   } else {
2724     CallRuntime(Runtime::kAbort);
2725   }
2726   // will not return here
2727 }
2728 
2729 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2730   if (context_chain_length > 0) {
2731     // Move up the chain of contexts to the context containing the slot.
2732     LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2733     for (int i = 1; i < context_chain_length; i++) {
2734       LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2735     }
2736   } else {
2737     // Slot is in the current function context.  Move it into the
2738     // destination register in case we store into it (the write barrier
2739     // cannot be allowed to destroy the context in cp).
2740     LoadRR(dst, cp);
2741   }
2742 }
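
// For example (illustrative): LoadContext(r5, 2) follows
// Context::PREVIOUS_INDEX twice and leaves the grandparent context in r5,
// while LoadContext(r5, 0) simply copies cp into r5.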
2743 
2744 void MacroAssembler::LoadTransitionedArrayMapConditional(
2745     ElementsKind expected_kind, ElementsKind transitioned_kind,
2746     Register map_in_out, Register scratch, Label* no_map_match) {
2747   DCHECK(IsFastElementsKind(expected_kind));
2748   DCHECK(IsFastElementsKind(transitioned_kind));
2749 
2750   // Check that the function's map is the same as the expected cached map.
2751   LoadP(scratch, NativeContextMemOperand());
2752   LoadP(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
2753   CmpP(map_in_out, ip);
2754   bne(no_map_match);
2755 
2756   // Use the transitioned cached map.
2757   LoadP(map_in_out,
2758         ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
2759 }
2760 
2761 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
2762   LoadP(dst, NativeContextMemOperand());
2763   LoadP(dst, ContextMemOperand(dst, index));
2764 }
2765 
2766 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2767                                                   Register map,
2768                                                   Register scratch) {
2769   // Load the initial map. The global functions all have initial maps.
2770   LoadP(map,
2771         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2772   if (emit_debug_code()) {
2773     Label ok, fail;
2774     CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2775     b(&ok);
2776     bind(&fail);
2777     Abort(kGlobalFunctionsMustHaveInitialMap);
2778     bind(&ok);
2779   }
2780 }
2781 
2782 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2783     Register reg, Register scratch, Label* not_power_of_two_or_zero) {
2784   SubP(scratch, reg, Operand(1));
2785   CmpP(scratch, Operand::Zero());
2786   blt(not_power_of_two_or_zero);
2787   AndP(r0, reg, scratch /*, SetRC*/);  // Should be okay to remove rc
2788   bne(not_power_of_two_or_zero /*, cr0*/);
2789 }
2790 
2791 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
2792                                                      Register scratch,
2793                                                      Label* zero_and_neg,
2794                                                      Label* not_power_of_two) {
2795   SubP(scratch, reg, Operand(1));
2796   CmpP(scratch, Operand::Zero());
2797   blt(zero_and_neg);
2798   AndP(r0, reg, scratch /*, SetRC*/);  // Should be okay to remove rc
2799   bne(not_power_of_two /*, cr0*/);
2800 }
2801 
2802 #if !V8_TARGET_ARCH_S390X
2803 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
2804   DCHECK(!reg.is(overflow));
2805   LoadRR(overflow, reg);  // Save original value.
2806   SmiTag(reg);
2807   XorP(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
2808   LoadAndTestRR(overflow, overflow);
2809 }
2810 
2811 void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src,
2812                                          Register overflow) {
2813   if (dst.is(src)) {
2814     // Fall back to slower case.
2815     SmiTagCheckOverflow(dst, overflow);
2816   } else {
2817     DCHECK(!dst.is(src));
2818     DCHECK(!dst.is(overflow));
2819     DCHECK(!src.is(overflow));
2820     SmiTag(dst, src);
2821     XorP(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
2822     LoadAndTestRR(overflow, overflow);
2823   }
2824 }
2825 #endif
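
// Worked example for the 31-bit overflow check above (illustrative): smi
// tagging is a shift left by one, so it overflows exactly when bits 31 and
// 30 of the value differ. For value 0x40000000 the tagged result is
// 0x80000000 and value ^ tagged = 0xC0000000, which is negative, so
// LoadAndTestRR signals the overflow; for value 0x00001234 the xor is
// 0x0000365C and stays non-negative.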
2826 
2827 void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
2828                                       Label* on_not_both_smi) {
2829   STATIC_ASSERT(kSmiTag == 0);
2830   OrP(r0, reg1, reg2 /*, LeaveRC*/);  // should be okay to remove LeaveRC
2831   JumpIfNotSmi(r0, on_not_both_smi);
2832 }
2833 
2834 void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
2835                                        Label* smi_case) {
2836   STATIC_ASSERT(kSmiTag == 0);
2837   STATIC_ASSERT(kSmiTagSize == 1);
2838   // this won't work if src == dst
2839   DCHECK(src.code() != dst.code());
2840   SmiUntag(dst, src);
2841   TestIfSmi(src);
2842   beq(smi_case);
2843 }
2844 
2845 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
2846                                           Label* non_smi_case) {
2847   STATIC_ASSERT(kSmiTag == 0);
2848   STATIC_ASSERT(kSmiTagSize == 1);
2849 
2850   // We can use TestIfSmi directly when dst != src; otherwise the untag
2851   // operation clobbers the condition code before the tag bit can be
2852   // tested.
2853   if (src.code() != dst.code()) {
2854     SmiUntag(dst, src);
2855     TestIfSmi(src);
2856   } else {
2857     TestBit(src, 0, r0);
2858     SmiUntag(dst, src);
2859     LoadAndTestRR(r0, r0);
2860   }
2861   bne(non_smi_case);
2862 }
2863 
2864 void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
2865                                      Label* on_either_smi) {
2866   STATIC_ASSERT(kSmiTag == 0);
2867   JumpIfSmi(reg1, on_either_smi);
2868   JumpIfSmi(reg2, on_either_smi);
2869 }
2870 
2871 void MacroAssembler::AssertNotNumber(Register object) {
2872   if (emit_debug_code()) {
2873     STATIC_ASSERT(kSmiTag == 0);
2874     TestIfSmi(object);
2875     Check(ne, kOperandIsANumber, cr0);
2876     push(object);
2877     CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
2878     pop(object);
2879     Check(ne, kOperandIsANumber);
2880   }
2881 }
2882 
2883 void MacroAssembler::AssertNotSmi(Register object) {
2884   if (emit_debug_code()) {
2885     STATIC_ASSERT(kSmiTag == 0);
2886     TestIfSmi(object);
2887     Check(ne, kOperandIsASmi, cr0);
2888   }
2889 }
2890 
2891 void MacroAssembler::AssertSmi(Register object) {
2892   if (emit_debug_code()) {
2893     STATIC_ASSERT(kSmiTag == 0);
2894     TestIfSmi(object);
2895     Check(eq, kOperandIsNotSmi, cr0);
2896   }
2897 }
2898 
2899 void MacroAssembler::AssertString(Register object) {
2900   if (emit_debug_code()) {
2901     STATIC_ASSERT(kSmiTag == 0);
2902     TestIfSmi(object);
2903     Check(ne, kOperandIsASmiAndNotAString, cr0);
2904     push(object);
2905     LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
2906     CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
2907     pop(object);
2908     Check(lt, kOperandIsNotAString);
2909   }
2910 }
2911 
2912 void MacroAssembler::AssertName(Register object) {
2913   if (emit_debug_code()) {
2914     STATIC_ASSERT(kSmiTag == 0);
2915     TestIfSmi(object);
2916     Check(ne, kOperandIsASmiAndNotAName, cr0);
2917     push(object);
2918     LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
2919     CompareInstanceType(object, object, LAST_NAME_TYPE);
2920     pop(object);
2921     Check(le, kOperandIsNotAName);
2922   }
2923 }
2924 
2925 void MacroAssembler::AssertFunction(Register object) {
2926   if (emit_debug_code()) {
2927     STATIC_ASSERT(kSmiTag == 0);
2928     TestIfSmi(object);
2929     Check(ne, kOperandIsASmiAndNotAFunction, cr0);
2930     push(object);
2931     CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
2932     pop(object);
2933     Check(eq, kOperandIsNotAFunction);
2934   }
2935 }
2936 
2937 void MacroAssembler::AssertBoundFunction(Register object) {
2938   if (emit_debug_code()) {
2939     STATIC_ASSERT(kSmiTag == 0);
2940     TestIfSmi(object);
2941     Check(ne, kOperandIsASmiAndNotABoundFunction, cr0);
2942     push(object);
2943     CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
2944     pop(object);
2945     Check(eq, kOperandIsNotABoundFunction);
2946   }
2947 }
2948 
2949 void MacroAssembler::AssertGeneratorObject(Register object) {
2950   if (emit_debug_code()) {
2951     STATIC_ASSERT(kSmiTag == 0);
2952     TestIfSmi(object);
2953     Check(ne, kOperandIsASmiAndNotAGeneratorObject, cr0);
2954     push(object);
2955     CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
2956     pop(object);
2957     Check(eq, kOperandIsNotAGeneratorObject);
2958   }
2959 }
2960 
2961 void MacroAssembler::AssertReceiver(Register object) {
2962   if (emit_debug_code()) {
2963     STATIC_ASSERT(kSmiTag == 0);
2964     TestIfSmi(object);
2965     Check(ne, kOperandIsASmiAndNotAReceiver, cr0);
2966     push(object);
2967     STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2968     CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
2969     pop(object);
2970     Check(ge, kOperandIsNotAReceiver);
2971   }
2972 }
2973 
2974 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
2975                                                      Register scratch) {
2976   if (emit_debug_code()) {
2977     Label done_checking;
2978     AssertNotSmi(object);
2979     CompareRoot(object, Heap::kUndefinedValueRootIndex);
2980     beq(&done_checking, Label::kNear);
2981     LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2982     CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
2983     Assert(eq, kExpectedUndefinedOrCell);
2984     bind(&done_checking);
2985   }
2986 }
2987 
2988 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
2989   if (emit_debug_code()) {
2990     CompareRoot(reg, index);
2991     Check(eq, kHeapNumberMapRegisterClobbered);
2992   }
2993 }
2994 
2995 void MacroAssembler::JumpIfNotHeapNumber(Register object,
2996                                          Register heap_number_map,
2997                                          Register scratch,
2998                                          Label* on_not_heap_number) {
2999   LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3000   AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3001   CmpP(scratch, heap_number_map);
3002   bne(on_not_heap_number);
3003 }
3004 
3005 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
3006     Register first, Register second, Register scratch1, Register scratch2,
3007     Label* failure) {
3008   // Test that both first and second are sequential one-byte strings.
3009   // Assume that they are non-smis.
3010   LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
3011   LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
3012   LoadlB(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
3013   LoadlB(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
3014 
3015   JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
3016                                                  scratch2, failure);
3017 }
3018 
3019 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
3020                                                            Register second,
3021                                                            Register scratch1,
3022                                                            Register scratch2,
3023                                                            Label* failure) {
3024   // Check that neither is a smi.
3025   AndP(scratch1, first, second);
3026   JumpIfSmi(scratch1, failure);
3027   JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
3028                                                scratch2, failure);
3029 }
3030 
3031 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3032                                                      Label* not_unique_name) {
3033   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3034   Label succeed;
3035   AndP(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3036   beq(&succeed, Label::kNear);
3037   CmpP(reg, Operand(SYMBOL_TYPE));
3038   bne(not_unique_name);
3039 
3040   bind(&succeed);
3041 }
3042 
3043 // Allocates a heap number, or jumps to the gc_required label if the young
3044 // space is full and a scavenge is needed.
3045 void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
3046                                         Register scratch2,
3047                                         Register heap_number_map,
3048                                         Label* gc_required,
3049                                         MutableMode mode) {
3050   // Allocate an object in the heap for the heap number and tag it as a heap
3051   // object.
3052   Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3053            NO_ALLOCATION_FLAGS);
3054 
3055   Heap::RootListIndex map_index = mode == MUTABLE
3056                                       ? Heap::kMutableHeapNumberMapRootIndex
3057                                       : Heap::kHeapNumberMapRootIndex;
3058   AssertIsRoot(heap_number_map, map_index);
3059 
3060   // Store heap number map in the allocated object.
3061   StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3062 }
3063 
3064 void MacroAssembler::AllocateHeapNumberWithValue(
3065     Register result, DoubleRegister value, Register scratch1, Register scratch2,
3066     Register heap_number_map, Label* gc_required) {
3067   AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
3068   StoreDouble(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3069 }
3070 
3071 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
3072                                      Register value, Register scratch1,
3073                                      Register scratch2, Label* gc_required) {
3074   DCHECK(!result.is(constructor));
3075   DCHECK(!result.is(scratch1));
3076   DCHECK(!result.is(scratch2));
3077   DCHECK(!result.is(value));
3078 
3079   // Allocate JSValue in new space.
3080   Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
3081            NO_ALLOCATION_FLAGS);
3082 
3083   // Initialize the JSValue.
3084   LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
3085   StoreP(scratch1, FieldMemOperand(result, HeapObject::kMapOffset), r0);
3086   LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
3087   StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset), r0);
3088   StoreP(scratch1, FieldMemOperand(result, JSObject::kElementsOffset), r0);
3089   StoreP(value, FieldMemOperand(result, JSValue::kValueOffset), r0);
3090   STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
3091 }
3092 
3093 void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
3094                                Register scratch) {
3095   Label big_loop, left_bytes, done, fake_call;
3096 
3097   DCHECK(!scratch.is(r0));
3098 
3099   // big loop moves 256 bytes at a time
3100   bind(&big_loop);
3101   CmpP(length, Operand(static_cast<intptr_t>(0x100)));
3102   blt(&left_bytes);
3103 
3104   mvc(MemOperand(dst), MemOperand(src), 0x100);
3105 
3106   AddP(src, Operand(static_cast<intptr_t>(0x100)));
3107   AddP(dst, Operand(static_cast<intptr_t>(0x100)));
3108   SubP(length, Operand(static_cast<intptr_t>(0x100)));
3109   b(&big_loop);
3110 
3111   bind(&left_bytes);
3112   CmpP(length, Operand::Zero());
3113   beq(&done);
3114 
3115   // TODO(john.yan): A more optimal version would EX an MVC, but the
3116   // commented-out sequence below has an undiagnosed issue.
3117   /*
3118   b(scratch, &fake_call);  // use brasl to Save mvc addr to scratch
3119   mvc(MemOperand(dst), MemOperand(src), 1);
3120   bind(&fake_call);
3121   SubP(length, Operand(static_cast<intptr_t>(-1)));
3122   ex(length, MemOperand(scratch));  // execute mvc instr above
3123   AddP(src, length);
3124   AddP(dst, length);
3125   AddP(src, Operand(static_cast<intptr_t>(0x1)));
3126   AddP(dst, Operand(static_cast<intptr_t>(0x1)));
3127   */
3128 
3129   mvc(MemOperand(dst), MemOperand(src), 1);
3130   AddP(src, Operand(static_cast<intptr_t>(0x1)));
3131   AddP(dst, Operand(static_cast<intptr_t>(0x1)));
3132   SubP(length, Operand(static_cast<intptr_t>(0x1)));
3133 
3134   b(&left_bytes);
3135   bind(&done);
3136 }
3137 
3138 void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
3139                                                  Register count,
3140                                                  Register filler) {
3141   Label loop;
3142   bind(&loop);
3143   StoreP(filler, MemOperand(current_address));
3144   AddP(current_address, current_address, Operand(kPointerSize));
3145   BranchOnCount(r1, &loop);  // Note: decrements r1, so count must be r1.
3146 }
3147 
3148 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
3149                                                 Register end_address,
3150                                                 Register filler) {
3151   Label done;
3152   DCHECK(!filler.is(r1));
3153   DCHECK(!current_address.is(r1));
3154   DCHECK(!end_address.is(r1));
3155   SubP(r1, end_address, current_address /*, LeaveOE, SetRC*/);
3156   beq(&done, Label::kNear);
3157   ShiftRightP(r1, r1, Operand(kPointerSizeLog2));
3158   InitializeNFieldsWithFiller(current_address, r1, filler);
3159   bind(&done);
3160 }
3161 
3162 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
3163     Register first, Register second, Register scratch1, Register scratch2,
3164     Label* failure) {
3165   const int kFlatOneByteStringMask =
3166       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3167   const int kFlatOneByteStringTag =
3168       kStringTag | kOneByteStringTag | kSeqStringTag;
3169   if (!scratch1.is(first)) LoadRR(scratch1, first);
3170   if (!scratch2.is(second)) LoadRR(scratch2, second);
3171   nilf(scratch1, Operand(kFlatOneByteStringMask));
3172   CmpP(scratch1, Operand(kFlatOneByteStringTag));
3173   bne(failure);
3174   nilf(scratch2, Operand(kFlatOneByteStringMask));
3175   CmpP(scratch2, Operand(kFlatOneByteStringTag));
3176   bne(failure);
3177 }
3178 
3179 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
3180                                                               Register scratch,
3181                                                               Label* failure) {
3182   const int kFlatOneByteStringMask =
3183       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3184   const int kFlatOneByteStringTag =
3185       kStringTag | kOneByteStringTag | kSeqStringTag;
3186 
3187   if (!scratch.is(type)) LoadRR(scratch, type);
3188   nilf(scratch, Operand(kFlatOneByteStringMask));
3189   CmpP(scratch, Operand(kFlatOneByteStringTag));
3190   bne(failure);
3191 }
3192 
3193 static const int kRegisterPassedArguments = 5;
3194 
3195 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
3196                                               int num_double_arguments) {
3197   int stack_passed_words = 0;
3198   if (num_double_arguments > DoubleRegister::kNumRegisters) {
3199     stack_passed_words +=
3200         2 * (num_double_arguments - DoubleRegister::kNumRegisters);
3201   }
3202   // Up to five simple arguments are passed in registers r2..r6
3203   if (num_reg_arguments > kRegisterPassedArguments) {
3204     stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
3205   }
3206   return stack_passed_words;
3207 }
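
// Worked example (illustrative): with 7 integer arguments and no doubles,
// only r2..r6 are register-passed, so 7 - 5 = 2 words spill to the stack;
// assuming DoubleRegister::kNumRegisters is 16, 18 double arguments would
// add a further 2 * (18 - 16) = 4 stack words.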
3208 
3209 void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
3210                                                Register value,
3211                                                uint32_t encoding_mask) {
3212   Label is_object;
3213   TestIfSmi(string);
3214   Check(ne, kNonObject, cr0);
3215 
3216   LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset));
3217   LoadlB(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
3218 
3219   AndP(ip, Operand(kStringRepresentationMask | kStringEncodingMask));
3220   CmpP(ip, Operand(encoding_mask));
3221   Check(eq, kUnexpectedStringType);
3222 
3223 // The index is assumed to come in untagged. Tag it so it can be compared
3224 // with the string length without using a temp register; it is restored at
3225 // the end of this function.
3226 #if !V8_TARGET_ARCH_S390X
3227   Label index_tag_ok, index_tag_bad;
3228   JumpIfNotSmiCandidate(index, r0, &index_tag_bad);
3229 #endif
3230   SmiTag(index, index);
3231 #if !V8_TARGET_ARCH_S390X
3232   b(&index_tag_ok);
3233   bind(&index_tag_bad);
3234   Abort(kIndexIsTooLarge);
3235   bind(&index_tag_ok);
3236 #endif
3237 
3238   LoadP(ip, FieldMemOperand(string, String::kLengthOffset));
3239   CmpP(index, ip);
3240   Check(lt, kIndexIsTooLarge);
3241 
3242   DCHECK(Smi::FromInt(0) == 0);
3243   CmpP(index, Operand::Zero());
3244   Check(ge, kIndexIsNegative);
3245 
3246   SmiUntag(index, index);
3247 }
3248 
3249 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3250                                           int num_double_arguments,
3251                                           Register scratch) {
3252   int frame_alignment = ActivationFrameAlignment();
3253   int stack_passed_arguments =
3254       CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
3255   int stack_space = kNumRequiredStackFrameSlots;
3256   if (frame_alignment > kPointerSize) {
3257     // Make stack end at alignment and make room for stack arguments
3258     // -- preserving original value of sp.
3259     LoadRR(scratch, sp);
3260     lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kPointerSize));
3261     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
3262     ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
3263     StoreP(scratch, MemOperand(sp, (stack_passed_arguments)*kPointerSize));
3264   } else {
3265     stack_space += stack_passed_arguments;
3266   }
3267   lay(sp, MemOperand(sp, -(stack_space)*kPointerSize));
3268 }
3269 
3270 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3271                                           Register scratch) {
3272   PrepareCallCFunction(num_reg_arguments, 0, scratch);
3273 }
3274 
3275 void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); }
3276 
3277 void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); }
3278 
3279 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3280                                           DoubleRegister src2) {
3281   if (src2.is(d0)) {
3282     DCHECK(!src1.is(d2));
3283     Move(d2, src2);
3284     Move(d0, src1);
3285   } else {
3286     Move(d0, src1);
3287     Move(d2, src2);
3288   }
3289 }
3290 
3291 void MacroAssembler::CallCFunction(ExternalReference function,
3292                                    int num_reg_arguments,
3293                                    int num_double_arguments) {
3294   mov(ip, Operand(function));
3295   CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3296 }
3297 
3298 void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
3299                                    int num_double_arguments) {
3300   CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3301 }
3302 
3303 void MacroAssembler::CallCFunction(ExternalReference function,
3304                                    int num_arguments) {
3305   CallCFunction(function, num_arguments, 0);
3306 }
3307 
3308 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
3309   CallCFunction(function, num_arguments, 0);
3310 }
3311 
3312 void MacroAssembler::CallCFunctionHelper(Register function,
3313                                          int num_reg_arguments,
3314                                          int num_double_arguments) {
3315   DCHECK(has_frame());
3316 
3317   // Just call directly. The function called cannot cause a GC, or
3318   // allow preemption, so the return address in r14 (the return-address
3319   // register on s390) stays correct.
3320   Register dest = function;
3321   if (ABI_CALL_VIA_IP) {
3322     Move(ip, function);
3323     dest = ip;
3324   }
3325 
3326   Call(dest);
3327 
3328   int stack_passed_arguments =
3329       CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
3330   int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
3331   if (ActivationFrameAlignment() > kPointerSize) {
3332     // Load the original stack pointer (pre-alignment) from the stack
3333     LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
3334   } else {
3335     la(sp, MemOperand(sp, stack_space * kPointerSize));
3336   }
3337 }
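
// Usage sketch (an illustrative assumption, not taken from this file): a
// typical C call reserves the outgoing frame, loads the register arguments,
// and calls through an ExternalReference:
//   __ PrepareCallCFunction(1, ip);
//   __ LoadRR(r2, object);  // 'object' is a placeholder; r2 is C argument 0
//   __ CallCFunction(
//       ExternalReference::store_buffer_overflow_function(isolate()), 1);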
3338 
3339 void MacroAssembler::CheckPageFlag(
3340     Register object,
3341     Register scratch,  // scratch may be same register as object
3342     int mask, Condition cc, Label* condition_met) {
3343   DCHECK(cc == ne || cc == eq);
3344   ClearRightImm(scratch, object, Operand(kPageSizeBits));
3345 
3346   if (base::bits::IsPowerOfTwo32(mask)) {
3347     // If it's a power of two, we can use Test-Under-Mask Memory-Imm form
3348     // which allows testing of a single byte in memory.
3349     int32_t byte_offset = 4;
3350     uint32_t shifted_mask = mask;
3351     // Determine the byte offset to be tested
3352     if (mask <= 0x80) {
3353       byte_offset = kPointerSize - 1;
3354     } else if (mask < 0x8000) {
3355       byte_offset = kPointerSize - 2;
3356       shifted_mask = mask >> 8;
3357     } else if (mask < 0x800000) {
3358       byte_offset = kPointerSize - 3;
3359       shifted_mask = mask >> 16;
3360     } else {
3361       byte_offset = kPointerSize - 4;
3362       shifted_mask = mask >> 24;
3363     }
3364 #if V8_TARGET_LITTLE_ENDIAN
3365     // Reverse the byte_offset if emulating on little endian platform
3366     byte_offset = kPointerSize - byte_offset - 1;
3367 #endif
3368     tm(MemOperand(scratch, MemoryChunk::kFlagsOffset + byte_offset),
3369        Operand(shifted_mask));
3370   } else {
3371     LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3372     AndP(r0, scratch, Operand(mask));
3373   }
3374   // Should be okay to remove rc
3375 
3376   if (cc == ne) {
3377     bne(condition_met);
3378   }
3379   if (cc == eq) {
3380     beq(condition_met);
3381   }
3382 }
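
// Worked example (illustrative): a power-of-two flag such as 1 << 10
// (0x400) falls in the "mask < 0x8000" bucket above, so the tested byte is
// the second-least-significant byte of the flags word (byte_offset =
// kPointerSize - 2, mirrored on little-endian hosts) and shifted_mask
// becomes 0x400 >> 8 == 0x04, which TM can test directly in memory.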
3383 
3384 void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
3385                                  Register scratch1, Label* on_black) {
3386   HasColor(object, scratch0, scratch1, on_black, 1, 1);  // kBlackBitPattern.
3387   DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
3388 }
3389 
3390 void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
3391                               Register mask_scratch, Label* has_color,
3392                               int first_bit, int second_bit) {
3393   DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3394 
3395   GetMarkBits(object, bitmap_scratch, mask_scratch);
3396 
3397   Label other_color, word_boundary;
3398   LoadlW(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3399   // Test the first bit
3400   AndP(r0, ip, mask_scratch /*, SetRC*/);  // Should be okay to remove rc
3401   b(first_bit == 1 ? eq : ne, &other_color, Label::kNear);
3402   // Shift left 1
3403   // May need to load the next cell
3404   sll(mask_scratch, Operand(1) /*, SetRC*/);
3405   LoadAndTest32(mask_scratch, mask_scratch);
3406   beq(&word_boundary, Label::kNear);
3407   // Test the second bit
3408   AndP(r0, ip, mask_scratch /*, SetRC*/);  // Should be okay to remove rc
3409   b(second_bit == 1 ? ne : eq, has_color);
3410   b(&other_color, Label::kNear);
3411 
3412   bind(&word_boundary);
3413   LoadlW(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize));
3414   AndP(r0, ip, Operand(1));
3415   b(second_bit == 1 ? ne : eq, has_color);
3416   bind(&other_color);
3417 }
3418 
3419 void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
3420                                  Register mask_reg) {
3421   DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3422   LoadRR(bitmap_reg, addr_reg);
3423   nilf(bitmap_reg, Operand(~Page::kPageAlignmentMask));
3424   const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3425   ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2);
3426   ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits);
3427   ShiftLeftP(ip, ip, Operand(Bitmap::kBytesPerCellLog2));
3428   AddP(bitmap_reg, ip);
3429   LoadRR(ip, mask_reg);  // Move the shift amount to ip: the 31-bit
3430                          // shift-left form below clobbers its operand.
3431   LoadImmP(mask_reg, Operand(1));
3432   ShiftLeftP(mask_reg, mask_reg, ip);
3433 }
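
// In effect (illustrative restatement of the code above):
//   bitmap_reg = page_start + (((addr & Page::kPageAlignmentMask)
//                   >> kLowBits) << Bitmap::kBytesPerCellLog2)
//   mask_reg   = 1 << ((addr >> kPointerSizeLog2) & (Bitmap::kBitsPerCell - 1))
// i.e. bitmap_reg addresses the marking cell for the object (relative to
// MemoryChunk::kHeaderSize, which callers add) and mask_reg selects the
// object's first mark bit within that cell.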
3434 
3435 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
3436                                  Register mask_scratch, Register load_scratch,
3437                                  Label* value_is_white) {
3438   DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3439   GetMarkBits(value, bitmap_scratch, mask_scratch);
3440 
3441   // If the value is black or grey we don't need to do anything.
3442   DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3443   DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
3444   DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
3445   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3446 
3447   // Since both black and grey have a 1 in the first position and white does
3448   // not have a 1 there we only need to check one bit.
3449   LoadlW(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3450   LoadRR(r0, load_scratch);
3451   AndP(r0, mask_scratch);
3452   beq(value_is_white);
3453 }
3454 
3455 // Saturate a value into 8-bit unsigned integer
3456 //   if input_value < 0, output_value is 0
3457 //   if input_value > 255, output_value is 255
3458 //   otherwise output_value is the input_value
3459 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3460   int satval = (1 << 8) - 1;
3461 
3462   Label done, negative_label, overflow_label;
3463   CmpP(input_reg, Operand::Zero());
3464   blt(&negative_label);
3465 
3466   CmpP(input_reg, Operand(satval));
3467   bgt(&overflow_label);
3468   if (!output_reg.is(input_reg)) {
3469     LoadRR(output_reg, input_reg);
3470   }
3471   b(&done);
3472 
3473   bind(&negative_label);
3474   LoadImmP(output_reg, Operand::Zero());  // set to 0 if negative
3475   b(&done);
3476 
3477   bind(&overflow_label);  // set to satval if > satval
3478   LoadImmP(output_reg, Operand(satval));
3479 
3480   bind(&done);
3481 }
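
// For example (illustrative): ClampUint8 maps -5 to 0, 300 to 255, and
// leaves 128 unchanged.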
3482 
3483 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3484                                         DoubleRegister input_reg,
3485                                         DoubleRegister double_scratch) {
3486   Label above_zero;
3487   Label done;
3488   Label in_bounds;
3489 
3490   LoadDoubleLiteral(double_scratch, 0.0, result_reg);
3491   cdbr(input_reg, double_scratch);
3492   bgt(&above_zero, Label::kNear);
3493 
3494   // Double value is <= 0 or NaN; return 0.
3495   LoadIntLiteral(result_reg, 0);
3496   b(&done, Label::kNear);
3497 
3498   // Double value is >= 255, return 255.
3499   bind(&above_zero);
3500   LoadDoubleLiteral(double_scratch, 255.0, result_reg);
3501   cdbr(input_reg, double_scratch);
3502   ble(&in_bounds, Label::kNear);
3503   LoadIntLiteral(result_reg, 255);
3504   b(&done, Label::kNear);
3505 
3506   // In 0-255 range, round and truncate.
3507   bind(&in_bounds);
3508 
3509   // round to nearest (default rounding mode)
3510   cfdbr(ROUND_TO_NEAREST_WITH_TIES_TO_EVEN, result_reg, input_reg);
3511   bind(&done);
3512 }
3513 
3514 void MacroAssembler::LoadInstanceDescriptors(Register map,
3515                                              Register descriptors) {
3516   LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3517 }
3518 
3519 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3520   LoadlW(dst, FieldMemOperand(map, Map::kBitField3Offset));
3521   DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3522 }
3523 
3524 void MacroAssembler::EnumLength(Register dst, Register map) {
3525   STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3526   LoadW(dst, FieldMemOperand(map, Map::kBitField3Offset));
3527   And(dst, Operand(Map::EnumLengthBits::kMask));
3528   SmiTag(dst);
3529 }
3530 
3531 void MacroAssembler::LoadAccessor(Register dst, Register holder,
3532                                   int accessor_index,
3533                                   AccessorComponent accessor) {
3534   LoadP(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
3535   LoadInstanceDescriptors(dst, dst);
3536   LoadP(dst,
3537         FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
3538   const int getterOffset = AccessorPair::kGetterOffset;
3539   const int setterOffset = AccessorPair::kSetterOffset;
3540   int offset = ((accessor == ACCESSOR_GETTER) ? getterOffset : setterOffset);
3541   LoadP(dst, FieldMemOperand(dst, offset));
3542 }
3543 
3544 void MacroAssembler::CheckEnumCache(Label* call_runtime) {
3545   Register null_value = r7;
3546   Register empty_fixed_array_value = r8;
3547   LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3548   Label next, start;
3549   LoadRR(r4, r2);
3550 
3551   // Check if the enum length field is properly initialized, indicating that
3552   // there is an enum cache.
3553   LoadP(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
3554 
3555   EnumLength(r5, r3);
3556   CmpSmiLiteral(r5, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
3557   beq(call_runtime);
3558 
3559   LoadRoot(null_value, Heap::kNullValueRootIndex);
3560   b(&start, Label::kNear);
3561 
3562   bind(&next);
3563   LoadP(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
3564 
3565   // For all objects but the receiver, check that the cache is empty.
3566   EnumLength(r5, r3);
3567   CmpSmiLiteral(r5, Smi::FromInt(0), r0);
3568   bne(call_runtime);
3569 
3570   bind(&start);
3571 
3572   // Check that there are no elements. Register r4 contains the current JS
3573   // object we've reached through the prototype chain.
3574   Label no_elements;
3575   LoadP(r4, FieldMemOperand(r4, JSObject::kElementsOffset));
3576   CmpP(r4, empty_fixed_array_value);
3577   beq(&no_elements, Label::kNear);
3578 
3579   // Second chance, the object may be using the empty slow element dictionary.
3580   CompareRoot(r4, Heap::kEmptySlowElementDictionaryRootIndex);
3581   bne(call_runtime);
3582 
3583   bind(&no_elements);
3584   LoadP(r4, FieldMemOperand(r3, Map::kPrototypeOffset));
3585   CmpP(r4, null_value);
3586   bne(&next);
3587 }
3588 
3589 ////////////////////////////////////////////////////////////////////////////////
3590 //
3591 // New MacroAssembler Interfaces added for S390
3592 //
3593 ////////////////////////////////////////////////////////////////////////////////
3594 // Primarily used for loading constants
3595 // This should really move to be in macro-assembler as it
3596 // is really a pseudo instruction
3597 // Some usages of this intend for a FIXED_SEQUENCE to be used
3598 // @TODO - break this dependency so we can optimize mov() in general
3599 // and only use the generic version when we require a fixed sequence
3600 void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
3601                                         Representation r, Register scratch) {
3602   DCHECK(!r.IsDouble());
3603   if (r.IsInteger8()) {
3604     LoadB(dst, mem);
3605     lgbr(dst, dst);
3606   } else if (r.IsUInteger8()) {
3607     LoadlB(dst, mem);
3608   } else if (r.IsInteger16()) {
3609     LoadHalfWordP(dst, mem, scratch);
3610     lghr(dst, dst);
3611   } else if (r.IsUInteger16()) {
3612     LoadHalfWordP(dst, mem, scratch);
3613 #if V8_TARGET_ARCH_S390X
3614   } else if (r.IsInteger32()) {
3615     LoadW(dst, mem, scratch);
3616 #endif
3617   } else {
3618     LoadP(dst, mem, scratch);
3619   }
3620 }
3621 
3622 void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
3623                                          Representation r, Register scratch) {
3624   DCHECK(!r.IsDouble());
3625   if (r.IsInteger8() || r.IsUInteger8()) {
3626     StoreByte(src, mem, scratch);
3627   } else if (r.IsInteger16() || r.IsUInteger16()) {
3628     StoreHalfWord(src, mem, scratch);
3629 #if V8_TARGET_ARCH_S390X
3630   } else if (r.IsInteger32()) {
3631     StoreW(src, mem, scratch);
3632 #endif
3633   } else {
3634     if (r.IsHeapObject()) {
3635       AssertNotSmi(src);
3636     } else if (r.IsSmi()) {
3637       AssertSmi(src);
3638     }
3639     StoreP(src, mem, scratch);
3640   }
3641 }
3642 
3643 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
3644                                                      Register scratch_reg,
3645                                                      Register scratch2_reg,
3646                                                      Label* no_memento_found) {
3647   Label map_check;
3648   Label top_check;
3649   ExternalReference new_space_allocation_top_adr =
3650       ExternalReference::new_space_allocation_top_address(isolate());
3651   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
3652   const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
3653 
3654   DCHECK(!AreAliased(receiver_reg, scratch_reg));
3655 
3656   // Bail out if the object is not in new space.
3657   JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
3658 
3659   DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
3660 
3661   // If the object is in new space, we need to check whether it is on the same
3662   // page as the current top.
3663   AddP(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
3664   mov(ip, Operand(new_space_allocation_top_adr));
3665   LoadP(ip, MemOperand(ip));
3666   XorP(r0, scratch_reg, ip);
3667   AndP(r0, r0, Operand(~Page::kPageAlignmentMask));
3668   beq(&top_check, Label::kNear);
3669   // The object is on a different page than allocation top. Bail out if the
3670   // object sits on the page boundary as no memento can follow and we cannot
3671   // touch the memory following it.
3672   XorP(r0, scratch_reg, receiver_reg);
3673   AndP(r0, r0, Operand(~Page::kPageAlignmentMask));
3674   bne(no_memento_found);
3675   // Continue with the actual map check.
3676   b(&map_check, Label::kNear);
3677   // If top is on the same page as the current object, we need to check whether
3678   // we are below top.
3679   bind(&top_check);
3680   CmpP(scratch_reg, ip);
3681   bgt(no_memento_found);
3682   // Memento map check.
3683   bind(&map_check);
3684   LoadP(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
3685   CmpP(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()));
3686 }
3687 
3688 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
3689                                    Register reg4, Register reg5,
3690                                    Register reg6) {
3691   RegList regs = 0;
3692   if (reg1.is_valid()) regs |= reg1.bit();
3693   if (reg2.is_valid()) regs |= reg2.bit();
3694   if (reg3.is_valid()) regs |= reg3.bit();
3695   if (reg4.is_valid()) regs |= reg4.bit();
3696   if (reg5.is_valid()) regs |= reg5.bit();
3697   if (reg6.is_valid()) regs |= reg6.bit();
3698 
3699   const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
3700   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
3701     int code = config->GetAllocatableGeneralCode(i);
3702     Register candidate = Register::from_code(code);
3703     if (regs & candidate.bit()) continue;
3704     return candidate;
3705   }
3706   UNREACHABLE();
3707   return no_reg;
3708 }
3709 
3710 void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
3711                                                       Register scratch0,
3712                                                       Register scratch1,
3713                                                       Label* found) {
3714   DCHECK(!scratch1.is(scratch0));
3715   Register current = scratch0;
3716   Label loop_again, end;
3717 
3718   // Start from the object and walk up its prototype chain via the maps.
3719   LoadRR(current, object);
3720   LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
3721   LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
3722   CompareRoot(current, Heap::kNullValueRootIndex);
3723   beq(&end);
3724 
3725   // Loop based on the map going up the prototype chain.
3726   bind(&loop_again);
3727   LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
3728 
3729   STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
3730   STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
3731   LoadlB(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
3732   CmpP(scratch1, Operand(JS_OBJECT_TYPE));
3733   blt(found);
3734 
3735   LoadlB(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
3736   DecodeField<Map::ElementsKindBits>(scratch1);
3737   CmpP(scratch1, Operand(DICTIONARY_ELEMENTS));
3738   beq(found);
3739   LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
3740   CompareRoot(current, Heap::kNullValueRootIndex);
3741   bne(&loop_again);
3742 
3743   bind(&end);
3744 }
3745 
3746 void MacroAssembler::mov(Register dst, const Operand& src) {
3747   if (src.rmode_ != kRelocInfo_NONEPTR) {
3748     // some form of relocation needed
3749     RecordRelocInfo(src.rmode_, src.imm_);
3750   }
3751 
3752 #if V8_TARGET_ARCH_S390X
3753   int64_t value = src.immediate();
3754   int32_t hi_32 = static_cast<int64_t>(value) >> 32;
3755   int32_t lo_32 = static_cast<int32_t>(value);
3756 
3757   iihf(dst, Operand(hi_32));
3758   iilf(dst, Operand(lo_32));
3759 #else
3760   int value = src.immediate();
3761   iilf(dst, Operand(value));
3762 #endif
3763 }
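
// Worked example (illustrative, 64-bit): for the immediate
// 0x0000000180000000 the split above yields hi_32 == 0x00000001 and
// lo_32 == 0x80000000, so IIHF/IILF rebuild the full value as a fixed
// two-instruction sequence that stays patchable for relocated constants.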
3764 
3765 void MacroAssembler::Mul(Register dst, Register src1, Register src2) {
3766   if (dst.is(src2)) {
3767     MulP(dst, src1);
3768   } else if (dst.is(src1)) {
3769     MulP(dst, src2);
3770   } else {
3771     Move(dst, src1);
3772     MulP(dst, src2);
3773   }
3774 }
3775 
3776 void MacroAssembler::DivP(Register dividend, Register divider) {
3777   // The dividend must be the even register of an even/odd register pair.
3778   DCHECK(dividend.code() % 2 == 0);
3779 #if V8_TARGET_ARCH_S390X
3780   dsgr(dividend, divider);
3781 #else
3782   dr(dividend, divider);
3783 #endif
3784 }
3785 
3786 void MacroAssembler::MulP(Register dst, const Operand& opnd) {
3787 #if V8_TARGET_ARCH_S390X
3788   msgfi(dst, opnd);
3789 #else
3790   msfi(dst, opnd);
3791 #endif
3792 }
3793 
3794 void MacroAssembler::MulP(Register dst, Register src) {
3795 #if V8_TARGET_ARCH_S390X
3796   msgr(dst, src);
3797 #else
3798   msr(dst, src);
3799 #endif
3800 }
3801 
3802 void MacroAssembler::MulP(Register dst, const MemOperand& opnd) {
3803 #if V8_TARGET_ARCH_S390X
3804   if (is_uint16(opnd.offset())) {
3805     ms(dst, opnd);
3806   } else if (is_int20(opnd.offset())) {
3807     msy(dst, opnd);
3808   } else {
3809     UNIMPLEMENTED();
3810   }
3811 #else
3812   if (is_int20(opnd.offset())) {
3813     msg(dst, opnd);
3814   } else {
3815     UNIMPLEMENTED();
3816   }
3817 #endif
3818 }
3819 
3820 //----------------------------------------------------------------------------
3821 //  Add Instructions
3822 //----------------------------------------------------------------------------
3823 
3824 // Add 32-bit (Register dst = Register dst + Immediate opnd)
3825 void MacroAssembler::Add32(Register dst, const Operand& opnd) {
3826   if (is_int16(opnd.immediate()))
3827     ahi(dst, opnd);
3828   else
3829     afi(dst, opnd);
3830 }
3831 
3832 // Add Pointer Size (Register dst = Register dst + Immediate opnd)
3833 void MacroAssembler::AddP(Register dst, const Operand& opnd) {
3834 #if V8_TARGET_ARCH_S390X
3835   if (is_int16(opnd.immediate()))
3836     aghi(dst, opnd);
3837   else
3838     agfi(dst, opnd);
3839 #else
3840   Add32(dst, opnd);
3841 #endif
3842 }
3843 
3844 // Add 32-bit (Register dst = Register src + Immediate opnd)
3845 void MacroAssembler::Add32(Register dst, Register src, const Operand& opnd) {
3846   if (!dst.is(src)) {
3847     if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
3848       ahik(dst, src, opnd);
3849       return;
3850     }
3851     lr(dst, src);
3852   }
3853   Add32(dst, opnd);
3854 }
3855 
3856 // Add Pointer Size (Register dst = Register src + Immediate opnd)
3857 void MacroAssembler::AddP(Register dst, Register src, const Operand& opnd) {
3858   if (!dst.is(src)) {
3859     if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
3860       AddPImm_RRI(dst, src, opnd);
3861       return;
3862     }
3863     LoadRR(dst, src);
3864   }
3865   AddP(dst, opnd);
3866 }
3867 
3868 // Add 32-bit (Register dst = Register dst + Register src)
3869 void MacroAssembler::Add32(Register dst, Register src) { ar(dst, src); }
3870 
3871 // Add Pointer Size (Register dst = Register dst + Register src)
3872 void MacroAssembler::AddP(Register dst, Register src) { AddRR(dst, src); }
3873 
3874 // Add Pointer Size with src extension
3875 //     (Register dst(ptr) = Register dst (ptr) + Register src (32 | 32->64))
3876 // src is treated as a 32-bit signed integer, which is sign extended to
3877 // 64-bit if necessary.
3878 void MacroAssembler::AddP_ExtendSrc(Register dst, Register src) {
3879 #if V8_TARGET_ARCH_S390X
3880   agfr(dst, src);
3881 #else
3882   ar(dst, src);
3883 #endif
3884 }
3885 
3886 // Add 32-bit (Register dst = Register src1 + Register src2)
3887 void MacroAssembler::Add32(Register dst, Register src1, Register src2) {
3888   if (!dst.is(src1) && !dst.is(src2)) {
3889     // We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
3890     // as AR is a smaller instruction
3891     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3892       ark(dst, src1, src2);
3893       return;
3894     } else {
3895       lr(dst, src1);
3896     }
3897   } else if (dst.is(src2)) {
3898     src2 = src1;
3899   }
3900   ar(dst, src2);
3901 }
3902 
3903 // Add Pointer Size (Register dst = Register src1 + Register src2)
3904 void MacroAssembler::AddP(Register dst, Register src1, Register src2) {
3905   if (!dst.is(src1) && !dst.is(src2)) {
3906     // We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
3907     // as AR is a smaller instruction
3908     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3909       AddP_RRR(dst, src1, src2);
3910       return;
3911     } else {
3912       LoadRR(dst, src1);
3913     }
3914   } else if (dst.is(src2)) {
3915     src2 = src1;
3916   }
3917   AddRR(dst, src2);
3918 }
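
// For example (illustrative): AddP(r3, r4, r5) emits ARK/AGRK when
// DISTINCT_OPS is available and otherwise a register move followed by
// AR/AGR, while AddP(r3, r3, r5) always uses the two-operand form because
// the destination already aliases a source.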
3919 
3920 // Add Pointer Size with src extension
3921 //      (Register dst (ptr) = Register dst (ptr) + Register src1 (ptr) +
3922 //                            Register src2 (32 | 32->64))
3923 // src is treated as a 32-bit signed integer, which is sign extended to
3924 // 64-bit if necessary.
3925 void MacroAssembler::AddP_ExtendSrc(Register dst, Register src1,
3926                                     Register src2) {
3927 #if V8_TARGET_ARCH_S390X
3928   if (dst.is(src2)) {
3929     // The source we need to sign extend is the same as result.
3930     lgfr(dst, src2);
3931     agr(dst, src1);
3932   } else {
3933     if (!dst.is(src1)) LoadRR(dst, src1);
3934     agfr(dst, src2);
3935   }
3936 #else
3937   AddP(dst, src1, src2);
3938 #endif
3939 }
3940 
3941 // Add 32-bit (Register-Memory)
Add32(Register dst,const MemOperand & opnd)3942 void MacroAssembler::Add32(Register dst, const MemOperand& opnd) {
3943   DCHECK(is_int20(opnd.offset()));
3944   if (is_uint12(opnd.offset()))
3945     a(dst, opnd);
3946   else
3947     ay(dst, opnd);
3948 }
3949 
3950 // Add Pointer Size (Register-Memory)
AddP(Register dst,const MemOperand & opnd)3951 void MacroAssembler::AddP(Register dst, const MemOperand& opnd) {
3952 #if V8_TARGET_ARCH_S390X
3953   DCHECK(is_int20(opnd.offset()));
3954   ag(dst, opnd);
3955 #else
3956   Add32(dst, opnd);
3957 #endif
3958 }
3959 
3960 // Add Pointer Size with src extension
3961 //      (Register dst (ptr) = Register dst (ptr) + Mem opnd (32 | 32->64))
3962 // src is treated as a 32-bit signed integer, which is sign extended to
3963 // 64-bit if necessary.
AddP_ExtendSrc(Register dst,const MemOperand & opnd)3964 void MacroAssembler::AddP_ExtendSrc(Register dst, const MemOperand& opnd) {
3965 #if V8_TARGET_ARCH_S390X
3966   DCHECK(is_int20(opnd.offset()));
3967   agf(dst, opnd);
3968 #else
3969   Add32(dst, opnd);
3970 #endif
3971 }
3972 
3973 // Add 32-bit (Memory - Immediate)
Add32(const MemOperand & opnd,const Operand & imm)3974 void MacroAssembler::Add32(const MemOperand& opnd, const Operand& imm) {
3975   DCHECK(is_int8(imm.immediate()));
3976   DCHECK(is_int20(opnd.offset()));
3977   DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
3978   asi(opnd, imm);
3979 }
3980 
3981 // Add Pointer-sized (Memory - Immediate)
AddP(const MemOperand & opnd,const Operand & imm)3982 void MacroAssembler::AddP(const MemOperand& opnd, const Operand& imm) {
3983   DCHECK(is_int8(imm.immediate()));
3984   DCHECK(is_int20(opnd.offset()));
3985   DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
3986 #if V8_TARGET_ARCH_S390X
3987   agsi(opnd, imm);
3988 #else
3989   asi(opnd, imm);
3990 #endif
3991 }
3992 
3993 //----------------------------------------------------------------------------
3994 //  Add Logical Instructions
3995 //----------------------------------------------------------------------------
3996 
3997 // Add Logical With Carry 32-bit (Register dst = Register src1 + Register src2 + carry)
AddLogicalWithCarry32(Register dst,Register src1,Register src2)3998 void MacroAssembler::AddLogicalWithCarry32(Register dst, Register src1,
3999                                            Register src2) {
4000   if (!dst.is(src2) && !dst.is(src1)) {
4001     lr(dst, src1);
4002     alcr(dst, src2);
4003   } else if (!dst.is(src2)) {
4004     // dst == src1
4005     DCHECK(dst.is(src1));
4006     alcr(dst, src2);
4007   } else {
4008     // dst == src2
4009     DCHECK(dst.is(src2));
4010     alcr(dst, src1);
4011   }
4012 }
4013 
4014 // Add Logical 32-bit (Register dst = Register src1 + Register src2)
AddLogical32(Register dst,Register src1,Register src2)4015 void MacroAssembler::AddLogical32(Register dst, Register src1, Register src2) {
4016   if (!dst.is(src2) && !dst.is(src1)) {
4017     lr(dst, src1);
4018     alr(dst, src2);
4019   } else if (!dst.is(src2)) {
4020     // dst == src1
4021     DCHECK(dst.is(src1));
4022     alr(dst, src2);
4023   } else {
4024     // dst == src2
4025     DCHECK(dst.is(src2));
4026     alr(dst, src1);
4027   }
4028 }
4029 
4030 // Add Logical 32-bit (Register dst = Register dst + Immediate opnd)
AddLogical(Register dst,const Operand & imm)4031 void MacroAssembler::AddLogical(Register dst, const Operand& imm) {
4032   alfi(dst, imm);
4033 }
4034 
4035 // Add Logical Pointer Size (Register dst = Register dst + Immediate opnd)
AddLogicalP(Register dst,const Operand & imm)4036 void MacroAssembler::AddLogicalP(Register dst, const Operand& imm) {
4037 #ifdef V8_TARGET_ARCH_S390X
4038   algfi(dst, imm);
4039 #else
4040   AddLogical(dst, imm);
4041 #endif
4042 }
4043 
4044 // Add Logical 32-bit (Register-Memory)
AddLogical(Register dst,const MemOperand & opnd)4045 void MacroAssembler::AddLogical(Register dst, const MemOperand& opnd) {
4046   DCHECK(is_int20(opnd.offset()));
4047   if (is_uint12(opnd.offset()))
4048     al_z(dst, opnd);
4049   else
4050     aly(dst, opnd);
4051 }
4052 
4053 // Add Logical Pointer Size (Register-Memory)
AddLogicalP(Register dst,const MemOperand & opnd)4054 void MacroAssembler::AddLogicalP(Register dst, const MemOperand& opnd) {
4055 #if V8_TARGET_ARCH_S390X
4056   DCHECK(is_int20(opnd.offset()));
4057   alg(dst, opnd);
4058 #else
4059   AddLogical(dst, opnd);
4060 #endif
4061 }
4062 
4063 //----------------------------------------------------------------------------
4064 //  Subtract Instructions
4065 //----------------------------------------------------------------------------
4066 
4067 // Subtract Logical With Borrow 32-bit (Register dst = Register src1 -
4068 // Register src2 - borrow)
SubLogicalWithBorrow32(Register dst,Register src1,Register src2)4069 void MacroAssembler::SubLogicalWithBorrow32(Register dst, Register src1,
4070                                             Register src2) {
4071   if (!dst.is(src2) && !dst.is(src1)) {
4072     lr(dst, src1);
4073     slbr(dst, src2);
4074   } else if (!dst.is(src2)) {
4075     // dst == src1
4076     DCHECK(dst.is(src1));
4077     slbr(dst, src2);
4078   } else {
4079     // dst == src2
4080     DCHECK(dst.is(src2));
4081     lr(r0, dst);
4082     SubLogicalWithBorrow32(dst, src1, r0);
4083   }
4084 }
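
// Note on the dst == src2 case above: SLBR subtracts its second operand from
// the first, so the original dst value is first copied to r0 and the helper
// recurses as SubLogicalWithBorrow32(dst, src1, r0), i.e. dst = src1 - old
// dst - borrow, at the cost of clobbering r0.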
4085 
4086 // Subtract Logical 32-bit (Register dst = Register src1 - Register src2)
SubLogical32(Register dst,Register src1,Register src2)4087 void MacroAssembler::SubLogical32(Register dst, Register src1, Register src2) {
4088   if (!dst.is(src2) && !dst.is(src1)) {
4089     lr(dst, src1);
4090     slr(dst, src2);
4091   } else if (!dst.is(src2)) {
4092     // dst == src1
4093     DCHECK(dst.is(src1));
4094     slr(dst, src2);
4095   } else {
4096     // dst == src2
4097     DCHECK(dst.is(src2));
4098     lr(r0, dst);
4099     SubLogical32(dst, src1, r0);
4100   }
4101 }
4102 
4103 // Subtract 32-bit (Register dst = Register dst - Immediate opnd)
Sub32(Register dst,const Operand & imm)4104 void MacroAssembler::Sub32(Register dst, const Operand& imm) {
4105   Add32(dst, Operand(-(imm.imm_)));
4106 }
4107 
4108 // Subtract Pointer Size (Register dst = Register dst - Immediate opnd)
SubP(Register dst,const Operand & imm)4109 void MacroAssembler::SubP(Register dst, const Operand& imm) {
4110   AddP(dst, Operand(-(imm.imm_)));
4111 }
4112 
4113 // Subtract 32-bit (Register dst = Register src - Immediate opnd)
Sub32(Register dst,Register src,const Operand & imm)4114 void MacroAssembler::Sub32(Register dst, Register src, const Operand& imm) {
4115   Add32(dst, src, Operand(-(imm.imm_)));
4116 }
4117 
4118 // Subtract Pointer Sized (Register dst = Register src - Immediate opnd)
SubP(Register dst,Register src,const Operand & imm)4119 void MacroAssembler::SubP(Register dst, Register src, const Operand& imm) {
4120   AddP(dst, src, Operand(-(imm.imm_)));
4121 }
4122 
4123 // Subtract 32-bit (Register dst = Register dst - Register src)
Sub32(Register dst,Register src)4124 void MacroAssembler::Sub32(Register dst, Register src) { sr(dst, src); }
4125 
4126 // Subtract Pointer Size (Register dst = Register dst - Register src)
SubP(Register dst,Register src)4127 void MacroAssembler::SubP(Register dst, Register src) { SubRR(dst, src); }
4128 
4129 // Subtract Pointer Size with src extension
4130 //     (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
4131 // src is treated as a 32-bit signed integer, which is sign extended to
4132 // 64-bit if necessary.
SubP_ExtendSrc(Register dst,Register src)4133 void MacroAssembler::SubP_ExtendSrc(Register dst, Register src) {
4134 #if V8_TARGET_ARCH_S390X
4135   sgfr(dst, src);
4136 #else
4137   sr(dst, src);
4138 #endif
4139 }
4140 
4141 // Subtract 32-bit (Register = Register - Register)
Sub32(Register dst,Register src1,Register src2)4142 void MacroAssembler::Sub32(Register dst, Register src1, Register src2) {
4143   // Use non-clobbering version if possible
4144   if (CpuFeatures::IsSupported(DISTINCT_OPS) && !dst.is(src1)) {
4145     srk(dst, src1, src2);
4146     return;
4147   }
4148   if (!dst.is(src1) && !dst.is(src2)) lr(dst, src1);
4149   // When dst aliases src2 (dst = src1 - dst), subtract src1 and negate.
4150   if (!dst.is(src1) && dst.is(src2)) {
4151     sr(dst, src1);  // dst = (dst - src)
4152     lcr(dst, dst);  // dst = -dst
4153   } else {
4154     sr(dst, src2);
4155   }
4156 }
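
// Worked example of the swap-and-negate path above (taken when DISTINCT_OPS
// is not available): Sub32(r3, r4, r3) computes r3 = r4 - r3 in place.  With
// r4 = 10 and r3 = 3, "sr r3, r4" leaves r3 = 3 - 10 = -7, and "lcr r3, r3"
// negates it to 7, the desired 10 - 3.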
4157 
4158 // Subtract Pointer Sized (Register = Register - Register)
SubP(Register dst,Register src1,Register src2)4159 void MacroAssembler::SubP(Register dst, Register src1, Register src2) {
4160   // Use non-clobbering version if possible
4161   if (CpuFeatures::IsSupported(DISTINCT_OPS) && !dst.is(src1)) {
4162     SubP_RRR(dst, src1, src2);
4163     return;
4164   }
4165   if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);
4166   // When dst aliases src2 (dst = src1 - dst), subtract src1 and negate.
4167   if (!dst.is(src1) && dst.is(src2)) {
4168     SubP(dst, src1);             // dst = (dst - src)
4169     LoadComplementRR(dst, dst);  // dst = -dst
4170   } else {
4171     SubP(dst, src2);
4172   }
4173 }
4174 
4175 // Subtract Pointer Size with src extension
4176 //     (Register dst(ptr) = Register src1 (ptr) - Register src2 (32 | 32->64))
4177 // src2 is treated as a 32-bit signed integer, which is sign extended to
4178 // 64-bit if necessary.
SubP_ExtendSrc(Register dst,Register src1,Register src2)4179 void MacroAssembler::SubP_ExtendSrc(Register dst, Register src1,
4180                                     Register src2) {
4181 #if V8_TARGET_ARCH_S390X
4182   if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);
4183 
4184   // When dst aliases src2 (dst = src1 - dst), subtract src1 and negate.
4185   if (!dst.is(src1) && dst.is(src2)) {
4186     lgfr(dst, dst);              // Sign extend this operand first.
4187     SubP(dst, src1);             // dst = (dst - src)
4188     LoadComplementRR(dst, dst);  // dst = -dst
4189   } else {
4190     sgfr(dst, src2);
4191   }
4192 #else
4193   SubP(dst, src1, src2);
4194 #endif
4195 }
4196 
4197 // Subtract 32-bit (Register-Memory)
Sub32(Register dst,const MemOperand & opnd)4198 void MacroAssembler::Sub32(Register dst, const MemOperand& opnd) {
4199   DCHECK(is_int20(opnd.offset()));
4200   if (is_uint12(opnd.offset()))
4201     s(dst, opnd);
4202   else
4203     sy(dst, opnd);
4204 }
4205 
4206 // Subtract Pointer Sized (Register - Memory)
SubP(Register dst,const MemOperand & opnd)4207 void MacroAssembler::SubP(Register dst, const MemOperand& opnd) {
4208 #if V8_TARGET_ARCH_S390X
4209   sg(dst, opnd);
4210 #else
4211   Sub32(dst, opnd);
4212 #endif
4213 }
4214 
MovIntToFloat(DoubleRegister dst,Register src)4215 void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
4216   sllg(src, src, Operand(32));
4217   ldgr(dst, src);
4218 }
4219 
MovFloatToInt(Register dst,DoubleRegister src)4220 void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
4221   lgdr(dst, src);
4222   srlg(dst, dst, Operand(32));
4223 }
4224 
SubP_ExtendSrc(Register dst,const MemOperand & opnd)4225 void MacroAssembler::SubP_ExtendSrc(Register dst, const MemOperand& opnd) {
4226 #if V8_TARGET_ARCH_S390X
4227   DCHECK(is_int20(opnd.offset()));
4228   sgf(dst, opnd);
4229 #else
4230   Sub32(dst, opnd);
4231 #endif
4232 }
4233 
4234 //----------------------------------------------------------------------------
4235 //  Subtract Logical Instructions
4236 //----------------------------------------------------------------------------
4237 
4238 // Subtract Logical 32-bit (Register - Memory)
SubLogical(Register dst,const MemOperand & opnd)4239 void MacroAssembler::SubLogical(Register dst, const MemOperand& opnd) {
4240   DCHECK(is_int20(opnd.offset()));
4241   if (is_uint12(opnd.offset()))
4242     sl(dst, opnd);
4243   else
4244     sly(dst, opnd);
4245 }
4246 
4247 // Subtract Logical Pointer Sized (Register - Memory)
SubLogicalP(Register dst,const MemOperand & opnd)4248 void MacroAssembler::SubLogicalP(Register dst, const MemOperand& opnd) {
4249   DCHECK(is_int20(opnd.offset()));
4250 #if V8_TARGET_ARCH_S390X
4251   slgf(dst, opnd);
4252 #else
4253   SubLogical(dst, opnd);
4254 #endif
4255 }
4256 
4257 // Subtract Logical Pointer Size with src extension
4258 //      (Register dst (ptr) = Register dst (ptr) - Mem opnd (32 | 32->64))
4259 // src is treated as a 32-bit signed integer, which is sign extended to
4260 // 64-bit if necessary.
SubLogicalP_ExtendSrc(Register dst,const MemOperand & opnd)4261 void MacroAssembler::SubLogicalP_ExtendSrc(Register dst,
4262                                            const MemOperand& opnd) {
4263 #if V8_TARGET_ARCH_S390X
4264   DCHECK(is_int20(opnd.offset()));
4265   slgf(dst, opnd);
4266 #else
4267   SubLogical(dst, opnd);
4268 #endif
4269 }
4270 
4271 //----------------------------------------------------------------------------
4272 //  Bitwise Operations
4273 //----------------------------------------------------------------------------
4274 
4275 // AND 32-bit - dst = dst & src
And(Register dst,Register src)4276 void MacroAssembler::And(Register dst, Register src) { nr(dst, src); }
4277 
4278 // AND Pointer Size - dst = dst & src
AndP(Register dst,Register src)4279 void MacroAssembler::AndP(Register dst, Register src) { AndRR(dst, src); }
4280 
4281 // Non-clobbering AND 32-bit - dst = src1 & src2
And(Register dst,Register src1,Register src2)4282 void MacroAssembler::And(Register dst, Register src1, Register src2) {
4283   if (!dst.is(src1) && !dst.is(src2)) {
4284     // We prefer to generate NR/NGR over the non-clobbering NRK/NGRK,
4285     // as NR is a smaller instruction.
4286     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4287       nrk(dst, src1, src2);
4288       return;
4289     } else {
4290       lr(dst, src1);
4291     }
4292   } else if (dst.is(src2)) {
4293     src2 = src1;
4294   }
4295   And(dst, src2);
4296 }
4297 
4298 // Non-clobbering AND pointer size - dst = src1 & src2
AndP(Register dst,Register src1,Register src2)4299 void MacroAssembler::AndP(Register dst, Register src1, Register src2) {
4300   if (!dst.is(src1) && !dst.is(src2)) {
4301     // We prefer to generate NR/NGR over the non-clobbering NRK/NGRK,
4302     // as NR is a smaller instruction.
4303     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4304       AndP_RRR(dst, src1, src2);
4305       return;
4306     } else {
4307       LoadRR(dst, src1);
4308     }
4309   } else if (dst.is(src2)) {
4310     src2 = src1;
4311   }
4312   AndP(dst, src2);
4313 }
4314 
4315 // AND 32-bit (Reg - Mem)
And(Register dst,const MemOperand & opnd)4316 void MacroAssembler::And(Register dst, const MemOperand& opnd) {
4317   DCHECK(is_int20(opnd.offset()));
4318   if (is_uint12(opnd.offset()))
4319     n(dst, opnd);
4320   else
4321     ny(dst, opnd);
4322 }
4323 
4324 // AND Pointer Size (Reg - Mem)
AndP(Register dst,const MemOperand & opnd)4325 void MacroAssembler::AndP(Register dst, const MemOperand& opnd) {
4326   DCHECK(is_int20(opnd.offset()));
4327 #if V8_TARGET_ARCH_S390X
4328   ng(dst, opnd);
4329 #else
4330   And(dst, opnd);
4331 #endif
4332 }
4333 
4334 // AND 32-bit - dst = dst & imm
And(Register dst,const Operand & opnd)4335 void MacroAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }
4336 
4337 // AND Pointer Size - dst = dst & imm
AndP(Register dst,const Operand & opnd)4338 void MacroAssembler::AndP(Register dst, const Operand& opnd) {
4339 #if V8_TARGET_ARCH_S390X
4340   intptr_t value = opnd.imm_;
4341   if (value >> 32 != -1) {
4342     // Note: the condition code will only reflect the final NILF (low 32 bits).
4343     nihf(dst, Operand(value >> 32));
4344   }
4345   nilf(dst, Operand(value & 0xFFFFFFFF));
4346 #else
4347   And(dst, opnd);
4348 #endif
4349 }
4350 
4351 // AND 32-bit - dst = src & imm
And(Register dst,Register src,const Operand & opnd)4352 void MacroAssembler::And(Register dst, Register src, const Operand& opnd) {
4353   if (!dst.is(src)) lr(dst, src);
4354   nilf(dst, opnd);
4355 }
4356 
4357 // AND Pointer Size - dst = src & imm
AndP(Register dst,Register src,const Operand & opnd)4358 void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) {
4359   // Try to exploit RISBG first
4360   intptr_t value = opnd.imm_;
4361   if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
4362     intptr_t shifted_value = value;
4363     int trailing_zeros = 0;
4364 
4365     // First, count the trailing zeros at the right end of the value.
4366     while ((0 != shifted_value) && (0 == (shifted_value & 1))) {
4367       trailing_zeros++;
4368       shifted_value >>= 1;
4369     }
4370 
4371     // If shifted_value (the value with its trailing zeros shifted out) is 1
4372     // less than a power of 2, the original value is a run of consecutive 1s.
4373     // Special case: if shifted_value is zero, we cannot use RISBG, as it
4374     //               requires selecting at least 1 bit.
4375     if ((0 != shifted_value) && base::bits::IsPowerOfTwo64(shifted_value + 1)) {
4376       int startBit =
4377           base::bits::CountLeadingZeros64(shifted_value) - trailing_zeros;
4378       int endBit = 63 - trailing_zeros;
4379       // Start: startBit, End: endBit, Shift = 0, true = zero unselected bits.
4380       risbg(dst, src, Operand(startBit), Operand(endBit), Operand::Zero(),
4381             true);
4382       return;
4383     } else if (-1 == shifted_value) {
4384       // Special case in which all bits from the MSB down to the trailing
4385       // zeros are 1s; startBit can simply be 0.
4386       int endBit = 63 - trailing_zeros;
4387       risbg(dst, src, Operand::Zero(), Operand(endBit), Operand::Zero(), true);
4388       return;
4389     }
4390   }
4391 
4392   // If we are AND'ing with zero, the result is zero anyway, so skip the copy.
4393   if (!dst.is(src) && (0 != value)) LoadRR(dst, src);
4394   AndP(dst, opnd);
4395 }
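
// Illustrative sketch (not part of the original helper) of the start/end bit
// computation used by the RISBG path above, for a mask that is a single run
// of consecutive 1-bits.  Bit positions use the IBM numbering (bit 0 is the
// MSB); the helper name is hypothetical and the standard <cstdint> types are
// assumed to be available in this translation unit.
static bool RisbgRangeForMask(uint64_t mask, int* start_bit, int* end_bit) {
  if (mask == 0) return false;  // RISBG must select at least one bit.
  int trailing_zeros = 0;
  uint64_t shifted = mask;
  while ((shifted & 1) == 0) {  // Strip the trailing zeros.
    trailing_zeros++;
    shifted >>= 1;
  }
  // A run of consecutive ones satisfies shifted + 1 == power of two.
  if ((shifted & (shifted + 1)) != 0) return false;
  int leading_zeros = 0;
  for (uint64_t probe = static_cast<uint64_t>(1) << 63; (mask & probe) == 0;
       probe >>= 1) {
    leading_zeros++;
  }
  *start_bit = leading_zeros;      // First selected bit, counted from the MSB.
  *end_bit = 63 - trailing_zeros;  // Last selected bit.
  return true;
}
// For example, the mask 0x0FF0 yields start_bit = 52 and end_bit = 59, which
// are the Operand(startBit)/Operand(endBit) values risbg() receives above.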
4396 
4397 // OR 32-bit - dst = dst | src
Or(Register dst,Register src)4398 void MacroAssembler::Or(Register dst, Register src) { or_z(dst, src); }
4399 
4400 // OR Pointer Size - dst = dst | src
OrP(Register dst,Register src)4401 void MacroAssembler::OrP(Register dst, Register src) { OrRR(dst, src); }
4402 
4403 // Non-clobbering OR 32-bit - dst = src1 | src2
Or(Register dst,Register src1,Register src2)4404 void MacroAssembler::Or(Register dst, Register src1, Register src2) {
4405   if (!dst.is(src1) && !dst.is(src2)) {
4406     // We prefer to generate OR/OGR over the non-clobbering ORK/OGRK,
4407     // as OR is a smaller instruction.
4408     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4409       ork(dst, src1, src2);
4410       return;
4411     } else {
4412       lr(dst, src1);
4413     }
4414   } else if (dst.is(src2)) {
4415     src2 = src1;
4416   }
4417   Or(dst, src2);
4418 }
4419 
4420 // Non-clobbering OR pointer size - dst = src1 | src2
OrP(Register dst,Register src1,Register src2)4421 void MacroAssembler::OrP(Register dst, Register src1, Register src2) {
4422   if (!dst.is(src1) && !dst.is(src2)) {
4423     // We prefer to generate OR/OGR over the non-clobbering ORK/OGRK,
4424     // as OR is a smaller instruction.
4425     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4426       OrP_RRR(dst, src1, src2);
4427       return;
4428     } else {
4429       LoadRR(dst, src1);
4430     }
4431   } else if (dst.is(src2)) {
4432     src2 = src1;
4433   }
4434   OrP(dst, src2);
4435 }
4436 
4437 // OR 32-bit (Reg - Mem)
Or(Register dst,const MemOperand & opnd)4438 void MacroAssembler::Or(Register dst, const MemOperand& opnd) {
4439   DCHECK(is_int20(opnd.offset()));
4440   if (is_uint12(opnd.offset()))
4441     o(dst, opnd);
4442   else
4443     oy(dst, opnd);
4444 }
4445 
4446 // OR Pointer Size (Reg - Mem)
OrP(Register dst,const MemOperand & opnd)4447 void MacroAssembler::OrP(Register dst, const MemOperand& opnd) {
4448   DCHECK(is_int20(opnd.offset()));
4449 #if V8_TARGET_ARCH_S390X
4450   og(dst, opnd);
4451 #else
4452   Or(dst, opnd);
4453 #endif
4454 }
4455 
4456 // OR 32-bit - dst = dst | imm
Or(Register dst,const Operand & opnd)4457 void MacroAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }
4458 
4459 // OR Pointer Size - dst = dst | imm
OrP(Register dst,const Operand & opnd)4460 void MacroAssembler::OrP(Register dst, const Operand& opnd) {
4461 #if V8_TARGET_ARCH_S390X
4462   intptr_t value = opnd.imm_;
4463   if (value >> 32 != 0) {
4464     // Note: the condition code will only reflect the final OILF (low 32 bits).
4465     oihf(dst, Operand(value >> 32));
4466   }
4467   oilf(dst, Operand(value & 0xFFFFFFFF));
4468 #else
4469   Or(dst, opnd);
4470 #endif
4471 }
4472 
4473 // OR 32-bit - dst = src | imm
Or(Register dst,Register src,const Operand & opnd)4474 void MacroAssembler::Or(Register dst, Register src, const Operand& opnd) {
4475   if (!dst.is(src)) lr(dst, src);
4476   oilf(dst, opnd);
4477 }
4478 
4479 // OR Pointer Size - dst = src | imm
OrP(Register dst,Register src,const Operand & opnd)4480 void MacroAssembler::OrP(Register dst, Register src, const Operand& opnd) {
4481   if (!dst.is(src)) LoadRR(dst, src);
4482   OrP(dst, opnd);
4483 }
4484 
4485 // XOR 32-bit - dst = dst ^ src
Xor(Register dst,Register src)4486 void MacroAssembler::Xor(Register dst, Register src) { xr(dst, src); }
4487 
4488 // XOR Pointer Size - dst = dst ^ src
XorP(Register dst,Register src)4489 void MacroAssembler::XorP(Register dst, Register src) { XorRR(dst, src); }
4490 
4491 // Non-clobbering XOR 32-bit - dst = src1 ^ src2
Xor(Register dst,Register src1,Register src2)4492 void MacroAssembler::Xor(Register dst, Register src1, Register src2) {
4493   if (!dst.is(src1) && !dst.is(src2)) {
4494     // We prefer to generate XR/XGR over the non-clobbering XRK/XGRK,
4495     // as XR is a smaller instruction.
4496     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4497       xrk(dst, src1, src2);
4498       return;
4499     } else {
4500       lr(dst, src1);
4501     }
4502   } else if (dst.is(src2)) {
4503     src2 = src1;
4504   }
4505   Xor(dst, src2);
4506 }
4507 
4508 // Non-clobbering XOR pointer size - dst = src1 ^ src2
XorP(Register dst,Register src1,Register src2)4509 void MacroAssembler::XorP(Register dst, Register src1, Register src2) {
4510   if (!dst.is(src1) && !dst.is(src2)) {
4511     // We prefer to generate XR/XGR over the non-clobbering XRK/XGRK,
4512     // as XR is a smaller instruction.
4513     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4514       XorP_RRR(dst, src1, src2);
4515       return;
4516     } else {
4517       LoadRR(dst, src1);
4518     }
4519   } else if (dst.is(src2)) {
4520     src2 = src1;
4521   }
4522   XorP(dst, src2);
4523 }
4524 
4525 // XOR 32-bit (Reg - Mem)
Xor(Register dst,const MemOperand & opnd)4526 void MacroAssembler::Xor(Register dst, const MemOperand& opnd) {
4527   DCHECK(is_int20(opnd.offset()));
4528   if (is_uint12(opnd.offset()))
4529     x(dst, opnd);
4530   else
4531     xy(dst, opnd);
4532 }
4533 
4534 // XOR Pointer Size (Reg - Mem)
XorP(Register dst,const MemOperand & opnd)4535 void MacroAssembler::XorP(Register dst, const MemOperand& opnd) {
4536   DCHECK(is_int20(opnd.offset()));
4537 #if V8_TARGET_ARCH_S390X
4538   xg(dst, opnd);
4539 #else
4540   Xor(dst, opnd);
4541 #endif
4542 }
4543 
4544 // XOR 32-bit - dst = dst ^ imm
Xor(Register dst,const Operand & opnd)4545 void MacroAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }
4546 
4547 // XOR Pointer Size - dst = dst ^ imm
XorP(Register dst,const Operand & opnd)4548 void MacroAssembler::XorP(Register dst, const Operand& opnd) {
4549 #if V8_TARGET_ARCH_S390X
4550   intptr_t value = opnd.imm_;
4551   xihf(dst, Operand(value >> 32));
4552   xilf(dst, Operand(value & 0xFFFFFFFF));
4553 #else
4554   Xor(dst, opnd);
4555 #endif
4556 }
4557 
4558 // XOR 32-bit - dst = src ^ imm
Xor(Register dst,Register src,const Operand & opnd)4559 void MacroAssembler::Xor(Register dst, Register src, const Operand& opnd) {
4560   if (!dst.is(src)) lr(dst, src);
4561   xilf(dst, opnd);
4562 }
4563 
4564 // XOR Pointer Size - dst = src ^ imm
XorP(Register dst,Register src,const Operand & opnd)4565 void MacroAssembler::XorP(Register dst, Register src, const Operand& opnd) {
4566   if (!dst.is(src)) LoadRR(dst, src);
4567   XorP(dst, opnd);
4568 }
4569 
NotP(Register dst)4570 void MacroAssembler::NotP(Register dst) {
4571 #if V8_TARGET_ARCH_S390X
4572   xihf(dst, Operand(0xFFFFFFFF));
4573   xilf(dst, Operand(0xFFFFFFFF));
4574 #else
4575   XorP(dst, Operand(0xFFFFFFFF));
4576 #endif
4577 }
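
// For example, on 64-bit builds NotP on a register holding 0x0000000000000001
// leaves 0xFFFFFFFFFFFFFFFE (both halves are XOR'ed with 0xFFFFFFFF); on
// 32-bit builds the single 32-bit word is complemented.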
4578 
4579 // Works the same as mov, choosing the shortest encoding for the immediate.
Load(Register dst,const Operand & opnd)4580 void MacroAssembler::Load(Register dst, const Operand& opnd) {
4581   intptr_t value = opnd.immediate();
4582   if (is_int16(value)) {
4583 #if V8_TARGET_ARCH_S390X
4584     lghi(dst, opnd);
4585 #else
4586     lhi(dst, opnd);
4587 #endif
4588   } else {
4589 #if V8_TARGET_ARCH_S390X
4590     llilf(dst, opnd);
4591 #else
4592     iilf(dst, opnd);
4593 #endif
4594   }
4595 }
4596 
Load(Register dst,const MemOperand & opnd)4597 void MacroAssembler::Load(Register dst, const MemOperand& opnd) {
4598   DCHECK(is_int20(opnd.offset()));
4599 #if V8_TARGET_ARCH_S390X
4600   lgf(dst, opnd);  // 64<-32
4601 #else
4602   if (is_uint12(opnd.offset())) {
4603     l(dst, opnd);
4604   } else {
4605     ly(dst, opnd);
4606   }
4607 #endif
4608 }
4609 
4610 //-----------------------------------------------------------------------------
4611 //  Compare Helpers
4612 //-----------------------------------------------------------------------------
4613 
4614 // Compare 32-bit Register vs Register
Cmp32(Register src1,Register src2)4615 void MacroAssembler::Cmp32(Register src1, Register src2) { cr_z(src1, src2); }
4616 
4617 // Compare Pointer Sized Register vs Register
CmpP(Register src1,Register src2)4618 void MacroAssembler::CmpP(Register src1, Register src2) {
4619 #if V8_TARGET_ARCH_S390X
4620   cgr(src1, src2);
4621 #else
4622   Cmp32(src1, src2);
4623 #endif
4624 }
4625 
4626 // Compare 32-bit Register vs Immediate
4627 // This helper will set up proper relocation entries if required.
Cmp32(Register dst,const Operand & opnd)4628 void MacroAssembler::Cmp32(Register dst, const Operand& opnd) {
4629   if (opnd.rmode_ == kRelocInfo_NONEPTR) {
4630     intptr_t value = opnd.immediate();
4631     if (is_int16(value))
4632       chi(dst, opnd);
4633     else
4634       cfi(dst, opnd);
4635   } else {
4636     // Need to generate relocation record here
4637     RecordRelocInfo(opnd.rmode_, opnd.imm_);
4638     cfi(dst, opnd);
4639   }
4640 }
4641 
4642 // Compare Pointer Sized  Register vs Immediate
4643 // This helper will set up proper relocation entries if required.
CmpP(Register dst,const Operand & opnd)4644 void MacroAssembler::CmpP(Register dst, const Operand& opnd) {
4645 #if V8_TARGET_ARCH_S390X
4646   if (opnd.rmode_ == kRelocInfo_NONEPTR) {
4647     cgfi(dst, opnd);
4648   } else {
4649     mov(r0, opnd);  // Need to generate 64-bit relocation
4650     cgr(dst, r0);
4651   }
4652 #else
4653   Cmp32(dst, opnd);
4654 #endif
4655 }
4656 
4657 // Compare 32-bit Register vs Memory
Cmp32(Register dst,const MemOperand & opnd)4658 void MacroAssembler::Cmp32(Register dst, const MemOperand& opnd) {
4659   // make sure offset is within 20 bit range
4660   DCHECK(is_int20(opnd.offset()));
4661   if (is_uint12(opnd.offset()))
4662     c(dst, opnd);
4663   else
4664     cy(dst, opnd);
4665 }
4666 
4667 // Compare Pointer Size Register vs Memory
CmpP(Register dst,const MemOperand & opnd)4668 void MacroAssembler::CmpP(Register dst, const MemOperand& opnd) {
4669   // make sure offset is within 20 bit range
4670   DCHECK(is_int20(opnd.offset()));
4671 #if V8_TARGET_ARCH_S390X
4672   cg(dst, opnd);
4673 #else
4674   Cmp32(dst, opnd);
4675 #endif
4676 }
4677 
4678 //-----------------------------------------------------------------------------
4679 // Compare Logical Helpers
4680 //-----------------------------------------------------------------------------
4681 
4682 // Compare Logical 32-bit Register vs Register
CmpLogical32(Register dst,Register src)4683 void MacroAssembler::CmpLogical32(Register dst, Register src) { clr(dst, src); }
4684 
4685 // Compare Logical Pointer Sized Register vs Register
CmpLogicalP(Register dst,Register src)4686 void MacroAssembler::CmpLogicalP(Register dst, Register src) {
4687 #ifdef V8_TARGET_ARCH_S390X
4688   clgr(dst, src);
4689 #else
4690   CmpLogical32(dst, src);
4691 #endif
4692 }
4693 
4694 // Compare Logical 32-bit Register vs Immediate
CmpLogical32(Register dst,const Operand & opnd)4695 void MacroAssembler::CmpLogical32(Register dst, const Operand& opnd) {
4696   clfi(dst, opnd);
4697 }
4698 
4699 // Compare Logical Pointer Sized Register vs Immediate
CmpLogicalP(Register dst,const Operand & opnd)4700 void MacroAssembler::CmpLogicalP(Register dst, const Operand& opnd) {
4701 #if V8_TARGET_ARCH_S390X
4702   DCHECK(static_cast<uint32_t>(opnd.immediate() >> 32) == 0);
4703   clgfi(dst, opnd);
4704 #else
4705   CmpLogical32(dst, opnd);
4706 #endif
4707 }
4708 
4709 // Compare Logical 32-bit Register vs Memory
CmpLogical32(Register dst,const MemOperand & opnd)4710 void MacroAssembler::CmpLogical32(Register dst, const MemOperand& opnd) {
4711   // make sure offset is within 20 bit range
4712   DCHECK(is_int20(opnd.offset()));
4713   if (is_uint12(opnd.offset()))
4714     cl(dst, opnd);
4715   else
4716     cly(dst, opnd);
4717 }
4718 
4719 // Compare Logical Pointer Sized Register vs Memory
CmpLogicalP(Register dst,const MemOperand & opnd)4720 void MacroAssembler::CmpLogicalP(Register dst, const MemOperand& opnd) {
4721   // make sure offset is within 20 bit range
4722   DCHECK(is_int20(opnd.offset()));
4723 #if V8_TARGET_ARCH_S390X
4724   clg(dst, opnd);
4725 #else
4726   CmpLogical32(dst, opnd);
4727 #endif
4728 }
4729 
4730 // Compare Logical Byte (Mem - Imm)
CmpLogicalByte(const MemOperand & mem,const Operand & imm)4731 void MacroAssembler::CmpLogicalByte(const MemOperand& mem, const Operand& imm) {
4732   DCHECK(is_uint8(imm.immediate()));
4733   if (is_uint12(mem.offset()))
4734     cli(mem, imm);
4735   else
4736     cliy(mem, imm);
4737 }
4738 
Branch(Condition c,const Operand & opnd)4739 void MacroAssembler::Branch(Condition c, const Operand& opnd) {
4740   intptr_t value = opnd.immediate();
4741   if (is_int16(value))
4742     brc(c, opnd);
4743   else
4744     brcl(c, opnd);
4745 }
4746 
4747 // Branch On Count.  Decrement R1, and branch if R1 != 0.
BranchOnCount(Register r1,Label * l)4748 void MacroAssembler::BranchOnCount(Register r1, Label* l) {
4749   int32_t offset = branch_offset(l);
4750   if (is_int16(offset)) {
4751 #if V8_TARGET_ARCH_S390X
4752     brctg(r1, Operand(offset));
4753 #else
4754     brct(r1, Operand(offset));
4755 #endif
4756   } else {
4757     AddP(r1, Operand(-1));
4758     Branch(ne, Operand(offset));
4759   }
4760 }
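
// Typical use (sketch): a count-down loop that runs its body 'count' times,
// assuming a register such as r3 already holds the iteration count:
//
//   Label loop;
//   bind(&loop);
//   ... loop body ...
//   BranchOnCount(r3, &loop);  // r3 -= 1; branch back while r3 != 0.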
4761 
LoadIntLiteral(Register dst,int value)4762 void MacroAssembler::LoadIntLiteral(Register dst, int value) {
4763   Load(dst, Operand(value));
4764 }
4765 
LoadSmiLiteral(Register dst,Smi * smi)4766 void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
4767   intptr_t value = reinterpret_cast<intptr_t>(smi);
4768 #if V8_TARGET_ARCH_S390X
4769   DCHECK((value & 0xffffffff) == 0);
4770   // The smi value is loaded into the upper 32 bits; the lower 32 bits are zero.
4771   llihf(dst, Operand(value >> 32));
4772 #else
4773   llilf(dst, Operand(value));
4774 #endif
4775 }
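
// For example, with the 32-bit smi shift used on 64-bit targets,
// Smi::FromInt(5) is the tagged value 0x0000000500000000, so LLIHF loads
// 0x00000005 into the upper word and the lower word is implicitly zero.
// 32-bit builds load the whole tagged value (payload shifted left by one)
// with LLILF instead.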
4776 
LoadDoubleLiteral(DoubleRegister result,uint64_t value,Register scratch)4777 void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, uint64_t value,
4778                                        Register scratch) {
4779   uint32_t hi_32 = value >> 32;
4780   uint32_t lo_32 = static_cast<uint32_t>(value);
4781 
4782   // Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
4783   iihf(scratch, Operand(hi_32));
4784   iilf(scratch, Operand(lo_32));
4785   ldgr(result, scratch);
4786 }
4787 
LoadDoubleLiteral(DoubleRegister result,double value,Register scratch)4788 void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
4789                                        Register scratch) {
4790   uint64_t int_val = bit_cast<uint64_t, double>(value);
4791   LoadDoubleLiteral(result, int_val, scratch);
4792 }
4793 
LoadFloat32Literal(DoubleRegister result,float value,Register scratch)4794 void MacroAssembler::LoadFloat32Literal(DoubleRegister result, float value,
4795                                         Register scratch) {
4796   uint32_t hi_32 = bit_cast<uint32_t>(value);
4797   uint32_t lo_32 = 0;
4798 
4799   // Load the float bits into the upper word of a GPR, then transfer to the FPR via LDGR.
4800   iihf(scratch, Operand(hi_32));
4801   iilf(scratch, Operand(lo_32));
4802   ldgr(result, scratch);
4803 }
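
// For example, loading 1.0f materializes the bit pattern 0x3F800000 in the
// upper word of the scratch GPR (lower word zero) before the LDGR transfer,
// since the FPR keeps a float32 value in its top 32 bits.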
4804 
CmpSmiLiteral(Register src1,Smi * smi,Register scratch)4805 void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
4806 #if V8_TARGET_ARCH_S390X
4807   LoadSmiLiteral(scratch, smi);
4808   cgr(src1, scratch);
4809 #else
4810   // CFI takes 32-bit immediate.
4811   cfi(src1, Operand(smi));
4812 #endif
4813 }
4814 
CmpLogicalSmiLiteral(Register src1,Smi * smi,Register scratch)4815 void MacroAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
4816                                           Register scratch) {
4817 #if V8_TARGET_ARCH_S390X
4818   LoadSmiLiteral(scratch, smi);
4819   clgr(src1, scratch);
4820 #else
4821   // CLFI takes 32-bit immediate
4822   clfi(src1, Operand(smi));
4823 #endif
4824 }
4825 
AddSmiLiteral(Register dst,Register src,Smi * smi,Register scratch)4826 void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
4827                                    Register scratch) {
4828 #if V8_TARGET_ARCH_S390X
4829   LoadSmiLiteral(scratch, smi);
4830   AddP(dst, src, scratch);
4831 #else
4832   AddP(dst, src, Operand(reinterpret_cast<intptr_t>(smi)));
4833 #endif
4834 }
4835 
SubSmiLiteral(Register dst,Register src,Smi * smi,Register scratch)4836 void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
4837                                    Register scratch) {
4838 #if V8_TARGET_ARCH_S390X
4839   LoadSmiLiteral(scratch, smi);
4840   SubP(dst, src, scratch);
4841 #else
4842   AddP(dst, src, Operand(-(reinterpret_cast<intptr_t>(smi))));
4843 #endif
4844 }
4845 
AndSmiLiteral(Register dst,Register src,Smi * smi)4846 void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
4847   if (!dst.is(src)) LoadRR(dst, src);
4848 #if V8_TARGET_ARCH_S390X
4849   DCHECK((reinterpret_cast<intptr_t>(smi) & 0xffffffff) == 0);
4850   int value = static_cast<int>(reinterpret_cast<intptr_t>(smi) >> 32);
4851   nihf(dst, Operand(value));
4852 #else
4853   nilf(dst, Operand(reinterpret_cast<int>(smi)));
4854 #endif
4855 }
4856 
4857 // Load a "pointer" sized value from the memory location
LoadP(Register dst,const MemOperand & mem,Register scratch)4858 void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
4859                            Register scratch) {
4860   int offset = mem.offset();
4861 
4862   if (!scratch.is(no_reg) && !is_int20(offset)) {
4863     /* cannot use d-form */
4864     LoadIntLiteral(scratch, offset);
4865 #if V8_TARGET_ARCH_S390X
4866     lg(dst, MemOperand(mem.rb(), scratch));
4867 #else
4868     l(dst, MemOperand(mem.rb(), scratch));
4869 #endif
4870   } else {
4871 #if V8_TARGET_ARCH_S390X
4872     lg(dst, mem);
4873 #else
4874     if (is_uint12(offset)) {
4875       l(dst, mem);
4876     } else {
4877       ly(dst, mem);
4878     }
4879 #endif
4880   }
4881 }
4882 
4883 // Store a "pointer" sized value to the memory location
StoreP(Register src,const MemOperand & mem,Register scratch)4884 void MacroAssembler::StoreP(Register src, const MemOperand& mem,
4885                             Register scratch) {
4886   if (!is_int20(mem.offset())) {
4887     DCHECK(!scratch.is(no_reg));
4888     DCHECK(!scratch.is(r0));
4889     LoadIntLiteral(scratch, mem.offset());
4890 #if V8_TARGET_ARCH_S390X
4891     stg(src, MemOperand(mem.rb(), scratch));
4892 #else
4893     st(src, MemOperand(mem.rb(), scratch));
4894 #endif
4895   } else {
4896 #if V8_TARGET_ARCH_S390X
4897     stg(src, mem);
4898 #else
4899     // StoreW will try to generate ST if offset fits, otherwise
4900     // it'll generate STY.
4901     StoreW(src, mem);
4902 #endif
4903   }
4904 }
4905 
4906 // Store a "pointer" sized constant to the memory location
StoreP(const MemOperand & mem,const Operand & opnd,Register scratch)4907 void MacroAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
4908                             Register scratch) {
4909   // Relocations not supported
4910   DCHECK(opnd.rmode_ == kRelocInfo_NONEPTR);
4911 
4912   // Try to use MVGHI/MVHI
4913   if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
4914       mem.getIndexRegister().is(r0) && is_int16(opnd.imm_)) {
4915 #if V8_TARGET_ARCH_S390X
4916     mvghi(mem, opnd);
4917 #else
4918     mvhi(mem, opnd);
4919 #endif
4920   } else {
4921     LoadImmP(scratch, opnd);
4922     StoreP(scratch, mem);
4923   }
4924 }
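
// For example, storing the constant 42 to a field with a small offset on a
// GENERAL_INSTR_EXT machine emits a single MVHI/MVGHI; otherwise the constant
// is first materialized into 'scratch' with LoadImmP and stored with
// StoreP(scratch, mem).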
4925 
LoadMultipleP(Register dst1,Register dst2,const MemOperand & mem)4926 void MacroAssembler::LoadMultipleP(Register dst1, Register dst2,
4927                                    const MemOperand& mem) {
4928 #if V8_TARGET_ARCH_S390X
4929   DCHECK(is_int20(mem.offset()));
4930   lmg(dst1, dst2, mem);
4931 #else
4932   if (is_uint12(mem.offset())) {
4933     lm(dst1, dst2, mem);
4934   } else {
4935     DCHECK(is_int20(mem.offset()));
4936     lmy(dst1, dst2, mem);
4937   }
4938 #endif
4939 }
4940 
StoreMultipleP(Register src1,Register src2,const MemOperand & mem)4941 void MacroAssembler::StoreMultipleP(Register src1, Register src2,
4942                                     const MemOperand& mem) {
4943 #if V8_TARGET_ARCH_S390X
4944   DCHECK(is_int20(mem.offset()));
4945   stmg(src1, src2, mem);
4946 #else
4947   if (is_uint12(mem.offset())) {
4948     stm(src1, src2, mem);
4949   } else {
4950     DCHECK(is_int20(mem.offset()));
4951     stmy(src1, src2, mem);
4952   }
4953 #endif
4954 }
4955 
LoadMultipleW(Register dst1,Register dst2,const MemOperand & mem)4956 void MacroAssembler::LoadMultipleW(Register dst1, Register dst2,
4957                                    const MemOperand& mem) {
4958   if (is_uint12(mem.offset())) {
4959     lm(dst1, dst2, mem);
4960   } else {
4961     DCHECK(is_int20(mem.offset()));
4962     lmy(dst1, dst2, mem);
4963   }
4964 }
4965 
StoreMultipleW(Register src1,Register src2,const MemOperand & mem)4966 void MacroAssembler::StoreMultipleW(Register src1, Register src2,
4967                                     const MemOperand& mem) {
4968   if (is_uint12(mem.offset())) {
4969     stm(src1, src2, mem);
4970   } else {
4971     DCHECK(is_int20(mem.offset()));
4972     stmy(src1, src2, mem);
4973   }
4974 }
4975 
4976 // Load 32-bits and sign extend if necessary.
LoadW(Register dst,Register src)4977 void MacroAssembler::LoadW(Register dst, Register src) {
4978 #if V8_TARGET_ARCH_S390X
4979   lgfr(dst, src);
4980 #else
4981   if (!dst.is(src)) lr(dst, src);
4982 #endif
4983 }
4984 
4985 // Load 32-bits and sign extend if necessary.
LoadW(Register dst,const MemOperand & mem,Register scratch)4986 void MacroAssembler::LoadW(Register dst, const MemOperand& mem,
4987                            Register scratch) {
4988   int offset = mem.offset();
4989 
4990   if (!is_int20(offset)) {
4991     DCHECK(!scratch.is(no_reg));
4992     LoadIntLiteral(scratch, offset);
4993 #if V8_TARGET_ARCH_S390X
4994     lgf(dst, MemOperand(mem.rb(), scratch));
4995 #else
4996     l(dst, MemOperand(mem.rb(), scratch));
4997 #endif
4998   } else {
4999 #if V8_TARGET_ARCH_S390X
5000     lgf(dst, mem);
5001 #else
5002     if (is_uint12(offset)) {
5003       l(dst, mem);
5004     } else {
5005       ly(dst, mem);
5006     }
5007 #endif
5008   }
5009 }
5010 
5011 // Load 32-bits and zero extend if necessary.
LoadlW(Register dst,Register src)5012 void MacroAssembler::LoadlW(Register dst, Register src) {
5013 #if V8_TARGET_ARCH_S390X
5014   llgfr(dst, src);
5015 #else
5016   if (!dst.is(src)) lr(dst, src);
5017 #endif
5018 }
5019 
5020 // Variable-length encoding, depending on whether the offset fits into the
5021 // immediate field of an RX- or RXY-format MemOperand.
LoadlW(Register dst,const MemOperand & mem,Register scratch)5022 void MacroAssembler::LoadlW(Register dst, const MemOperand& mem,
5023                             Register scratch) {
5024   Register base = mem.rb();
5025   int offset = mem.offset();
5026 
5027 #if V8_TARGET_ARCH_S390X
5028   if (is_int20(offset)) {
5029     llgf(dst, mem);
5030   } else if (!scratch.is(no_reg)) {
5031     // Materialize offset into scratch register.
5032     LoadIntLiteral(scratch, offset);
5033     llgf(dst, MemOperand(base, scratch));
5034   } else {
5035     DCHECK(false);
5036   }
5037 #else
5038   bool use_RXform = false;
5039   bool use_RXYform = false;
5040   if (is_uint12(offset)) {
5041     // RX-format supports unsigned 12-bits offset.
5042     use_RXform = true;
5043   } else if (is_int20(offset)) {
5044     // RXY-format supports signed 20-bits offset.
5045     use_RXYform = true;
5046   } else if (!scratch.is(no_reg)) {
5047     // Materialize offset into scratch register.
5048     LoadIntLiteral(scratch, offset);
5049   } else {
5050     DCHECK(false);
5051   }
5052 
5053   if (use_RXform) {
5054     l(dst, mem);
5055   } else if (use_RXYform) {
5056     ly(dst, mem);
5057   } else {
5058     ly(dst, MemOperand(base, scratch));
5059   }
5060 #endif
5061 }
5062 
LoadB(Register dst,const MemOperand & mem)5063 void MacroAssembler::LoadB(Register dst, const MemOperand& mem) {
5064 #if V8_TARGET_ARCH_S390X
5065   lgb(dst, mem);
5066 #else
5067   lb(dst, mem);
5068 #endif
5069 }
5070 
LoadB(Register dst,Register src)5071 void MacroAssembler::LoadB(Register dst, Register src) {
5072 #if V8_TARGET_ARCH_S390X
5073   lgbr(dst, src);
5074 #else
5075   lbr(dst, src);
5076 #endif
5077 }
5078 
LoadlB(Register dst,const MemOperand & mem)5079 void MacroAssembler::LoadlB(Register dst, const MemOperand& mem) {
5080 #if V8_TARGET_ARCH_S390X
5081   llgc(dst, mem);
5082 #else
5083   llc(dst, mem);
5084 #endif
5085 }
5086 
5087 // Load And Test (Reg <- Reg)
LoadAndTest32(Register dst,Register src)5088 void MacroAssembler::LoadAndTest32(Register dst, Register src) {
5089   ltr(dst, src);
5090 }
5091 
5092 // Load And Test
5093 //     (Register dst(ptr) = Register src (32 | 32->64))
5094 // src is treated as a 32-bit signed integer, which is sign extended to
5095 // 64-bit if necessary.
LoadAndTestP_ExtendSrc(Register dst,Register src)5096 void MacroAssembler::LoadAndTestP_ExtendSrc(Register dst, Register src) {
5097 #if V8_TARGET_ARCH_S390X
5098   ltgfr(dst, src);
5099 #else
5100   ltr(dst, src);
5101 #endif
5102 }
5103 
5104 // Load And Test Pointer Sized (Reg <- Reg)
LoadAndTestP(Register dst,Register src)5105 void MacroAssembler::LoadAndTestP(Register dst, Register src) {
5106 #if V8_TARGET_ARCH_S390X
5107   ltgr(dst, src);
5108 #else
5109   ltr(dst, src);
5110 #endif
5111 }
5112 
5113 // Load And Test 32-bit (Reg <- Mem)
LoadAndTest32(Register dst,const MemOperand & mem)5114 void MacroAssembler::LoadAndTest32(Register dst, const MemOperand& mem) {
5115   lt_z(dst, mem);
5116 }
5117 
5118 // Load And Test Pointer Sized (Reg <- Mem)
LoadAndTestP(Register dst,const MemOperand & mem)5119 void MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
5120 #if V8_TARGET_ARCH_S390X
5121   ltg(dst, mem);
5122 #else
5123   lt_z(dst, mem);
5124 #endif
5125 }
5126 
5127 // Load Double Precision (64-bit) Floating Point number from memory
LoadDouble(DoubleRegister dst,const MemOperand & mem)5128 void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem) {
5129   // Both 32-bit and 64-bit builds use 64-bit floating point registers.
5130   if (is_uint12(mem.offset())) {
5131     ld(dst, mem);
5132   } else {
5133     ldy(dst, mem);
5134   }
5135 }
5136 
5137 // Load Single Precision (32-bit) Floating Point number from memory
LoadFloat32(DoubleRegister dst,const MemOperand & mem)5138 void MacroAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem) {
5139   if (is_uint12(mem.offset())) {
5140     le_z(dst, mem);
5141   } else {
5142     DCHECK(is_int20(mem.offset()));
5143     ley(dst, mem);
5144   }
5145 }
5146 
5147 // Load Single Precision (32-bit) Floating Point number from memory,
5148 // and convert to Double Precision (64-bit)
LoadFloat32ConvertToDouble(DoubleRegister dst,const MemOperand & mem)5149 void MacroAssembler::LoadFloat32ConvertToDouble(DoubleRegister dst,
5150                                                 const MemOperand& mem) {
5151   LoadFloat32(dst, mem);
5152   ldebr(dst, dst);
5153 }
5154 
5155 // Store Double Precision (64-bit) Floating Point number to memory
StoreDouble(DoubleRegister dst,const MemOperand & mem)5156 void MacroAssembler::StoreDouble(DoubleRegister dst, const MemOperand& mem) {
5157   if (is_uint12(mem.offset())) {
5158     std(dst, mem);
5159   } else {
5160     stdy(dst, mem);
5161   }
5162 }
5163 
5164 // Store Single Precision (32-bit) Floating Point number to memory
StoreFloat32(DoubleRegister src,const MemOperand & mem)5165 void MacroAssembler::StoreFloat32(DoubleRegister src, const MemOperand& mem) {
5166   if (is_uint12(mem.offset())) {
5167     ste(src, mem);
5168   } else {
5169     stey(src, mem);
5170   }
5171 }
5172 
5173 // Convert Double precision (64-bit) to Single Precision (32-bit)
5174 // and store resulting Float32 to memory
StoreDoubleAsFloat32(DoubleRegister src,const MemOperand & mem,DoubleRegister scratch)5175 void MacroAssembler::StoreDoubleAsFloat32(DoubleRegister src,
5176                                           const MemOperand& mem,
5177                                           DoubleRegister scratch) {
5178   ledbr(scratch, src);
5179   StoreFloat32(scratch, mem);
5180 }
5181 
5182 // Variable-length encoding, depending on whether the offset fits into the
5183 // immediate field of an RX- or RXY-format MemOperand.
StoreW(Register src,const MemOperand & mem,Register scratch)5184 void MacroAssembler::StoreW(Register src, const MemOperand& mem,
5185                             Register scratch) {
5186   Register base = mem.rb();
5187   int offset = mem.offset();
5188 
5189   bool use_RXform = false;
5190   bool use_RXYform = false;
5191 
5192   if (is_uint12(offset)) {
5193     // RX-format supports unsigned 12-bits offset.
5194     use_RXform = true;
5195   } else if (is_int20(offset)) {
5196     // RXY-format supports signed 20-bits offset.
5197     use_RXYform = true;
5198   } else if (!scratch.is(no_reg)) {
5199     // Materialize offset into scratch register.
5200     LoadIntLiteral(scratch, offset);
5201   } else {
5202     // scratch is no_reg
5203     DCHECK(false);
5204   }
5205 
5206   if (use_RXform) {
5207     st(src, mem);
5208   } else if (use_RXYform) {
5209     sty(src, mem);
5210   } else {
5211     StoreW(src, MemOperand(base, scratch));
5212   }
5213 }
5214 
5215 // Loads a 16-bit halfword value from memory and sign extends it into a
5216 // pointer-sized register.
LoadHalfWordP(Register dst,const MemOperand & mem,Register scratch)5217 void MacroAssembler::LoadHalfWordP(Register dst, const MemOperand& mem,
5218                                    Register scratch) {
5219   Register base = mem.rb();
5220   int offset = mem.offset();
5221 
5222   if (!is_int20(offset)) {
5223     DCHECK(!scratch.is(no_reg));
5224     LoadIntLiteral(scratch, offset);
5225 #if V8_TARGET_ARCH_S390X
5226     lgh(dst, MemOperand(base, scratch));
5227 #else
5228     lh(dst, MemOperand(base, scratch));
5229 #endif
5230   } else {
5231 #if V8_TARGET_ARCH_S390X
5232     lgh(dst, mem);
5233 #else
5234     if (is_uint12(offset)) {
5235       lh(dst, mem);
5236     } else {
5237       lhy(dst, mem);
5238     }
5239 #endif
5240   }
5241 }
5242 
5243 // Variable-length encoding, depending on whether the offset fits into the
5244 // immediate field; MemOperand currently only supports the d-form.
StoreHalfWord(Register src,const MemOperand & mem,Register scratch)5245 void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
5246                                    Register scratch) {
5247   Register base = mem.rb();
5248   int offset = mem.offset();
5249 
5250   if (is_uint12(offset)) {
5251     sth(src, mem);
5252   } else if (is_int20(offset)) {
5253     sthy(src, mem);
5254   } else {
5255     DCHECK(!scratch.is(no_reg));
5256     LoadIntLiteral(scratch, offset);
5257     sth(src, MemOperand(base, scratch));
5258   }
5259 }
5260 
5261 // Variable-length encoding, depending on whether the offset fits into the
5262 // immediate field; MemOperand currently only supports the d-form.
StoreByte(Register src,const MemOperand & mem,Register scratch)5263 void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
5264                                Register scratch) {
5265   Register base = mem.rb();
5266   int offset = mem.offset();
5267 
5268   if (is_uint12(offset)) {
5269     stc(src, mem);
5270   } else if (is_int20(offset)) {
5271     stcy(src, mem);
5272   } else {
5273     DCHECK(!scratch.is(no_reg));
5274     LoadIntLiteral(scratch, offset);
5275     stc(src, MemOperand(base, scratch));
5276   }
5277 }
5278 
5279 // Shift left logical for 32-bit integer types.
ShiftLeft(Register dst,Register src,const Operand & val)5280 void MacroAssembler::ShiftLeft(Register dst, Register src, const Operand& val) {
5281   if (dst.is(src)) {
5282     sll(dst, val);
5283   } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5284     sllk(dst, src, val);
5285   } else {
5286     lr(dst, src);
5287     sll(dst, val);
5288   }
5289 }
5290 
5291 // Shift left logical for 32-bit integer types.
ShiftLeft(Register dst,Register src,Register val)5292 void MacroAssembler::ShiftLeft(Register dst, Register src, Register val) {
5293   if (dst.is(src)) {
5294     sll(dst, val);
5295   } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5296     sllk(dst, src, val);
5297   } else {
5298     DCHECK(!dst.is(val));  // The lr/sll path clobbers val.
5299     lr(dst, src);
5300     sll(dst, val);
5301   }
5302 }
5303 
5304 // Shift right logical for 32-bit integer types.
ShiftRight(Register dst,Register src,const Operand & val)5305 void MacroAssembler::ShiftRight(Register dst, Register src,
5306                                 const Operand& val) {
5307   if (dst.is(src)) {
5308     srl(dst, val);
5309   } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5310     srlk(dst, src, val);
5311   } else {
5312     lr(dst, src);
5313     srl(dst, val);
5314   }
5315 }
5316 
5317 // Shift right logical for 32-bit integer types.
ShiftRight(Register dst,Register src,Register val)5318 void MacroAssembler::ShiftRight(Register dst, Register src, Register val) {
5319   if (dst.is(src)) {
5320     srl(dst, val);
5321   } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5322     srlk(dst, src, val);
5323   } else {
5324     DCHECK(!dst.is(val));  // The lr/srl path clobbers val.
5325     lr(dst, src);
5326     srl(dst, val);
5327   }
5328 }
5329 
5330 // Shift left arithmetic for 32-bit integer types.
ShiftLeftArith(Register dst,Register src,const Operand & val)5331 void MacroAssembler::ShiftLeftArith(Register dst, Register src,
5332                                     const Operand& val) {
5333   if (dst.is(src)) {
5334     sla(dst, val);
5335   } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5336     slak(dst, src, val);
5337   } else {
5338     lr(dst, src);
5339     sla(dst, val);
5340   }
5341 }
5342 
5343 // Shift left arithmetic for 32-bit integer types.
ShiftLeftArith(Register dst,Register src,Register val)5344 void MacroAssembler::ShiftLeftArith(Register dst, Register src, Register val) {
5345   if (dst.is(src)) {
5346     sla(dst, val);
5347   } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5348     slak(dst, src, val);
5349   } else {
5350     DCHECK(!dst.is(val));  // The lr/sla path clobbers val.
5351     lr(dst, src);
5352     sla(dst, val);
5353   }
5354 }
5355 
5356 // Shift right arithmetic for 32-bit integer types.
ShiftRightArith(Register dst,Register src,const Operand & val)5357 void MacroAssembler::ShiftRightArith(Register dst, Register src,
5358                                      const Operand& val) {
5359   if (dst.is(src)) {
5360     sra(dst, val);
5361   } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5362     srak(dst, src, val);
5363   } else {
5364     lr(dst, src);
5365     sra(dst, val);
5366   }
5367 }
5368 
5369 // Shift right arithmetic for 32-bit integer types.
ShiftRightArith(Register dst,Register src,Register val)5370 void MacroAssembler::ShiftRightArith(Register dst, Register src, Register val) {
5371   if (dst.is(src)) {
5372     sra(dst, val);
5373   } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5374     srak(dst, src, val);
5375   } else {
5376     DCHECK(!dst.is(val));  // The lr/sra path clobbers val.
5377     lr(dst, src);
5378     sra(dst, val);
5379   }
5380 }
5381 
5382 // Clear the right-most 'val' bits of src and place the result in dst.
ClearRightImm(Register dst,Register src,const Operand & val)5383 void MacroAssembler::ClearRightImm(Register dst, Register src,
5384                                    const Operand& val) {
5385   int numBitsToClear = val.imm_ % (kPointerSize * 8);
5386 
5387   // Try to use RISBG if possible
5388   if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
5389     int endBit = 63 - numBitsToClear;
5390     risbg(dst, src, Operand::Zero(), Operand(endBit), Operand::Zero(), true);
5391     return;
5392   }
5393 
5394   uint64_t hexMask = ~((1L << numBitsToClear) - 1);
5395 
5396   // The S390 AND instructions operate in place, so copy src into dst first if needed.
5397   if (!dst.is(src)) LoadRR(dst, src);
5398 
5399   if (numBitsToClear <= 16) {
5400     nill(dst, Operand(static_cast<uint16_t>(hexMask)));
5401   } else if (numBitsToClear <= 32) {
5402     nilf(dst, Operand(static_cast<uint32_t>(hexMask)));
5403   } else if (numBitsToClear <= 64) {
5404     nilf(dst, Operand(static_cast<intptr_t>(0)));
5405     nihf(dst, Operand(hexMask >> 32));
5406   }
5407 }
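
// For example, ClearRightImm(dst, src, Operand(3)) computes dst = src & ~7,
// rounding src down to a multiple of 8.  With GENERAL_INSTR_EXT this is a
// single RISBG selecting bits 0..60; otherwise it is a copy (if needed)
// followed by NILL with the mask 0xFFF8.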
5408 
Popcnt32(Register dst,Register src)5409 void MacroAssembler::Popcnt32(Register dst, Register src) {
5410   DCHECK(!src.is(r0));
5411   DCHECK(!dst.is(r0));
5412 
5413   popcnt(dst, src);
5414   ShiftRight(r0, dst, Operand(16));
5415   ar(dst, r0);
5416   ShiftRight(r0, dst, Operand(8));
5417   ar(dst, r0);
5418   LoadB(dst, dst);
5419 }
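
// Worked example: for src = 0xF0F0F0F0, POPCNT writes the per-byte counts
// 0x04040404 into dst.  Adding dst >> 16 gives 0x04040808, then adding the
// new dst >> 8 gives 0x04080C10, and LoadB extracts the low byte 0x10 = 16,
// the total population count.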
5420 
5421 #ifdef V8_TARGET_ARCH_S390X
Popcnt64(Register dst,Register src)5422 void MacroAssembler::Popcnt64(Register dst, Register src) {
5423   DCHECK(!src.is(r0));
5424   DCHECK(!dst.is(r0));
5425 
5426   popcnt(dst, src);
5427   ShiftRightP(r0, dst, Operand(32));
5428   AddP(dst, r0);
5429   ShiftRightP(r0, dst, Operand(16));
5430   AddP(dst, r0);
5431   ShiftRightP(r0, dst, Operand(8));
5432   AddP(dst, r0);
5433   LoadB(dst, dst);
5434 }
5435 #endif
5436 
5437 #ifdef DEBUG
AreAliased(Register reg1,Register reg2,Register reg3,Register reg4,Register reg5,Register reg6,Register reg7,Register reg8,Register reg9,Register reg10)5438 bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
5439                 Register reg5, Register reg6, Register reg7, Register reg8,
5440                 Register reg9, Register reg10) {
5441   int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
5442                         reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
5443                         reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
5444                         reg10.is_valid();
5445 
5446   RegList regs = 0;
5447   if (reg1.is_valid()) regs |= reg1.bit();
5448   if (reg2.is_valid()) regs |= reg2.bit();
5449   if (reg3.is_valid()) regs |= reg3.bit();
5450   if (reg4.is_valid()) regs |= reg4.bit();
5451   if (reg5.is_valid()) regs |= reg5.bit();
5452   if (reg6.is_valid()) regs |= reg6.bit();
5453   if (reg7.is_valid()) regs |= reg7.bit();
5454   if (reg8.is_valid()) regs |= reg8.bit();
5455   if (reg9.is_valid()) regs |= reg9.bit();
5456   if (reg10.is_valid()) regs |= reg10.bit();
5457   int n_of_non_aliasing_regs = NumRegs(regs);
5458 
5459   return n_of_valid_regs != n_of_non_aliasing_regs;
5460 }
5461 #endif
5462 
CodePatcher(Isolate * isolate,byte * address,int size,FlushICache flush_cache)5463 CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size,
5464                          FlushICache flush_cache)
5465     : address_(address),
5466       size_(size),
5467       masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
5468       flush_cache_(flush_cache) {
5469   // Create a new macro assembler pointing to the address of the code to patch.
5470   // The size is adjusted with kGap in order for the assembler to generate size
5471   // bytes of instructions without failing with buffer size constraints.
5472   DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5473 }
5474 
~CodePatcher()5475 CodePatcher::~CodePatcher() {
5476   // Indicate that code has changed.
5477   if (flush_cache_ == FLUSH) {
5478     Assembler::FlushICache(masm_.isolate(), address_, size_);
5479   }
5480 
5481   // Check that the code was patched as expected.
5482   DCHECK(masm_.pc_ == address_ + size_);
5483   DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5484 }
5485 
TruncatingDiv(Register result,Register dividend,int32_t divisor)5486 void MacroAssembler::TruncatingDiv(Register result, Register dividend,
5487                                    int32_t divisor) {
5488   DCHECK(!dividend.is(result));
5489   DCHECK(!dividend.is(r0));
5490   DCHECK(!result.is(r0));
5491   base::MagicNumbersForDivision<uint32_t> mag =
5492       base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
5493 #ifdef V8_TARGET_ARCH_S390X
5494   LoadRR(result, dividend);
5495   MulP(result, Operand(mag.multiplier));
5496   ShiftRightArithP(result, result, Operand(32));
5497 
5498 #else
5499   lay(sp, MemOperand(sp, -kPointerSize));
5500   StoreP(r1, MemOperand(sp));
5501 
5502   mov(r1, Operand(mag.multiplier));
5503   mr_z(r0, dividend);  // r0:r1 = r1 * dividend
5504 
5505   LoadRR(result, r0);
5506   LoadP(r1, MemOperand(sp));
5507   la(sp, MemOperand(sp, kPointerSize));
5508 #endif
5509   bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
5510   if (divisor > 0 && neg) {
5511     AddP(result, dividend);
5512   }
5513   if (divisor < 0 && !neg && mag.multiplier > 0) {
5514     SubP(result, dividend);
5515   }
5516   if (mag.shift > 0) ShiftRightArith(result, result, Operand(mag.shift));
5517   ExtractBit(r0, dividend, 31);
5518   AddP(result, r0);
5519 }
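
// Illustrative sketch (not part of the original file) of the arithmetic the
// sequence above performs on 32-bit operands.  The helper name is
// hypothetical; the magic constants come from base::SignedDivisionByConstant
// (Hacker's Delight style), e.g. divisor 3 typically yields multiplier
// 0x55555556 and shift 0, so dividing 7 gives 2 and dividing -7 gives -2.
static int32_t TruncatingDivSketch(int32_t dividend, int32_t divisor,
                                   uint32_t multiplier, int shift) {
  // High 32 bits of the signed 32x32->64 product (what MR computes above).
  int64_t product =
      static_cast<int64_t>(dividend) * static_cast<int32_t>(multiplier);
  int32_t result = static_cast<int32_t>(product >> 32);
  bool neg = (multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) result += dividend;
  if (divisor < 0 && !neg && multiplier > 0) result -= dividend;
  if (shift > 0) result >>= shift;                  // Arithmetic shift right.
  result += static_cast<uint32_t>(dividend) >> 31;  // +1 when dividend < 0.
  return result;
}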
5520 
5521 }  // namespace internal
5522 }  // namespace v8
5523 
5524 #endif  // V8_TARGET_ARCH_S390
5525