// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <assert.h>  // For assert
#include <limits.h>  // For LONG_MIN, LONG_MAX.

#if V8_TARGET_ARCH_S390

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"

#include "src/s390/macro-assembler-s390.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
                               CodeObjectRequired create_code_object)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false) {
  if (create_code_object == CodeObjectRequired::kYes) {
    code_object_ =
        Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
  }
}

void MacroAssembler::Jump(Register target) { b(target); }

void MacroAssembler::JumpToJSEntry(Register target) {
  Move(ip, target);
  Jump(ip);
}

void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond, CRegister) {
  Label skip;

  if (cond != al) b(NegateCondition(cond), &skip);

  DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);

  mov(ip, Operand(target, rmode));
  b(ip);

  bind(&skip);
}

void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
                          CRegister cr) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
}

void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  jump(code, rmode, cond);
}

int MacroAssembler::CallSize(Register target) { return 2; }  // BASR

void MacroAssembler::Call(Register target) {
  Label start;
  bind(&start);

  // Branch to target via indirect branch
  basr(r14, target);

  DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
}

void MacroAssembler::CallJSEntry(Register target) {
  DCHECK(target.is(ip));
  Call(target);
}

int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
                             Condition cond) {
  // S390 Assembler::move sequence is IILF / IIHF
  int size;
#if V8_TARGET_ARCH_S390X
  size = 14;  // IILF + IIHF + BASR
#else
  size = 8;  // IILF + BASR
#endif
  return size;
}

int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
                                                   RelocInfo::Mode rmode,
                                                   Condition cond) {
  // S390 Assembler::move sequence is IILF / IIHF
  int size;
#if V8_TARGET_ARCH_S390X
  size = 14;  // IILF + IIHF + BASR
#else
  size = 8;  // IILF + BASR
#endif
  return size;
}

void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(cond == al);

#ifdef DEBUG
  // Check the expected size before generating code to ensure we assume the same
  // constant pool availability (e.g., whether constant pool is full or not).
  int expected_size = CallSize(target, rmode, cond);
  Label start;
  bind(&start);
#endif

  mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode));
  basr(r14, ip);

  DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}

int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
                             TypeFeedbackId ast_id, Condition cond) {
  return 6;  // BRASL
}

void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id, Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);

#ifdef DEBUG
  // Check the expected size before generating code to ensure we assume the same
  // constant pool availability (e.g., whether constant pool is full or not).
  int expected_size = CallSize(code, rmode, ast_id, cond);
  Label start;
  bind(&start);
#endif
  call(code, rmode, ast_id);
  DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}

void MacroAssembler::Drop(int count) {
  if (count > 0) {
    int total = count * kPointerSize;
    if (is_uint12(total)) {
      la(sp, MemOperand(sp, total));
    } else if (is_int20(total)) {
      lay(sp, MemOperand(sp, total));
    } else {
      AddP(sp, Operand(total));
    }
  }
}
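
// Editorial note on the instruction selection above: LA accepts an unsigned
// 12-bit displacement and LAY a signed 20-bit one, so most drops fold into a
// single address-generation instruction and only totals outside the 20-bit
// range fall back to AddP. For example, Drop(2) on a 64-bit build
// (kPointerSize == 8) assembles to the equivalent of `la sp, 16(sp)`.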

void MacroAssembler::Drop(Register count, Register scratch) {
  ShiftLeftP(scratch, count, Operand(kPointerSizeLog2));
  AddP(sp, sp, scratch);
}

void MacroAssembler::Call(Label* target) { b(r14, target); }

void MacroAssembler::Push(Handle<Object> handle) {
  mov(r0, Operand(handle));
  push(r0);
}

void MacroAssembler::Move(Register dst, Handle<Object> value) {
  mov(dst, Operand(value));
}

void MacroAssembler::Move(Register dst, Register src, Condition cond) {
  if (!dst.is(src)) {
    LoadRR(dst, src);
  }
}

void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
  if (!dst.is(src)) {
    ldr(dst, src);
  }
}

void MacroAssembler::MultiPush(RegList regs, Register location) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  SubP(location, location, Operand(stack_offset));
  for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      StoreP(ToRegister(i), MemOperand(location, stack_offset));
    }
  }
}

void MacroAssembler::MultiPop(RegList regs, Register location) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < Register::kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      LoadP(ToRegister(i), MemOperand(location, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  AddP(location, location, Operand(stack_offset));
}
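
// Usage sketch (illustrative, not from the original source): RegList is a
// bitmask, so a save/restore pair looks like
//   MultiPush(r4.bit() | r5.bit() | r6.bit());
//   ...
//   MultiPop(r4.bit() | r5.bit() | r6.bit());
// MultiPush walks the mask from the highest encoding downward, so the
// highest-numbered register lands farthest from the new top of stack --
// exactly the layout the ascending MultiPop loop expects.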

void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
                              Condition) {
  LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
}

void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
                               Condition) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2));
}

void MacroAssembler::InNewSpace(Register object, Register scratch,
                                Condition cond, Label* branch) {
  DCHECK(cond == eq || cond == ne);
  CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
}

void MacroAssembler::RecordWriteField(
    Register object, int offset, Register value, Register dst,
    LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action, SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip the barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  lay(dst, MemOperand(object, offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    AndP(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
    beq(&ok, Label::kNear);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  bind(&done);

  // Zap the clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
    mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
  }
}
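
// Hypothetical call site (names are illustrative, and the trailing arguments
// are assumed to take the defaults declared in the header):
//   StoreP(value, FieldMemOperand(object, JSObject::kPropertiesOffset));
//   RecordWriteField(object, JSObject::kPropertiesOffset, value, scratch,
//                    kLRHasBeenSaved, kSaveFPRegs);
// i.e. the barrier is emitted right after the store, using the same untagged
// field offset so dst can be derived with a single lay.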

// Will clobber 4 registers: object, map, dst, ip.  The
// register 'object' contains a heap object pointer.
void MacroAssembler::RecordWriteForMap(Register object, Register map,
                                       Register dst,
                                       LinkRegisterStatus lr_status,
                                       SaveFPRegsMode fp_mode) {
  if (emit_debug_code()) {
    LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset));
    CmpP(dst, Operand(isolate()->factory()->meta_map()));
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    CmpP(map, FieldMemOperand(object, HeapObject::kMapOffset));
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  Label done;

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set.  This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlag(map,
                map,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);

  lay(dst, MemOperand(object, HeapObject::kMapOffset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    AndP(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
    beq(&ok, Label::kNear);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    push(r14);
  }
  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(r14);
  }

  bind(&done);

  // Count the number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);

  // Zap the clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 12)));
    mov(map, Operand(bit_cast<intptr_t>(kZapValue + 16)));
  }
}

// Will clobber 4 registers: object, address, scratch, ip.  The
// register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(
    Register object, Register address, Register value,
    LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action, SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!object.is(value));
  if (emit_debug_code()) {
    CmpP(value, MemOperand(address));
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }
  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
  }
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    push(r14);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(r14);
  }

  bind(&done);

  // Count the number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
                   value);

  // Zap the clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
  }
}

void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
                                               Register code_entry,
                                               Register scratch) {
  const int offset = JSFunction::kCodeEntryOffset;

  // Since a code entry (value) is always in old space, we don't need to update
  // the remembered set. If incremental marking is off, there is nothing for us
  // to do.
  if (!FLAG_incremental_marking) return;

  DCHECK(js_function.is(r3));
  DCHECK(code_entry.is(r6));
  DCHECK(scratch.is(r7));
  AssertNotSmi(js_function);

  if (emit_debug_code()) {
    AddP(scratch, js_function, Operand(offset - kHeapObjectTag));
    LoadP(ip, MemOperand(scratch));
    CmpP(ip, code_entry);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis and stores into the young generation.
  Label done;

  CheckPageFlag(code_entry, scratch,
                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
  CheckPageFlag(js_function, scratch,
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);

  const Register dst = scratch;
  AddP(dst, js_function, Operand(offset - kHeapObjectTag));

  // Save caller-saved registers.  js_function and code_entry are in the
  // caller-saved register list.
  DCHECK(kJSCallerSaved & js_function.bit());
  // DCHECK(kJSCallerSaved & code_entry.bit());
  MultiPush(kJSCallerSaved | code_entry.bit() | r14.bit());

  int argument_count = 3;
  PrepareCallCFunction(argument_count, code_entry);

  LoadRR(r2, js_function);
  LoadRR(r3, dst);
  mov(r4, Operand(ExternalReference::isolate_address(isolate())));

  {
    AllowExternalCallThatCantCauseGC scope(this);
    CallCFunction(
        ExternalReference::incremental_marking_record_write_code_entry_function(
            isolate()),
        argument_count);
  }

  // Restore caller-saved registers (including js_function and code_entry).
  MultiPop(kJSCallerSaved | code_entry.bit() | r14.bit());

  bind(&done);
}

void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address, Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  mov(ip, Operand(store_buffer));
  LoadP(scratch, MemOperand(ip));
  // Store pointer to buffer and increment buffer top.
  StoreP(address, MemOperand(scratch));
  AddP(scratch, Operand(kPointerSize));
  // Write back new top of buffer.
  StoreP(scratch, MemOperand(ip));
  // Check for the end of the buffer; if we have reached it, fall through to
  // the overflow stub call below.
  AndP(scratch, Operand(StoreBuffer::kStoreBufferMask));

  if (and_then == kFallThroughAtEnd) {
    bne(&done, Label::kNear);
  } else {
    DCHECK(and_then == kReturnAtEnd);
    bne(&done, Label::kNear);
  }
  push(r14);
  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
  CallStub(&store_buffer_overflow);
  pop(r14);
  bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}

void MacroAssembler::PushCommonFrame(Register marker_reg) {
  int fp_delta = 0;
  CleanseP(r14);
  if (marker_reg.is_valid()) {
    Push(r14, fp, marker_reg);
    fp_delta = 1;
  } else {
    Push(r14, fp);
    fp_delta = 0;
  }
  la(fp, MemOperand(sp, fp_delta * kPointerSize));
}

void MacroAssembler::PopCommonFrame(Register marker_reg) {
  if (marker_reg.is_valid()) {
    Pop(r14, fp, marker_reg);
  } else {
    Pop(r14, fp);
  }
}

void MacroAssembler::PushStandardFrame(Register function_reg) {
  int fp_delta = 0;
  CleanseP(r14);
  if (function_reg.is_valid()) {
    Push(r14, fp, cp, function_reg);
    fp_delta = 2;
  } else {
    Push(r14, fp, cp);
    fp_delta = 1;
  }
  la(fp, MemOperand(sp, fp_delta * kPointerSize));
}

void MacroAssembler::RestoreFrameStateForTailCall() {
  // if (FLAG_enable_embedded_constant_pool) {
  //   LoadP(kConstantPoolRegister,
  //         MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
  //   set_constant_pool_available(false);
  // }
  DCHECK(!FLAG_enable_embedded_constant_pool);
  LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
  LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
}

const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable;
const int MacroAssembler::kNumSafepointSavedRegisters =
    Register::kNumAllocatable;

// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK(num_unsaved >= 0);
  if (num_unsaved > 0) {
    lay(sp, MemOperand(sp, -(num_unsaved * kPointerSize)));
  }
  MultiPush(kSafepointSavedRegisters);
}

void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  if (num_unsaved > 0) {
    la(sp, MemOperand(sp, num_unsaved * kPointerSize));
  }
}

void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  StoreP(src, SafepointRegisterSlot(dst));
}

void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  LoadP(dst, SafepointRegisterSlot(src));
}

int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  RegList regs = kSafepointSavedRegisters;
  int index = 0;

  DCHECK(reg_code >= 0 && reg_code < kNumRegisters);

  for (int16_t i = 0; i < reg_code; i++) {
    if ((regs & (1 << i)) != 0) {
      index++;
    }
  }

  return index;
}
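
// Worked example (editorial): if kSafepointSavedRegisters had the bits for
// r2, r3 and r5 set, SafepointRegisterStackIndex(5) would count the saved
// registers with lower encodings (r2 and r3) and return 2, placing r5 two
// slots above the stack pointer.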

MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}

MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  // General purpose registers are pushed last on the stack.
  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
  int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}

void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
                                     const DoubleRegister src) {
  // Turn potential sNaN into qNaN
  if (!dst.is(src)) ldr(dst, src);
  lzdr(kDoubleRegZero);
  sdbr(dst, kDoubleRegZero);
}
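
// Editorial note on the sequence above: for any non-NaN x, x - (+0) == x, but
// a signaling NaN operand makes the subtraction deliver the corresponding
// quiet NaN, so the subtract quietens sNaNs without disturbing ordinary
// values (standard IEEE-754 behavior, stated here for clarity).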

void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
  cdfbr(dst, src);
}

void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
                                                DoubleRegister dst) {
  if (CpuFeatures::IsSupported(FLOATING_POINT_EXT)) {
    cdlfbr(Condition(5), Condition(0), dst, src);
  } else {
    // zero-extend src
    llgfr(src, src);
    // convert to double
    cdgbr(dst, src);
  }
}

void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
  cefbr(Condition(4), dst, src);
}

void MacroAssembler::ConvertUnsignedIntToFloat(Register src,
                                               DoubleRegister dst) {
  celfbr(Condition(4), Condition(0), dst, src);
}

#if V8_TARGET_ARCH_S390X
void MacroAssembler::ConvertInt64ToDouble(Register src,
                                          DoubleRegister double_dst) {
  cdgbr(double_dst, src);
}

void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src,
                                                 DoubleRegister double_dst) {
  celgbr(Condition(0), Condition(0), double_dst, src);
}

void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src,
                                                  DoubleRegister double_dst) {
  cdlgbr(Condition(0), Condition(0), double_dst, src);
}

void MacroAssembler::ConvertInt64ToFloat(Register src,
                                         DoubleRegister double_dst) {
  cegbr(double_dst, src);
}
#endif

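// Editorial note on the mask values used by the Convert* helpers below
// (added for clarity; see the z/Architecture Principles of Operation for the
// authoritative definition): the convert-to-fixed instructions take a
// rounding-mode mask where 4 = round to nearest (ties to even), 5 = round
// toward zero, 6 = round toward +infinity, and 7 = round toward -infinity,
// which is how the kRoundTo* enum values are mapped in each switch.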
void MacroAssembler::ConvertFloat32ToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_S390X
                                           const Register dst_hi,
#endif
                                           const Register dst,
                                           const DoubleRegister double_dst,
                                           FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      UNIMPLEMENTED();
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
  cgebr(m, dst, double_input);
  ldgr(double_dst, dst);
#if !V8_TARGET_ARCH_S390X
  srlg(dst_hi, dst, Operand(32));
#endif
}

void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_S390X
                                          const Register dst_hi,
#endif
                                          const Register dst,
                                          const DoubleRegister double_dst,
                                          FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      UNIMPLEMENTED();
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
  cgdbr(m, dst, double_input);
  ldgr(double_dst, dst);
#if !V8_TARGET_ARCH_S390X
  srlg(dst_hi, dst, Operand(32));
#endif
}

void MacroAssembler::ConvertFloat32ToInt32(const DoubleRegister double_input,
                                           const Register dst,
                                           const DoubleRegister double_dst,
                                           FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      m = Condition(4);
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
  cfebr(m, dst, double_input);
  Label done;
  b(Condition(0xe), &done, Label::kNear);  // special case
  LoadImmP(dst, Operand::Zero());
  bind(&done);
  ldgr(double_dst, dst);
}

void MacroAssembler::ConvertFloat32ToUnsignedInt32(
    const DoubleRegister double_input, const Register dst,
    const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      UNIMPLEMENTED();
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
  clfebr(m, Condition(0), dst, double_input);
  Label done;
  b(Condition(0xe), &done, Label::kNear);  // special case
  LoadImmP(dst, Operand::Zero());
  bind(&done);
  ldgr(double_dst, dst);
}

#if V8_TARGET_ARCH_S390X
void MacroAssembler::ConvertFloat32ToUnsignedInt64(
    const DoubleRegister double_input, const Register dst,
    const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      UNIMPLEMENTED();
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
  clgebr(m, Condition(0), dst, double_input);
  ldgr(double_dst, dst);
}

void MacroAssembler::ConvertDoubleToUnsignedInt64(
    const DoubleRegister double_input, const Register dst,
    const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      UNIMPLEMENTED();
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
  clgdbr(m, Condition(0), dst, double_input);
  ldgr(double_dst, dst);
}

#endif

#if !V8_TARGET_ARCH_S390X
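// Editorial note: the 32-bit pair shifts below rely on the z/Arch 64-bit
// shift instructions (SLDL, SRDL, SRDA), which operate on the even/odd
// register pair designated by the even register. That is why each helper
// stages the value in the r0/r1 pair around the shift instead of shifting
// in place.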
void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
                                   Register src_low, Register src_high,
                                   Register scratch, Register shift) {
  LoadRR(r0, src_high);
  LoadRR(r1, src_low);
  sldl(r0, shift, Operand::Zero());
  LoadRR(dst_high, r0);
  LoadRR(dst_low, r1);
}

void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
                                   Register src_low, Register src_high,
                                   uint32_t shift) {
  LoadRR(r0, src_high);
  LoadRR(r1, src_low);
  sldl(r0, r0, Operand(shift));
  LoadRR(dst_high, r0);
  LoadRR(dst_low, r1);
}

void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
                                    Register src_low, Register src_high,
                                    Register scratch, Register shift) {
  LoadRR(r0, src_high);
  LoadRR(r1, src_low);
  srdl(r0, shift, Operand::Zero());
  LoadRR(dst_high, r0);
  LoadRR(dst_low, r1);
}

void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
                                    Register src_low, Register src_high,
                                    uint32_t shift) {
  LoadRR(r0, src_high);
  LoadRR(r1, src_low);
  srdl(r0, r0, Operand(shift));
  LoadRR(dst_high, r0);
  LoadRR(dst_low, r1);
}

void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
                                         Register src_low, Register src_high,
                                         Register scratch, Register shift) {
  LoadRR(r0, src_high);
  LoadRR(r1, src_low);
  srda(r0, shift, Operand::Zero());
  LoadRR(dst_high, r0);
  LoadRR(dst_low, r1);
}

void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
                                         Register src_low, Register src_high,
                                         uint32_t shift) {
  LoadRR(r0, src_high);
  LoadRR(r1, src_low);
  srda(r0, r0, Operand(shift));
  LoadRR(dst_high, r0);
  LoadRR(dst_low, r1);
}
#endif

void MacroAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
  lgdr(dst, src);
}

void MacroAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) {
  ldgr(dst, src);
}

void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
                                  int prologue_offset) {
  {
    ConstantPoolUnavailableScope constant_pool_unavailable(this);
    Load(r1, Operand(StackFrame::TypeToMarker(type)));
    PushCommonFrame(r1);
  }
}

void MacroAssembler::Prologue(bool code_pre_aging, Register base,
                              int prologue_offset) {
  DCHECK(!base.is(no_reg));
  {
    PredictableCodeSizeScope predictable_code_size_scope(
        this, kNoCodeAgeSequenceLength);
    // The following instructions must remain together and unmodified
    // for code aging to work properly.
    if (code_pre_aging) {
      // Pre-age the code.
      // This matches the code found in PatchPlatformCodeAge().
      Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
      intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
      nop();
      CleanseP(r14);
      Push(r14);
      mov(r2, Operand(target));
      Call(r2);
      for (int i = 0; i < kNoCodeAgeSequenceLength - kCodeAgingSequenceLength;
           i += 2) {
        // TODO(joransiu): Create a nop function to pad
        //         (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes.
        nop();  // 2-byte nops.
      }
    } else {
      // This matches the code found in GetNoCodeAgeSequence().
      PushStandardFrame(r3);
    }
  }
}

void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
  LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  LoadP(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
  LoadP(vector, FieldMemOperand(vector, Cell::kValueOffset));
}

void MacroAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  // We create a stack frame with:
  //    Return Addr <-- old sp
  //    Old FP      <-- new fp
  //    CP
  //    type
  //    CodeObject  <-- new sp

  Load(ip, Operand(StackFrame::TypeToMarker(type)));
  PushCommonFrame(ip);

  if (type == StackFrame::INTERNAL) {
    mov(r0, Operand(CodeObject()));
    push(r0);
  }
}

int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer, return address and constant pool pointer.
  LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
  lay(r1, MemOperand(
              fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment));
  LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  LoadRR(sp, r1);
  int frame_ends = pc_offset();
  return frame_ends;
}

void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
                                       Register argc) {
  CleanseP(r14);
  Push(r14, fp, context, target);
  la(fp, MemOperand(sp, 2 * kPointerSize));
  Push(argc);
}

void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
                                       Register argc) {
  Pop(argc);
  Pop(r14, fp, context, target);
}

// ExitFrame layout (probably wrongish; needs updating)
//
//  SP -> previousSP
//        LK reserved
//        code
//        sp_on_exit (for debug?)
// oldSP->prev SP
//        LK
//        <parameters on stack>

// Prior to calling EnterExitFrame, we've got a bunch of parameters
// on the stack that we need to wrap a real frame around, so first
// we reserve a slot for LK and push the previous SP, which is captured
// in the fp register (r11).
// Then we buy a new frame:

// r14
// oldFP <- newFP
// SP
// Code
// Floats
// gaps
// Args
// ABIRes <- newSP
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
                                    StackFrame::Type frame_type) {
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT);
  // Set up the frame structure on the stack.
  DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  DCHECK(stack_space > 0);

  // This is an opportunity to build a frame to wrap
  // all of the pushes that have happened inside of V8
  // since we were called from C code
  CleanseP(r14);
  Load(r1, Operand(StackFrame::TypeToMarker(frame_type)));
  PushCommonFrame(r1);
  // Reserve room for saved entry sp and code object.
  lay(sp, MemOperand(fp, -ExitFrameConstants::kFixedFrameSizeFromFp));

  if (emit_debug_code()) {
    StoreP(MemOperand(fp, ExitFrameConstants::kSPOffset), Operand::Zero(), r1);
  }
  mov(r1, Operand(CodeObject()));
  StoreP(r1, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  mov(r1, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  StoreP(fp, MemOperand(r1));
  mov(r1, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  StoreP(cp, MemOperand(r1));

  // Optionally save all volatile double registers.
  if (save_doubles) {
    MultiPushDoubles(kCallerSavedDoubles);
    // Note that d0 will be accessible at
    //   fp - ExitFrameConstants::kFrameSize -
    //   kNumCallerSavedDoubles * kDoubleSize,
    // since the sp slot and code slot were pushed after the fp.
  }

  lay(sp, MemOperand(sp, -stack_space * kPointerSize));

  // Allocate and align the frame preparing for calling the runtime
  // function.
  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  if (frame_alignment > 0) {
    DCHECK(frame_alignment == 8);
    ClearRightImm(sp, sp, Operand(3));  // equivalent to &= -8
  }

  lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
  StoreP(MemOperand(sp), Operand::Zero(), r0);
  // Set the exit frame sp value to point just before the return address
  // location.
  lay(r1, MemOperand(sp, kStackFrameSPSlot * kPointerSize));
  StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
}

int MacroAssembler::ActivationFrameAlignment() {
#if !defined(USE_SIMULATOR)
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one S390
  // platform for another S390 platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // Simulated
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif
}

void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
                                    bool restore_context,
                                    bool argument_count_is_length) {
  // Optionally restore all double registers.
  if (save_doubles) {
    // Calculate the stack location of the saved doubles and restore them.
    const int kNumRegs = kNumCallerSavedDoubles;
    lay(r5, MemOperand(fp, -(ExitFrameConstants::kFixedFrameSizeFromFp +
                             kNumRegs * kDoubleSize)));
    MultiPopDoubles(kCallerSavedDoubles, r5);
  }

  // Clear top frame.
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0);

  // Restore current context from top and clear it in debug mode.
  if (restore_context) {
    mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
    LoadP(cp, MemOperand(ip));
  }
#ifdef DEBUG
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0);
#endif

  // Tear down the exit frame, pop the arguments, and return.
  LeaveFrame(StackFrame::EXIT);

  if (argument_count.is_valid()) {
    if (!argument_count_is_length) {
      ShiftLeftP(argument_count, argument_count, Operand(kPointerSizeLog2));
    }
    la(sp, MemOperand(sp, argument_count));
  }
}

void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
  Move(dst, d0);
}

void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
  Move(dst, d0);
}

void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
                                        Register caller_args_count_reg,
                                        Register scratch0, Register scratch1) {
#if DEBUG
  if (callee_args_count.is_reg()) {
    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
                       scratch1));
  } else {
    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
  }
#endif

  // Calculate the end of the destination area where we will put the arguments
  // after we drop the current frame. We add kPointerSize to count the receiver
  // argument, which is not included in the formal parameters count.
  Register dst_reg = scratch0;
  ShiftLeftP(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
  AddP(dst_reg, fp, dst_reg);
  AddP(dst_reg, dst_reg,
       Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));

  Register src_reg = caller_args_count_reg;
  // Calculate the end of the source area. +kPointerSize is for the receiver.
  if (callee_args_count.is_reg()) {
    ShiftLeftP(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
    AddP(src_reg, sp, src_reg);
    AddP(src_reg, src_reg, Operand(kPointerSize));
  } else {
    mov(src_reg, Operand((callee_args_count.immediate() + 1) * kPointerSize));
    AddP(src_reg, src_reg, sp);
  }

  if (FLAG_debug_code) {
    CmpLogicalP(src_reg, dst_reg);
    Check(lt, kStackAccessBelowStackPointer);
  }

  // Restore the caller's frame pointer and return address now, as they will
  // be overwritten by the copying loop.
  RestoreFrameStateForTailCall();

  // Now copy callee arguments to the caller frame going backwards to avoid
  // callee arguments corruption (source and destination areas could overlap).

  // Both src_reg and dst_reg are pointing to the word after the one to copy,
  // so they must be pre-decremented in the loop.
  Register tmp_reg = scratch1;
  Label loop;
  if (callee_args_count.is_reg()) {
    AddP(tmp_reg, callee_args_count.reg(), Operand(1));  // +1 for receiver
  } else {
    mov(tmp_reg, Operand(callee_args_count.immediate() + 1));
  }
  LoadRR(r1, tmp_reg);
  bind(&loop);
  LoadP(tmp_reg, MemOperand(src_reg, -kPointerSize));
  StoreP(tmp_reg, MemOperand(dst_reg, -kPointerSize));
  lay(src_reg, MemOperand(src_reg, -kPointerSize));
  lay(dst_reg, MemOperand(dst_reg, -kPointerSize));
  BranchOnCount(r1, &loop);

  // Leave the current frame.
  LoadRR(sp, dst_reg);
}
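
// Editorial walkthrough: with 2 caller arguments and 3 callee arguments,
// dst_reg and src_reg each start one pointer past their argument area
// (receiver included), the loop runs callee count + 1 = 4 iterations copying
// words downward, and the final LoadRR drops the caller's frame by moving sp
// to dst_reg.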

void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual, Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // set up registers according to the contract with ArgumentsAdaptorTrampoline:
  //  r2: actual arguments count
  //  r3: function (passed through to callee)
  //  r4: expected arguments count

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.

  // ARM has some sanity checks as per below; consider adding them for S390:
  //  DCHECK(actual.is_immediate() || actual.reg().is(r2));
  //  DCHECK(expected.is_immediate() || expected.reg().is(r4));

  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    mov(r2, Operand(actual.immediate()));
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip the adaptation code by making it look
        // like we have a match between the expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        mov(r4, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      mov(r2, Operand(actual.immediate()));
      CmpPH(expected.reg(), Operand(actual.immediate()));
      beq(&regular_invoke);
    } else {
      CmpP(expected.reg(), actual.reg());
      beq(&regular_invoke);
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      Call(adaptor);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        b(done);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}

void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual) {
  Label skip_hook;
  ExternalReference debug_hook_active =
      ExternalReference::debug_hook_on_function_call_address(isolate());
  mov(r6, Operand(debug_hook_active));
  LoadB(r6, MemOperand(r6));
  CmpP(r6, Operand::Zero());
  beq(&skip_hook);
  {
    FrameScope frame(this,
                     has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
    if (expected.is_reg()) {
      SmiTag(expected.reg());
      Push(expected.reg());
    }
    if (actual.is_reg()) {
      SmiTag(actual.reg());
      Push(actual.reg());
    }
    if (new_target.is_valid()) {
      Push(new_target);
    }
    Push(fun, fun);
    CallRuntime(Runtime::kDebugOnFunctionCall);
    Pop(fun);
    if (new_target.is_valid()) {
      Pop(new_target);
    }
    if (actual.is_reg()) {
      Pop(actual.reg());
      SmiUntag(actual.reg());
    }
    if (expected.is_reg()) {
      Pop(expected.reg());
      SmiUntag(expected.reg());
    }
  }
  bind(&skip_hook);
}
1339 
InvokeFunctionCode(Register function,Register new_target,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)1340 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1341                                         const ParameterCount& expected,
1342                                         const ParameterCount& actual,
1343                                         InvokeFlag flag,
1344                                         const CallWrapper& call_wrapper) {
1345   // You can't call a function without a valid frame.
1346   DCHECK(flag == JUMP_FUNCTION || has_frame());
1347 
1348   DCHECK(function.is(r3));
1349   DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r5));
1350 
1351   if (call_wrapper.NeedsDebugHookCheck()) {
1352     CheckDebugHook(function, new_target, expected, actual);
1353   }
1354 
1355   // Clear the new.target register if not given.
1356   if (!new_target.is_valid()) {
1357     LoadRoot(r5, Heap::kUndefinedValueRootIndex);
1358   }
1359 
1360   Label done;
1361   bool definitely_mismatches = false;
1362   InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
1363                  call_wrapper);
1364   if (!definitely_mismatches) {
1365     // We call indirectly through the code field in the function to
1366     // allow recompilation to take effect without changing any of the
1367     // call sites.
1368     Register code = ip;
1369     LoadP(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
1370     if (flag == CALL_FUNCTION) {
1371       call_wrapper.BeforeCall(CallSize(code));
1372       CallJSEntry(code);
1373       call_wrapper.AfterCall();
1374     } else {
1375       DCHECK(flag == JUMP_FUNCTION);
1376       JumpToJSEntry(code);
1377     }
1378 
1379     // Continue here if InvokePrologue does handle the invocation due to
1380     // mismatched parameter counts.
1381     bind(&done);
1382   }
1383 }

void MacroAssembler::InvokeFunction(Register fun, Register new_target,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in r3.
  DCHECK(fun.is(r3));

  Register expected_reg = r4;
  Register temp_reg = r6;
  LoadP(temp_reg, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
  LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
  LoadW(expected_reg,
        FieldMemOperand(temp_reg,
                        SharedFunctionInfo::kFormalParameterCountOffset));
#if !defined(V8_TARGET_ARCH_S390X)
  SmiUntag(expected_reg);
#endif

  ParameterCount expected(expected_reg);
  InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
}

void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in r3.
  DCHECK(function.is(r3));

  // Get the function and set up the context.
  LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));

  InvokeFunctionCode(r3, no_reg, expected, actual, flag, call_wrapper);
}

void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  Move(r3, function);
  InvokeFunction(r3, expected, actual, flag, call_wrapper);
}

void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
                                          Label* fail) {
  DCHECK(kNotStringTag != 0);

  LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  mov(r0, Operand(kIsNotStringMask));
  AndP(r0, scratch);
  bne(fail);
}

void MacroAssembler::IsObjectNameType(Register object, Register scratch,
                                      Label* fail) {
  LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  CmpP(scratch, Operand(LAST_NAME_TYPE));
  bgt(fail);
}

void MacroAssembler::MaybeDropFrames() {
  // Check whether we need to drop frames to restart a function on the stack.
  ExternalReference restart_fp =
      ExternalReference::debug_restart_fp_address(isolate());
  mov(r3, Operand(restart_fp));
  LoadP(r3, MemOperand(r3));
  CmpP(r3, Operand::Zero());
  Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
       ne);
}

void MacroAssembler::PushStackHandler() {
  // Adjust this code if the handler layout below changes.
  STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);

  // Link the current handler as the next handler.
  mov(r7, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));

  // Reserve stack space for the handler (a single slot, per the asserts
  // above).
  lay(sp, MemOperand(sp, -StackHandlerConstants::kSize));

  // Copy the old handler into the next handler slot.
  mvc(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7),
      kPointerSize);
  // Set this new handler as the current one.
  StoreP(sp, MemOperand(r7));
}

void MacroAssembler::PopStackHandler() {
  STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);

  // Pop the next handler into r3 and store it back into the handler address.
  Pop(r3);
  mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));

  StoreP(r3, MemOperand(ip));
}

// Compute the hash code from the untagged key.  This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
  // Load the hash seed into scratch.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  SmiUntag(scratch);

  // XOR the original key with the seed.
  XorP(t0, scratch);

  // Compute the hash code from the untagged key.  This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  LoadRR(scratch, t0);
  NotP(scratch);
  sll(t0, Operand(15));
  AddP(t0, scratch, t0);
  // hash = hash ^ (hash >> 12);
  ShiftRight(scratch, t0, Operand(12));
  XorP(t0, scratch);
  // hash = hash + (hash << 2);
  ShiftLeft(scratch, t0, Operand(2));
  AddP(t0, t0, scratch);
  // hash = hash ^ (hash >> 4);
  ShiftRight(scratch, t0, Operand(4));
  XorP(t0, scratch);
  // hash = hash * 2057;
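  // 2057 = 1 + 2^3 + 2^11, so the multiply is strength-reduced to two
  // shift-and-add steps.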
  LoadRR(r0, t0);
  ShiftLeft(scratch, t0, Operand(3));
  AddP(t0, t0, scratch);
  ShiftLeft(scratch, r0, Operand(11));
  AddP(t0, t0, scratch);
  // hash = hash ^ (hash >> 16);
  ShiftRight(scratch, t0, Operand(16));
  XorP(t0, scratch);
  // hash & 0x3fffffff
  ExtractBitRange(t0, t0, 29, 0);
}

void MacroAssembler::Allocate(int object_size, Register result,
                              Register scratch1, Register scratch2,
                              Label* gc_required, AllocationFlags flags) {
  DCHECK(object_size <= kMaxRegularHeapObjectSize);
  DCHECK((flags & ALLOCATION_FOLDED) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      LoadImmP(result, Operand(0x7091));
      LoadImmP(scratch1, Operand(0x7191));
      LoadImmP(scratch2, Operand(0x7291));
    }
    b(gc_required);
    return;
  }

  DCHECK(!AreAliased(result, scratch1, scratch2, ip));

  // Convert the object size into bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    object_size *= kPointerSize;
  }
  DCHECK_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask));

  // Check relative positions of allocation top and limit addresses.
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
  intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
  DCHECK((limit - top) == kPointerSize);
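  // Because the limit word immediately follows the top word, both can be
  // addressed off top_address; see the MemOperand(top_address, limit - top)
  // uses below.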

  // Set up allocation top address register.
  Register top_address = scratch1;
  Register result_end = scratch2;
  mov(top_address, Operand(allocation_top));

  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result.
    LoadP(result, MemOperand(top_address));
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry.
      CmpP(result, MemOperand(top_address));
      Check(eq, kUnexpectedAllocationTop);
    }
  }

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
#if V8_TARGET_ARCH_S390X
    STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
#else
    STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
    AndP(result_end, result, Operand(kDoubleAlignmentMask));
    Label aligned;
    beq(&aligned, Label::kNear);
    if ((flags & PRETENURE) != 0) {
      CmpLogicalP(result, MemOperand(top_address, limit - top));
      bge(gc_required);
    }
    mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
    StoreW(result_end, MemOperand(result));
    AddP(result, result, Operand(kDoubleSize / 2));
    bind(&aligned);
#endif
  }

  AddP(result_end, result, Operand(object_size));

  // Compare with allocation limit.
  CmpLogicalP(result_end, MemOperand(top_address, limit - top));
  bge(gc_required);

  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
    // The top pointer is not updated for allocation folding dominators.
    StoreP(result_end, MemOperand(top_address));
  }

  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
    // Prefetch the allocation_top's next cache line in advance to
    // help alleviate potential cache misses.
    // Mode 2 - Prefetch the data into a cache line for store access.
    pfd(static_cast<Condition>(2), MemOperand(result, 256));
  }

  // Tag object.
  la(result, MemOperand(result, kHeapObjectTag));
}

void MacroAssembler::Allocate(Register object_size, Register result,
                              Register result_end, Register scratch,
                              Label* gc_required, AllocationFlags flags) {
  DCHECK((flags & ALLOCATION_FOLDED) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      LoadImmP(result, Operand(0x7091));
      LoadImmP(scratch, Operand(0x7191));
      LoadImmP(result_end, Operand(0x7291));
    }
    b(gc_required);
    return;
  }

  // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
  // is not specified. Other registers must not overlap.
  DCHECK(!AreAliased(object_size, result, scratch, ip));
  DCHECK(!AreAliased(result_end, result, scratch, ip));
  DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));

  // Check relative positions of allocation top and limit addresses.
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
  intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
  intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
  DCHECK((limit - top) == kPointerSize);

  // Set up allocation top address and allocation limit registers.
  Register top_address = scratch;
  mov(top_address, Operand(allocation_top));

  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result.
    LoadP(result, MemOperand(top_address));
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry.
      CmpP(result, MemOperand(top_address));
      Check(eq, kUnexpectedAllocationTop);
    }
  }

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
#if V8_TARGET_ARCH_S390X
    STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
#else
    STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
    AndP(result_end, result, Operand(kDoubleAlignmentMask));
    Label aligned;
    beq(&aligned, Label::kNear);
    if ((flags & PRETENURE) != 0) {
      CmpLogicalP(result, MemOperand(top_address, limit - top));
      bge(gc_required);
    }
    mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
    StoreW(result_end, MemOperand(result));
    AddP(result, result, Operand(kDoubleSize / 2));
    bind(&aligned);
#endif
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top. Object size may be in words so a shift is
  // required to get the number of bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    ShiftLeftP(result_end, object_size, Operand(kPointerSizeLog2));
    AddP(result_end, result, result_end);
  } else {
    AddP(result_end, result, object_size);
  }
  CmpLogicalP(result_end, MemOperand(top_address, limit - top));
  bge(gc_required);

  // Update allocation top; result_end temporarily holds the new top.
  if (emit_debug_code()) {
    AndP(r0, result_end, Operand(kObjectAlignmentMask));
    Check(eq, kUnalignedAllocationInNewSpace, cr0);
  }
  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
    // The top pointer is not updated for allocation folding dominators.
    StoreP(result_end, MemOperand(top_address));
  }

  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
    // Prefetch the allocation_top's next cache line in advance to
    // help alleviate potential cache misses.
    // Mode 2 - Prefetch the data into a cache line for store access.
    pfd(static_cast<Condition>(2), MemOperand(result, 256));
  }

  // Tag object.
  la(result, MemOperand(result, kHeapObjectTag));
}

void MacroAssembler::FastAllocate(Register object_size, Register result,
                                  Register result_end, Register scratch,
                                  AllocationFlags flags) {
  // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
  // is not specified. Other registers must not overlap.
  DCHECK(!AreAliased(object_size, result, scratch, ip));
  DCHECK(!AreAliased(result_end, result, scratch, ip));
  DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));

  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  Register top_address = scratch;
  mov(top_address, Operand(allocation_top));
  LoadP(result, MemOperand(top_address));

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
#if V8_TARGET_ARCH_S390X
    STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
#else
    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
    AndP(result_end, result, Operand(kDoubleAlignmentMask));
    Label aligned;
    beq(&aligned, Label::kNear);
    mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
    StoreW(result_end, MemOperand(result));
    AddP(result, result, Operand(kDoubleSize / 2));
    bind(&aligned);
#endif
  }

  // Calculate new top using result. Object size may be in words so a shift is
  // required to get the number of bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    ShiftLeftP(result_end, object_size, Operand(kPointerSizeLog2));
    AddP(result_end, result, result_end);
  } else {
    AddP(result_end, result, object_size);
  }

  // Update allocation top; result_end temporarily holds the new top.
  if (emit_debug_code()) {
    AndP(r0, result_end, Operand(kObjectAlignmentMask));
    Check(eq, kUnalignedAllocationInNewSpace, cr0);
  }
  StoreP(result_end, MemOperand(top_address));

  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
    // Prefetch the allocation_top's next cache line in advance to
    // help alleviate potential cache misses.
    // Mode 2 - Prefetch the data into a cache line for store access.
    pfd(static_cast<Condition>(2), MemOperand(result, 256));
  }

  // Tag object.
  la(result, MemOperand(result, kHeapObjectTag));
}

void MacroAssembler::FastAllocate(int object_size, Register result,
                                  Register scratch1, Register scratch2,
                                  AllocationFlags flags) {
  DCHECK(object_size <= kMaxRegularHeapObjectSize);
  DCHECK(!AreAliased(result, scratch1, scratch2, ip));

  // Convert the object size into bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    object_size *= kPointerSize;
  }
  DCHECK_EQ(0, object_size & kObjectAlignmentMask);

  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Set up allocation top address register.
  Register top_address = scratch1;
  Register result_end = scratch2;
  mov(top_address, Operand(allocation_top));
  LoadP(result, MemOperand(top_address));

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
#if V8_TARGET_ARCH_S390X
    STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
#else
    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
    AndP(result_end, result, Operand(kDoubleAlignmentMask));
    Label aligned;
    beq(&aligned, Label::kNear);
    mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
    StoreW(result_end, MemOperand(result));
    AddP(result, result, Operand(kDoubleSize / 2));
    bind(&aligned);
#endif
  }

#if V8_TARGET_ARCH_S390X
  // Limit to 64-bit only, as the double alignment check above may adjust
  // allocation top by an extra kDoubleSize/2.
  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_int8(object_size)) {
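    // A single ASI (add immediate to storage) bumps the top pointer in
    // memory without a separate load/store; its immediate field is a signed
    // byte, hence the is_int8 check above.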
    // Update allocation top.
    AddP(MemOperand(top_address), Operand(object_size));
  } else {
    // Calculate new top using result.
    AddP(result_end, result, Operand(object_size));
    // Update allocation top.
    StoreP(result_end, MemOperand(top_address));
  }
#else
  // Calculate new top using result.
  AddP(result_end, result, Operand(object_size));
  // Update allocation top.
  StoreP(result_end, MemOperand(top_address));
#endif

  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
    // Prefetch the allocation_top's next cache line in advance to
    // help alleviate potential cache misses.
    // Mode 2 - Prefetch the data into a cache line for store access.
    pfd(static_cast<Condition>(2), MemOperand(result, 256));
  }

  // Tag object.
  la(result, MemOperand(result, kHeapObjectTag));
}

void MacroAssembler::CompareObjectType(Register object, Register map,
                                       Register type_reg, InstanceType type) {
  const Register temp = type_reg.is(no_reg) ? r0 : type_reg;

  LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
  CompareInstanceType(map, temp, type);
}

void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
                                         InstanceType type) {
  STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
  STATIC_ASSERT(LAST_TYPE < 256);
  LoadlB(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
  CmpP(type_reg, Operand(type));
}

void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
  CmpP(obj, MemOperand(kRootRegister, index << kPointerSizeLog2));
}

void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
  SmiUntag(ip, smi);
  ConvertIntToDouble(ip, value);
}

void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
                                Label* early_success) {
  LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  CompareMap(obj, map, early_success);
}

void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map,
                                Label* early_success) {
  mov(r0, Operand(map));
  CmpP(r0, FieldMemOperand(obj_map, HeapObject::kMapOffset));
}

void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map,
                              Label* fail, SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }

  Label success;
  CompareMap(obj, scratch, map, &success);
  bne(fail);
  bind(&success);
}

void MacroAssembler::CheckMap(Register obj, Register scratch,
                              Heap::RootListIndex index, Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  CompareRoot(scratch, index);
  bne(fail);
}

void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
                                     Register scratch2, Handle<WeakCell> cell,
                                     Handle<Code> success,
                                     SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  LoadP(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
  CmpWeakValue(scratch1, cell, scratch2);
  Jump(success, RelocInfo::CODE_TARGET, eq);
  bind(&fail);
}

void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
                                  Register scratch, CRegister) {
  mov(scratch, Operand(cell));
  CmpP(value, FieldMemOperand(scratch, WeakCell::kValueOffset));
}

void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
  mov(value, Operand(cell));
  LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
}

void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
                                   Label* miss) {
  GetWeakValue(value, cell);
  JumpIfSmi(value, miss);
}

void MacroAssembler::GetMapConstructor(Register result, Register map,
                                       Register temp, Register temp2) {
  Label done, loop;
  LoadP(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
  bind(&loop);
  JumpIfSmi(result, &done);
  CompareObjectType(result, temp, temp2, MAP_TYPE);
  bne(&done);
  LoadP(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
  b(&loop);
  bind(&done);
}

void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
                              Condition cond) {
  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
}

void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}

bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame_ || !stub->SometimesSetsUpAFrame();
}

void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
                                       Register scratch1, Register scratch2,
                                       DoubleRegister double_scratch) {
  TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
}
TestDoubleIsMinusZero(DoubleRegister input,Register scratch1,Register scratch2)1980 void MacroAssembler::TestDoubleIsMinusZero(DoubleRegister input,
1981                                            Register scratch1,
1982                                            Register scratch2) {
1983   lgdr(scratch1, input);
1984 #if V8_TARGET_ARCH_S390X
1985   llihf(scratch2, Operand(0x80000000));  // scratch2 = 0x80000000_00000000
1986   CmpP(scratch1, scratch2);
1987 #else
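  // -0.0 is 0x80000000_00000000: a zero low word with only the sign bit set
  // in the high word, so check the low word first, then the high word.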
  Label done;
  CmpP(scratch1, Operand::Zero());
  bne(&done, Label::kNear);

  srlg(scratch1, scratch1, Operand(32));
  CmpP(scratch1, Operand(HeapNumber::kSignMask));
  bind(&done);
#endif
}

void MacroAssembler::TestDoubleSign(DoubleRegister input, Register scratch) {
  lgdr(scratch, input);
  cgfi(scratch, Operand::Zero());
}

void MacroAssembler::TestHeapNumberSign(Register input, Register scratch) {
  LoadlW(scratch, FieldMemOperand(input, HeapNumber::kValueOffset +
                                             Register::kExponentOffset));
  Cmp32(scratch, Operand::Zero());
}

void MacroAssembler::TryDoubleToInt32Exact(Register result,
                                           DoubleRegister double_input,
                                           Register scratch,
                                           DoubleRegister double_scratch) {
  Label done;
  DCHECK(!double_input.is(double_scratch));

  ConvertDoubleToInt64(double_input,
#if !V8_TARGET_ARCH_S390X
                       scratch,
#endif
                       result, double_scratch);

#if V8_TARGET_ARCH_S390X
  TestIfInt32(result, r0);
#else
  TestIfInt32(scratch, result, r0);
#endif
  bne(&done);

  // Convert back and compare to detect a lossy conversion.
  lgdr(scratch, double_scratch);
  cdfbr(double_scratch, scratch);
  cdbr(double_scratch, double_input);
  bind(&done);
}

void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
                                   Register input_high, Register scratch,
                                   DoubleRegister double_scratch, Label* done,
                                   Label* exact) {
  DCHECK(!result.is(input_high));
  DCHECK(!double_input.is(double_scratch));
  Label exception;

  // Move high word into input_high
  lay(sp, MemOperand(sp, -kDoubleSize));
  StoreDouble(double_input, MemOperand(sp));
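  // Register::kExponentOffset selects the word holding the sign and exponent
  // for the target endianness.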
  LoadlW(input_high, MemOperand(sp, Register::kExponentOffset));
  la(sp, MemOperand(sp, kDoubleSize));

  // Test for NaN/Inf
  ExtractBitMask(result, input_high, HeapNumber::kExponentMask);
  CmpLogicalP(result, Operand(0x7ff));
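  // A biased exponent of all ones (0x7ff) means NaN or infinity.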
  beq(&exception);

  // Convert (rounding to -Inf)
  ConvertDoubleToInt64(double_input,
#if !V8_TARGET_ARCH_S390X
                       scratch,
#endif
                       result, double_scratch, kRoundToMinusInf);

// Test for overflow
#if V8_TARGET_ARCH_S390X
  TestIfInt32(result, r0);
#else
  TestIfInt32(scratch, result, r0);
#endif
  bne(&exception);

  // Test for exactness
  lgdr(scratch, double_scratch);
  cdfbr(double_scratch, scratch);
  cdbr(double_scratch, double_input);
  beq(exact);
  b(done);

  bind(&exception);
}

void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
                                                DoubleRegister double_input,
                                                Label* done) {
  DoubleRegister double_scratch = kScratchDoubleReg;
#if !V8_TARGET_ARCH_S390X
  Register scratch = ip;
#endif

  ConvertDoubleToInt64(double_input,
#if !V8_TARGET_ARCH_S390X
                       scratch,
#endif
                       result, double_scratch);

// Test for overflow
#if V8_TARGET_ARCH_S390X
  TestIfInt32(result, r0);
#else
  TestIfInt32(scratch, result, r0);
#endif
  beq(done);
}

void MacroAssembler::TruncateDoubleToI(Register result,
                                       DoubleRegister double_input) {
  Label done;

  TryInlineTruncateDoubleToI(result, double_input, &done);

  // If we fell through, the inline version didn't succeed - call the stub.
  push(r14);
  // Put the input on the stack.
  lay(sp, MemOperand(sp, -kDoubleSize));
  StoreDouble(double_input, MemOperand(sp));

  DoubleToIStub stub(isolate(), sp, result, 0, true, true);
  CallStub(&stub);

  la(sp, MemOperand(sp, kDoubleSize));
  pop(r14);

  bind(&done);
}

void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
  Label done;
  DoubleRegister double_scratch = kScratchDoubleReg;
  DCHECK(!result.is(object));

  LoadDouble(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
  TryInlineTruncateDoubleToI(result, double_scratch, &done);

  // If we fell through, the inline version didn't succeed - call the stub.
  push(r14);
  DoubleToIStub stub(isolate(), object, result,
                     HeapNumber::kValueOffset - kHeapObjectTag, true, true);
  CallStub(&stub);
  pop(r14);

  bind(&done);
}

void MacroAssembler::TruncateNumberToI(Register object, Register result,
                                       Register heap_number_map,
                                       Register scratch1, Label* not_number) {
  Label done;
  DCHECK(!result.is(object));

  UntagAndJumpIfSmi(result, object, &done);
  JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
  TruncateHeapNumberToI(result, object);

  bind(&done);
}

void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
                                         int num_least_bits) {
  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
    // A single RISBG rotates the smi tag away and extracts the low
    // num_least_bits bits, zeroing everything else.
    risbg(dst, src, Operand(64 - num_least_bits), Operand(63),
          Operand(64 - kSmiShift), true);
  } else {
    SmiUntag(dst, src);
    AndP(dst, Operand((1 << num_least_bits) - 1));
  }
}

void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
                                           int num_least_bits) {
  AndP(dst, src, Operand((1 << num_least_bits) - 1));
}

void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // All parameters are on the stack.  r2 has the return value after call.

  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  mov(r2, Operand(num_arguments));
  mov(r3, Operand(ExternalReference(f, isolate())));
  CEntryStub stub(isolate(),
#if V8_TARGET_ARCH_S390X
                  f->result_size,
#else
                  1,
#endif
                  save_doubles);
  CallStub(&stub);
}

void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  mov(r2, Operand(num_arguments));
  mov(r3, Operand(ext));

  CEntryStub stub(isolate(), 1);
  CallStub(&stub);
}

void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
  const Runtime::Function* function = Runtime::FunctionForId(fid);
  DCHECK_EQ(1, function->result_size);
  if (function->nargs >= 0) {
    mov(r2, Operand(function->nargs));
  }
  JumpToExternalReference(ExternalReference(fid, isolate()));
}

void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
                                             bool builtin_exit_frame) {
  mov(r3, Operand(builtin));
  CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
                  builtin_exit_frame);
  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}

void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch1, Operand(value));
    mov(scratch2, Operand(ExternalReference(counter)));
    StoreW(scratch1, MemOperand(scratch2));
  }
}

void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  DCHECK(value > 0 && is_int8(value));
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch1, Operand(ExternalReference(counter)));
    // TODO(john.yan): This load/add/store could be optimized into a single
    // asi (add immediate to storage) instruction.
    LoadW(scratch2, MemOperand(scratch1));
    AddP(scratch2, Operand(value));
    StoreW(scratch2, MemOperand(scratch1));
  }
}

void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  DCHECK(value > 0 && is_int8(value));
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch1, Operand(ExternalReference(counter)));
    // TODO(john.yan): This load/add/store could be optimized into a single
    // asi (add immediate to storage) instruction.
    LoadW(scratch2, MemOperand(scratch1));
    AddP(scratch2, Operand(-value));
    StoreW(scratch2, MemOperand(scratch1));
  }
}

void MacroAssembler::Assert(Condition cond, BailoutReason reason,
                            CRegister cr) {
  if (emit_debug_code()) Check(cond, reason, cr);
}

void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    DCHECK(!elements.is(r0));
    Label ok;
    push(elements);
    LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
    CompareRoot(elements, Heap::kFixedArrayMapRootIndex);
    beq(&ok, Label::kNear);
    CompareRoot(elements, Heap::kFixedDoubleArrayMapRootIndex);
    beq(&ok, Label::kNear);
    CompareRoot(elements, Heap::kFixedCOWArrayMapRootIndex);
    beq(&ok, Label::kNear);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    bind(&ok);
    pop(elements);
  }
}

void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
  Label L;
  b(cond, &L);
  Abort(reason);
  // will not return here
  bind(&L);
}

void MacroAssembler::Abort(BailoutReason reason) {
  Label abort_start;
  bind(&abort_start);
#ifdef DEBUG
  const char* msg = GetBailoutReason(reason);
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }

  if (FLAG_trap_on_abort) {
    stop(msg);
    return;
  }
#endif

  // Check if Abort() has already been initialized.
  DCHECK(isolate()->builtins()->Abort()->IsHeapObject());

  LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(reason)));
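  // The bailout reason travels to the Abort builtin as a smi in r3.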

  // Disable stub call restrictions to always allow calls to abort.
  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
  } else {
    Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
  }
  // will not return here
}

void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context.  Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in cp).
    LoadRR(dst, cp);
  }
}

void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  LoadP(dst, NativeContextMemOperand());
  LoadP(dst, ContextMemOperand(dst, index));
}

void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  LoadP(map,
        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    b(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}

void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
    Register reg, Register scratch, Label* not_power_of_two_or_zero) {
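  // reg is a power of two iff reg > 0 and (reg & (reg - 1)) == 0.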
  SubP(scratch, reg, Operand(1));
  CmpP(scratch, Operand::Zero());
  blt(not_power_of_two_or_zero);
  AndP(r0, reg, scratch /*, SetRC*/);  // Should be okay to remove rc
  bne(not_power_of_two_or_zero /*, cr0*/);
}

void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
                                                     Register scratch,
                                                     Label* zero_and_neg,
                                                     Label* not_power_of_two) {
  SubP(scratch, reg, Operand(1));
  CmpP(scratch, Operand::Zero());
  blt(zero_and_neg);
  AndP(r0, reg, scratch /*, SetRC*/);  // Should be okay to remove rc
  bne(not_power_of_two /*, cr0*/);
}

#if !V8_TARGET_ARCH_S390X
void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
  DCHECK(!reg.is(overflow));
  LoadRR(overflow, reg);  // Save original value.
  SmiTag(reg);
  XorP(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
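  // Smi-tagging shifts the value left by one; if the sign bit changed, the
  // shift overflowed, and the XOR with the original value is negative.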
  LoadAndTestRR(overflow, overflow);
}

void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src,
                                         Register overflow) {
  if (dst.is(src)) {
    // Fall back to slower case.
    SmiTagCheckOverflow(dst, overflow);
  } else {
    DCHECK(!dst.is(src));
    DCHECK(!dst.is(overflow));
    DCHECK(!src.is(overflow));
    SmiTag(dst, src);
    XorP(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
    LoadAndTestRR(overflow, overflow);
  }
}
#endif

void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
                                      Label* on_not_both_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  OrP(r0, reg1, reg2 /*, LeaveRC*/);  // should be okay to remove LeaveRC
  JumpIfNotSmi(r0, on_not_both_smi);
}

void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
                                       Label* smi_case) {
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);
  // TestIfSmi below reads src after the untag, so src and dst must differ.
  DCHECK(src.code() != dst.code());
  SmiUntag(dst, src);
  TestIfSmi(src);
  beq(smi_case);
}

void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  JumpIfSmi(reg1, on_either_smi);
  JumpIfSmi(reg2, on_either_smi);
}

void MacroAssembler::AssertNotNumber(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object);
    Check(ne, kOperandIsANumber, cr0);
    push(object);
    CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
    pop(object);
    Check(ne, kOperandIsANumber);
  }
}

void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object);
    Check(ne, kOperandIsASmi, cr0);
  }
}

void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object);
    Check(eq, kOperandIsNotSmi, cr0);
  }
}

void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object);
    Check(ne, kOperandIsASmiAndNotAString, cr0);
    push(object);
    LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
    pop(object);
    Check(lt, kOperandIsNotAString);
  }
}

void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object);
    Check(ne, kOperandIsASmiAndNotAName, cr0);
    push(object);
    LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(object, object, LAST_NAME_TYPE);
    pop(object);
    Check(le, kOperandIsNotAName);
  }
}

void MacroAssembler::AssertFunction(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object);
    Check(ne, kOperandIsASmiAndNotAFunction, cr0);
    push(object);
    CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
    pop(object);
    Check(eq, kOperandIsNotAFunction);
  }
}

void MacroAssembler::AssertBoundFunction(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object);
    Check(ne, kOperandIsASmiAndNotABoundFunction, cr0);
    push(object);
    CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
    pop(object);
    Check(eq, kOperandIsNotABoundFunction);
  }
}

void MacroAssembler::AssertGeneratorObject(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object);
    Check(ne, kOperandIsASmiAndNotAGeneratorObject, cr0);
    push(object);
    CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
    pop(object);
    Check(eq, kOperandIsNotAGeneratorObject);
  }
}

void MacroAssembler::AssertReceiver(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object);
    Check(ne, kOperandIsASmiAndNotAReceiver, cr0);
    push(object);
    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
    CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
    pop(object);
    Check(ge, kOperandIsNotAReceiver);
  }
}

void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    CompareRoot(object, Heap::kUndefinedValueRootIndex);
    beq(&done_checking, Label::kNear);
    LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
    Assert(eq, kExpectedUndefinedOrCell);
    bind(&done_checking);
  }
}

void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
  if (emit_debug_code()) {
    CompareRoot(reg, index);
    Check(eq, kHeapNumberMapRegisterClobbered);
  }
}

void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Register heap_number_map,
                                         Register scratch,
                                         Label* on_not_heap_number) {
  LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  CmpP(scratch, heap_number_map);
  bne(on_not_heap_number);
}

void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  // Test that both first and second are sequential one-byte strings.
  // Assume that they are non-smis.
  LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  LoadlB(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  LoadlB(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

  JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
                                                 scratch2, failure);
}

void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
                                                           Register second,
                                                           Register scratch1,
                                                           Register scratch2,
                                                           Label* failure) {
  // Check that neither is a smi.
  AndP(scratch1, first, second);
  JumpIfSmi(scratch1, failure);
  JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
                                               scratch2, failure);
}

void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
                                                     Label* not_unique_name) {
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  Label succeed;
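  // Internalized strings have both the string and internalized tag bits
  // clear; any other unique name must be a symbol.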
  AndP(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  beq(&succeed, Label::kNear);
  CmpP(reg, Operand(SYMBOL_TYPE));
  bne(not_unique_name);

  bind(&succeed);
}

// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
                                        Register scratch2,
                                        Register heap_number_map,
                                        Label* gc_required,
                                        MutableMode mode) {
  // Allocate an object in the heap for the heap number and tag it as a heap
  // object.
  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
           NO_ALLOCATION_FLAGS);

  Heap::RootListIndex map_index = mode == MUTABLE
                                      ? Heap::kMutableHeapNumberMapRootIndex
                                      : Heap::kHeapNumberMapRootIndex;
  AssertIsRoot(heap_number_map, map_index);

  // Store heap number map in the allocated object.
  StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}

void MacroAssembler::AllocateHeapNumberWithValue(
    Register result, DoubleRegister value, Register scratch1, Register scratch2,
    Register heap_number_map, Label* gc_required) {
  AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
  StoreDouble(value, FieldMemOperand(result, HeapNumber::kValueOffset));
}

void MacroAssembler::AllocateJSValue(Register result, Register constructor,
                                     Register value, Register scratch1,
                                     Register scratch2, Label* gc_required) {
  DCHECK(!result.is(constructor));
  DCHECK(!result.is(scratch1));
  DCHECK(!result.is(scratch2));
  DCHECK(!result.is(value));

  // Allocate JSValue in new space.
  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
           NO_ALLOCATION_FLAGS);

  // Initialize the JSValue.
  LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
  StoreP(scratch1, FieldMemOperand(result, HeapObject::kMapOffset), r0);
  LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
  StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset), r0);
  StoreP(scratch1, FieldMemOperand(result, JSObject::kElementsOffset), r0);
  StoreP(value, FieldMemOperand(result, JSValue::kValueOffset), r0);
  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
2647 
InitializeNFieldsWithFiller(Register current_address,Register count,Register filler)2648 void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
2649                                                  Register count,
2650                                                  Register filler) {
2651   Label loop;
2652   bind(&loop);
2653   StoreP(filler, MemOperand(current_address));
2654   AddP(current_address, current_address, Operand(kPointerSize));
2655   BranchOnCount(r1, &loop);
2656 }
2657 
2658 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
2659                                                 Register end_address,
2660                                                 Register filler) {
2661   Label done;
2662   DCHECK(!filler.is(r1));
2663   DCHECK(!current_address.is(r1));
2664   DCHECK(!end_address.is(r1));
2665   SubP(r1, end_address, current_address /*, LeaveOE, SetRC*/);
2666   beq(&done, Label::kNear);
2667   ShiftRightP(r1, r1, Operand(kPointerSizeLog2));
2668   InitializeNFieldsWithFiller(current_address, r1, filler);
2669   bind(&done);
2670 }
2671 
2672 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2673     Register first, Register second, Register scratch1, Register scratch2,
2674     Label* failure) {
2675   const int kFlatOneByteStringMask =
2676       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2677   const int kFlatOneByteStringTag =
2678       kStringTag | kOneByteStringTag | kSeqStringTag;
2679   if (!scratch1.is(first)) LoadRR(scratch1, first);
2680   if (!scratch2.is(second)) LoadRR(scratch2, second);
2681   nilf(scratch1, Operand(kFlatOneByteStringMask));
2682   CmpP(scratch1, Operand(kFlatOneByteStringTag));
2683   bne(failure);
2684   nilf(scratch2, Operand(kFlatOneByteStringMask));
2685   CmpP(scratch2, Operand(kFlatOneByteStringTag));
2686   bne(failure);
2687 }
2688 
2689 static const int kRegisterPassedArguments = 5;
2690 
2691 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
2692                                               int num_double_arguments) {
2693   int stack_passed_words = 0;
2694   if (num_double_arguments > DoubleRegister::kNumRegisters) {
2695     stack_passed_words +=
2696         2 * (num_double_arguments - DoubleRegister::kNumRegisters);
2697   }
2698   // Up to five simple arguments are passed in registers r2..r6
2699   if (num_reg_arguments > kRegisterPassedArguments) {
2700     stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
2701   }
2702   return stack_passed_words;
2703 }
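
// Worked example: num_reg_arguments = 7 and num_double_arguments = 0 leaves
// two GPR arguments without a register (r2..r6 carry the first five), so this
// returns 2. Each double beyond DoubleRegister::kNumRegisters is charged two
// pointer-sized words rather than one.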
2704 
2705 void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
2706                                                Register value,
2707                                                uint32_t encoding_mask) {
2708   Label is_object;
2709   TestIfSmi(string);
2710   Check(ne, kNonObject, cr0);
2711 
2712   LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset));
2713   LoadlB(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
2714 
2715   AndP(ip, Operand(kStringRepresentationMask | kStringEncodingMask));
2716   CmpP(ip, Operand(encoding_mask));
2717   Check(eq, kUnexpectedStringType);
2718 
2719 // The index is assumed to come in untagged. Tag it to compare with the
2720 // string length without using a temp register; it is restored at the end of
2721 // this function.
2722 #if !V8_TARGET_ARCH_S390X
2723   Label index_tag_ok, index_tag_bad;
2724   JumpIfNotSmiCandidate(index, r0, &index_tag_bad);
2725 #endif
2726   SmiTag(index, index);
2727 #if !V8_TARGET_ARCH_S390X
2728   b(&index_tag_ok);
2729   bind(&index_tag_bad);
2730   Abort(kIndexIsTooLarge);
2731   bind(&index_tag_ok);
2732 #endif
2733 
2734   LoadP(ip, FieldMemOperand(string, String::kLengthOffset));
2735   CmpP(index, ip);
2736   Check(lt, kIndexIsTooLarge);
2737 
2738   DCHECK(Smi::kZero == 0);
2739   CmpP(index, Operand::Zero());
2740   Check(ge, kIndexIsNegative);
2741 
2742   SmiUntag(index, index);
2743 }
2744 
2745 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
2746                                           int num_double_arguments,
2747                                           Register scratch) {
2748   int frame_alignment = ActivationFrameAlignment();
2749   int stack_passed_arguments =
2750       CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2751   int stack_space = kNumRequiredStackFrameSlots;
2752   if (frame_alignment > kPointerSize) {
2753     // Make stack end at alignment and make room for stack arguments
2754     // -- preserving original value of sp.
2755     LoadRR(scratch, sp);
2756     lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kPointerSize));
2757     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
2758     ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
2759     StoreP(scratch, MemOperand(sp, (stack_passed_arguments)*kPointerSize));
2760   } else {
2761     stack_space += stack_passed_arguments;
2762   }
2763   lay(sp, MemOperand(sp, -(stack_space)*kPointerSize));
2764 }
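
// Sketch of the aligned path (e.g. frame_alignment = 8 with two stack
// arguments): the original sp is copied to scratch, sp drops three slots and
// is rounded down to the alignment boundary, and the saved sp is stored just
// above the argument slots; the final lay then reserves
// kNumRequiredStackFrameSlots for the ABI frame area. CallCFunctionHelper
// later reloads sp from that saved slot.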
2765 
2766 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
2767                                           Register scratch) {
2768   PrepareCallCFunction(num_reg_arguments, 0, scratch);
2769 }
2770 
2771 void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); }
2772 
2773 void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); }
2774 
2775 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
2776                                           DoubleRegister src2) {
2777   if (src2.is(d0)) {
2778     DCHECK(!src1.is(d2));
2779     Move(d2, src2);
2780     Move(d0, src1);
2781   } else {
2782     Move(d0, src1);
2783     Move(d2, src2);
2784   }
2785 }
2786 
2787 void MacroAssembler::CallCFunction(ExternalReference function,
2788                                    int num_reg_arguments,
2789                                    int num_double_arguments) {
2790   mov(ip, Operand(function));
2791   CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
2792 }
2793 
2794 void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
2795                                    int num_double_arguments) {
2796   CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
2797 }
2798 
2799 void MacroAssembler::CallCFunction(ExternalReference function,
2800                                    int num_arguments) {
2801   CallCFunction(function, num_arguments, 0);
2802 }
2803 
2804 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
2805   CallCFunction(function, num_arguments, 0);
2806 }
2807 
2808 void MacroAssembler::CallCFunctionHelper(Register function,
2809                                          int num_reg_arguments,
2810                                          int num_double_arguments) {
2811   DCHECK(has_frame());
2812 
2813   // Just call directly. The function called cannot cause a GC, or
2814   // allow preemption, so the return address in the link register
2815   // stays correct.
2816   Register dest = function;
2817   if (ABI_CALL_VIA_IP) {
2818     Move(ip, function);
2819     dest = ip;
2820   }
2821 
2822   Call(dest);
2823 
2824   int stack_passed_arguments =
2825       CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2826   int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
2827   if (ActivationFrameAlignment() > kPointerSize) {
2828     // Load the original stack pointer (pre-alignment) from the stack
2829     LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
2830   } else {
2831     la(sp, MemOperand(sp, stack_space * kPointerSize));
2832   }
2833 }
2834 
2835 void MacroAssembler::CheckPageFlag(
2836     Register object,
2837     Register scratch,  // scratch may be same register as object
2838     int mask, Condition cc, Label* condition_met) {
2839   DCHECK(cc == ne || cc == eq);
2840   ClearRightImm(scratch, object, Operand(kPageSizeBits));
2841 
2842   if (base::bits::IsPowerOfTwo32(mask)) {
2843     // If it's a power of two, we can use Test-Under-Mask Memory-Imm form
2844     // which allows testing of a single byte in memory.
2845     int32_t byte_offset = 4;
2846     uint32_t shifted_mask = mask;
2847     // Determine the byte offset to be tested
2848     if (mask <= 0x80) {
2849       byte_offset = kPointerSize - 1;
2850     } else if (mask < 0x8000) {
2851       byte_offset = kPointerSize - 2;
2852       shifted_mask = mask >> 8;
2853     } else if (mask < 0x800000) {
2854       byte_offset = kPointerSize - 3;
2855       shifted_mask = mask >> 16;
2856     } else {
2857       byte_offset = kPointerSize - 4;
2858       shifted_mask = mask >> 24;
2859     }
2860 #if V8_TARGET_LITTLE_ENDIAN
2861     // Reverse the byte_offset if emulating on little endian platform
2862     byte_offset = kPointerSize - byte_offset - 1;
2863 #endif
2864     tm(MemOperand(scratch, MemoryChunk::kFlagsOffset + byte_offset),
2865        Operand(shifted_mask));
2866   } else {
2867     LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
2868     AndP(r0, scratch, Operand(mask));
2869   }
2870   // Should be okay to remove rc
2871 
2872   if (cc == ne) {
2873     bne(condition_met);
2874   }
2875   if (cc == eq) {
2876     beq(condition_met);
2877   }
2878 }
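
// Worked example: mask = 0x100 selects byte_offset = kPointerSize - 2 and
// shifted_mask = 0x01, so the tm instruction tests bit 0x01 of the
// second-lowest byte of the big-endian flags word; the little-endian branch
// simply mirrors that byte offset within the word.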
2879 
2880 void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
2881                                  Register scratch1, Label* on_black) {
2882   HasColor(object, scratch0, scratch1, on_black, 1, 1);  // kBlackBitPattern.
2883   DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
2884 }
2885 
2886 void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
2887                               Register mask_scratch, Label* has_color,
2888                               int first_bit, int second_bit) {
2889   DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
2890 
2891   GetMarkBits(object, bitmap_scratch, mask_scratch);
2892 
2893   Label other_color, word_boundary;
2894   LoadlW(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
2895   // Test the first bit
2896   AndP(r0, ip, mask_scratch /*, SetRC*/);  // Should be okay to remove rc
2897   b(first_bit == 1 ? eq : ne, &other_color, Label::kNear);
2898   // Shift left 1
2899   // May need to load the next cell
2900   sll(mask_scratch, Operand(1) /*, SetRC*/);
2901   LoadAndTest32(mask_scratch, mask_scratch);
2902   beq(&word_boundary, Label::kNear);
2903   // Test the second bit
2904   AndP(r0, ip, mask_scratch /*, SetRC*/);  // Should be okay to remove rc
2905   b(second_bit == 1 ? ne : eq, has_color);
2906   b(&other_color, Label::kNear);
2907 
2908   bind(&word_boundary);
2909   LoadlW(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize));
2910   AndP(r0, ip, Operand(1));
2911   b(second_bit == 1 ? ne : eq, has_color);
2912   bind(&other_color);
2913 }
2914 
2915 void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
2916                                  Register mask_reg) {
2917   DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
2918   LoadRR(bitmap_reg, addr_reg);
2919   nilf(bitmap_reg, Operand(~Page::kPageAlignmentMask));
2920   const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
2921   ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2);
2922   ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits);
2923   ShiftLeftP(ip, ip, Operand(Bitmap::kBytesPerCellLog2));
2924   AddP(bitmap_reg, ip);
2925   LoadRR(ip, mask_reg);  // Have to do some funky reg shuffling as
2926                          // 31-bit shift left clobbers on s390.
2927   LoadImmP(mask_reg, Operand(1));
2928   ShiftLeftP(mask_reg, mask_reg, ip);
2929 }
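
// Shape of the computation, assuming 32-bit bitmap cells (kBitsPerCellLog2 ==
// 5): bitmap_reg becomes the page start; the address bits from
// kPointerSizeLog2 up to kLowBits pick the bit within a cell (mask_reg = 1 <<
// index), and the bits above them up to kPageSizeBits pick the cell, scaled
// by kBytesPerCellLog2 into a byte offset.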
2930 
2931 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
2932                                  Register mask_scratch, Register load_scratch,
2933                                  Label* value_is_white) {
2934   DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
2935   GetMarkBits(value, bitmap_scratch, mask_scratch);
2936 
2937   // If the value is black or grey we don't need to do anything.
2938   DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
2939   DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
2940   DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
2941   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
2942 
2943   // Since both black and grey have a 1 in the first position and white does
2944   // not have a 1 there we only need to check one bit.
2945   LoadlW(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
2946   LoadRR(r0, load_scratch);
2947   AndP(r0, mask_scratch);
2948   beq(value_is_white);
2949 }
2950 
2951 // Saturate a value into an 8-bit unsigned integer:
2952 //   if input_value < 0, output_value is 0
2953 //   if input_value > 255, output_value is 255
2954 //   otherwise output_value is the input_value
2955 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
2956   int satval = (1 << 8) - 1;
2957 
2958   Label done, negative_label, overflow_label;
2959   CmpP(input_reg, Operand::Zero());
2960   blt(&negative_label);
2961 
2962   CmpP(input_reg, Operand(satval));
2963   bgt(&overflow_label);
2964   if (!output_reg.is(input_reg)) {
2965     LoadRR(output_reg, input_reg);
2966   }
2967   b(&done);
2968 
2969   bind(&negative_label);
2970   LoadImmP(output_reg, Operand::Zero());  // set to 0 if negative
2971   b(&done);
2972 
2973   bind(&overflow_label);  // set to satval if > satval
2974   LoadImmP(output_reg, Operand(satval));
2975 
2976   bind(&done);
2977 }
2978 
2979 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
2980                                         DoubleRegister input_reg,
2981                                         DoubleRegister double_scratch) {
2982   Label above_zero;
2983   Label done;
2984   Label in_bounds;
2985 
2986   LoadDoubleLiteral(double_scratch, 0.0, result_reg);
2987   cdbr(input_reg, double_scratch);
2988   bgt(&above_zero, Label::kNear);
2989 
2990   // Double value is less than or equal to zero, or NaN: return 0.
2991   LoadIntLiteral(result_reg, 0);
2992   b(&done, Label::kNear);
2993 
2994   // Double value is positive; return 255 if it is greater than 255.
2995   bind(&above_zero);
2996   LoadDoubleLiteral(double_scratch, 255.0, result_reg);
2997   cdbr(input_reg, double_scratch);
2998   ble(&in_bounds, Label::kNear);
2999   LoadIntLiteral(result_reg, 255);
3000   b(&done, Label::kNear);
3001 
3002   // In 0-255 range, round and truncate.
3003   bind(&in_bounds);
3004 
3005   // round to nearest (default rounding mode)
3006   cfdbr(ROUND_TO_NEAREST_WITH_TIES_TO_EVEN, result_reg, input_reg);
3007   bind(&done);
3008 }
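
// Note: ROUND_TO_NEAREST_WITH_TIES_TO_EVEN means, e.g., 254.5 rounds to 254;
// this matches the round-half-to-even behaviour specified for
// Uint8ClampedArray-style pixel stores.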
3009 
3010 void MacroAssembler::LoadInstanceDescriptors(Register map,
3011                                              Register descriptors) {
3012   LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3013 }
3014 
3015 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3016   LoadlW(dst, FieldMemOperand(map, Map::kBitField3Offset));
3017   DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3018 }
3019 
3020 void MacroAssembler::EnumLength(Register dst, Register map) {
3021   STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3022   LoadW(dst, FieldMemOperand(map, Map::kBitField3Offset));
3023   And(dst, Operand(Map::EnumLengthBits::kMask));
3024   SmiTag(dst);
3025 }
3026 
3027 void MacroAssembler::LoadAccessor(Register dst, Register holder,
3028                                   int accessor_index,
3029                                   AccessorComponent accessor) {
3030   LoadP(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
3031   LoadInstanceDescriptors(dst, dst);
3032   LoadP(dst,
3033         FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
3034   const int getterOffset = AccessorPair::kGetterOffset;
3035   const int setterOffset = AccessorPair::kSetterOffset;
3036   int offset = ((accessor == ACCESSOR_GETTER) ? getterOffset : setterOffset);
3037   LoadP(dst, FieldMemOperand(dst, offset));
3038 }
3039 
3040 void MacroAssembler::CheckEnumCache(Label* call_runtime) {
3041   Register null_value = r7;
3042   Register empty_fixed_array_value = r8;
3043   LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3044   Label next, start;
3045   LoadRR(r4, r2);
3046 
3047   // Check if the enum length field is properly initialized, indicating that
3048   // there is an enum cache.
3049   LoadP(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
3050 
3051   EnumLength(r5, r3);
3052   CmpSmiLiteral(r5, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
3053   beq(call_runtime);
3054 
3055   LoadRoot(null_value, Heap::kNullValueRootIndex);
3056   b(&start, Label::kNear);
3057 
3058   bind(&next);
3059   LoadP(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
3060 
3061   // For all objects but the receiver, check that the cache is empty.
3062   EnumLength(r5, r3);
3063   CmpSmiLiteral(r5, Smi::kZero, r0);
3064   bne(call_runtime);
3065 
3066   bind(&start);
3067 
3068   // Check that there are no elements. Register r4 contains the current JS
3069   // object we've reached through the prototype chain.
3070   Label no_elements;
3071   LoadP(r4, FieldMemOperand(r4, JSObject::kElementsOffset));
3072   CmpP(r4, empty_fixed_array_value);
3073   beq(&no_elements, Label::kNear);
3074 
3075   // Second chance, the object may be using the empty slow element dictionary.
3076   CompareRoot(r4, Heap::kEmptySlowElementDictionaryRootIndex);
3077   bne(call_runtime);
3078 
3079   bind(&no_elements);
3080   LoadP(r4, FieldMemOperand(r3, Map::kPrototypeOffset));
3081   CmpP(r4, null_value);
3082   bne(&next);
3083 }
3084 
3085 ////////////////////////////////////////////////////////////////////////////////
3086 //
3087 // New MacroAssembler Interfaces added for S390
3088 //
3089 ////////////////////////////////////////////////////////////////////////////////
3090 // The following notes apply to mov() (defined further below), which is
3091 // primarily used for loading constants. It should really move to
3092 // macro-assembler as it is really a pseudo instruction.
3093 // Some usages of it intend for a FIXED_SEQUENCE to be used.
3094 // @TODO - break this dependency so we can optimize mov() in general
3095 // and only use the generic version when we require a fixed sequence
3096 void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
3097                                         Representation r, Register scratch) {
3098   DCHECK(!r.IsDouble());
3099   if (r.IsInteger8()) {
3100     LoadB(dst, mem);
3101   } else if (r.IsUInteger8()) {
3102     LoadlB(dst, mem);
3103   } else if (r.IsInteger16()) {
3104     LoadHalfWordP(dst, mem, scratch);
3105   } else if (r.IsUInteger16()) {
3106     LoadHalfWordP(dst, mem, scratch);
3107 #if V8_TARGET_ARCH_S390X
3108   } else if (r.IsInteger32()) {
3109     LoadW(dst, mem, scratch);
3110 #endif
3111   } else {
3112     LoadP(dst, mem, scratch);
3113   }
3114 }
3115 
3116 void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
3117                                          Representation r, Register scratch) {
3118   DCHECK(!r.IsDouble());
3119   if (r.IsInteger8() || r.IsUInteger8()) {
3120     StoreByte(src, mem, scratch);
3121   } else if (r.IsInteger16() || r.IsUInteger16()) {
3122     StoreHalfWord(src, mem, scratch);
3123 #if V8_TARGET_ARCH_S390X
3124   } else if (r.IsInteger32()) {
3125     StoreW(src, mem, scratch);
3126 #endif
3127   } else {
3128     if (r.IsHeapObject()) {
3129       AssertNotSmi(src);
3130     } else if (r.IsSmi()) {
3131       AssertSmi(src);
3132     }
3133     StoreP(src, mem, scratch);
3134   }
3135 }
3136 
3137 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
3138                                                      Register scratch_reg,
3139                                                      Register scratch2_reg,
3140                                                      Label* no_memento_found) {
3141   Label map_check;
3142   Label top_check;
3143   ExternalReference new_space_allocation_top_adr =
3144       ExternalReference::new_space_allocation_top_address(isolate());
3145   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
3146   const int kMementoLastWordOffset =
3147       kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
3148 
3149   DCHECK(!AreAliased(receiver_reg, scratch_reg));
3150 
3151   // Bail out if the object is not in new space.
3152   JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
3153 
3154   DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
3155 
3156   // If the object is in new space, we need to check whether it is on the same
3157   // page as the current top.
3158   AddP(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
3159   mov(ip, Operand(new_space_allocation_top_adr));
3160   LoadP(ip, MemOperand(ip));
3161   XorP(r0, scratch_reg, ip);
3162   AndP(r0, r0, Operand(~Page::kPageAlignmentMask));
3163   beq(&top_check, Label::kNear);
3164   // The object is on a different page than allocation top. Bail out if the
3165   // object sits on the page boundary as no memento can follow and we cannot
3166   // touch the memory following it.
3167   XorP(r0, scratch_reg, receiver_reg);
3168   AndP(r0, r0, Operand(~Page::kPageAlignmentMask));
3169   bne(no_memento_found);
3170   // Continue with the actual map check.
3171   b(&map_check, Label::kNear);
3172   // If top is on the same page as the current object, we need to check whether
3173   // we are below top.
3174   bind(&top_check);
3175   CmpP(scratch_reg, ip);
3176   bge(no_memento_found);
3177   // Memento map check.
3178   bind(&map_check);
3179   LoadP(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
3180   CmpP(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()));
3181 }
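
// The XorP/AndP pairs above are same-page tests: two addresses lie on the
// same page iff their XOR has no bits set outside Page::kPageAlignmentMask,
// i.e. masking the XOR with ~Page::kPageAlignmentMask yields zero.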
3182 
3183 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
3184                                    Register reg4, Register reg5,
3185                                    Register reg6) {
3186   RegList regs = 0;
3187   if (reg1.is_valid()) regs |= reg1.bit();
3188   if (reg2.is_valid()) regs |= reg2.bit();
3189   if (reg3.is_valid()) regs |= reg3.bit();
3190   if (reg4.is_valid()) regs |= reg4.bit();
3191   if (reg5.is_valid()) regs |= reg5.bit();
3192   if (reg6.is_valid()) regs |= reg6.bit();
3193 
3194   const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
3195   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
3196     int code = config->GetAllocatableGeneralCode(i);
3197     Register candidate = Register::from_code(code);
3198     if (regs & candidate.bit()) continue;
3199     return candidate;
3200   }
3201   UNREACHABLE();
3202   return no_reg;
3203 }
3204 
3205 void MacroAssembler::mov(Register dst, const Operand& src) {
3206   if (src.rmode_ != kRelocInfo_NONEPTR) {
3207     // some form of relocation needed
3208     RecordRelocInfo(src.rmode_, src.imm_);
3209   }
3210 
3211 #if V8_TARGET_ARCH_S390X
3212   int64_t value = src.immediate();
3213   int32_t hi_32 = static_cast<int64_t>(value) >> 32;
3214   int32_t lo_32 = static_cast<int32_t>(value);
3215 
3216   iihf(dst, Operand(hi_32));
3217   iilf(dst, Operand(lo_32));
3218 #else
3219   int value = src.immediate();
3220   iilf(dst, Operand(value));
3221 #endif
3222 }
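
// Example: on 64-bit targets, mov(r1, Operand(0x123456789ABCDEF0)) emits
// iihf r1, 0x12345678 followed by iilf r1, 0x9ABCDEF0. The unconditional
// two-instruction form is what gives FIXED_SEQUENCE callers a patchable,
// predictable-size constant load.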
3223 
3224 void MacroAssembler::Mul32(Register dst, const MemOperand& src1) {
3225   if (is_uint12(src1.offset())) {
3226     ms(dst, src1);
3227   } else if (is_int20(src1.offset())) {
3228     msy(dst, src1);
3229   } else {
3230     UNIMPLEMENTED();
3231   }
3232 }
3233 
3234 void MacroAssembler::Mul32(Register dst, Register src1) { msr(dst, src1); }
3235 
3236 void MacroAssembler::Mul32(Register dst, const Operand& src1) {
3237   msfi(dst, src1);
3238 }
3239 
3240 #define Generate_MulHigh32(instr) \
3241   {                               \
3242     lgfr(dst, src1);              \
3243     instr(dst, src2);             \
3244     srlg(dst, dst, Operand(32));  \
3245   }
3246 
3247 void MacroAssembler::MulHigh32(Register dst, Register src1,
3248                                const MemOperand& src2) {
3249   Generate_MulHigh32(msgf);
3250 }
3251 
3252 void MacroAssembler::MulHigh32(Register dst, Register src1, Register src2) {
3253   if (dst.is(src2)) {
3254     std::swap(src1, src2);
3255   }
3256   Generate_MulHigh32(msgfr);
3257 }
3258 
3259 void MacroAssembler::MulHigh32(Register dst, Register src1,
3260                                const Operand& src2) {
3261   Generate_MulHigh32(msgfi);
3262 }
3263 
3264 #undef Generate_MulHigh32
3265 
3266 #define Generate_MulHighU32(instr) \
3267   {                                \
3268     lr(r1, src1);                  \
3269     instr(r0, src2);               \
3270     LoadlW(dst, r0);               \
3271   }
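
// The macro relies on the even/odd register pair convention: src1 is copied
// into r1, ml/mlr multiply the (r0, r1) pair by the other operand, and the
// high 32 bits of the 64-bit product arrive in r0, which is copied to dst.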
3272 
3273 void MacroAssembler::MulHighU32(Register dst, Register src1,
3274                                 const MemOperand& src2) {
3275   Generate_MulHighU32(ml);
3276 }
3277 
3278 void MacroAssembler::MulHighU32(Register dst, Register src1, Register src2) {
3279   Generate_MulHighU32(mlr);
3280 }
3281 
3282 void MacroAssembler::MulHighU32(Register dst, Register src1,
3283                                 const Operand& src2) {
3284   USE(dst);
3285   USE(src1);
3286   USE(src2);
3287   UNREACHABLE();
3288 }
3289 
3290 #undef Generate_MulHighU32
3291 
3292 #define Generate_Mul32WithOverflowIfCCUnequal(instr) \
3293   {                                                  \
3294     lgfr(dst, src1);                                 \
3295     instr(dst, src2);                                \
3296     cgfr(dst, dst);                                  \
3297   }
3298 
3299 void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
3300                                                   const MemOperand& src2) {
3301   Register result = dst;
3302   if (src2.rx().is(dst) || src2.rb().is(dst)) dst = r0;
3303   Generate_Mul32WithOverflowIfCCUnequal(msgf);
3304   if (!result.is(dst)) llgfr(result, dst);
3305 }
3306 
3307 void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
3308                                                   Register src2) {
3309   if (dst.is(src2)) {
3310     std::swap(src1, src2);
3311   }
3312   Generate_Mul32WithOverflowIfCCUnequal(msgfr);
3313 }
3314 
3315 void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
3316                                                   const Operand& src2) {
3317   Generate_Mul32WithOverflowIfCCUnequal(msgfi);
3318 }
3319 
3320 #undef Generate_Mul32WithOverflowIfCCUnequal
3321 
3322 void MacroAssembler::Mul64(Register dst, const MemOperand& src1) {
3323   if (is_int20(src1.offset())) {
3324     msg(dst, src1);
3325   } else {
3326     UNIMPLEMENTED();
3327   }
3328 }
3329 
3330 void MacroAssembler::Mul64(Register dst, Register src1) { msgr(dst, src1); }
3331 
3332 void MacroAssembler::Mul64(Register dst, const Operand& src1) {
3333   msgfi(dst, src1);
3334 }
3335 
3336 void MacroAssembler::Mul(Register dst, Register src1, Register src2) {
3337   if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
3338     MulPWithCondition(dst, src1, src2);
3339   } else {
3340     if (dst.is(src2)) {
3341       MulP(dst, src1);
3342     } else if (dst.is(src1)) {
3343       MulP(dst, src2);
3344     } else {
3345       Move(dst, src1);
3346       MulP(dst, src2);
3347     }
3348   }
3349 }
3350 
3351 void MacroAssembler::DivP(Register dividend, Register divider) {
3352   // The dividend must be the even register of an even/odd register pair.
3353   DCHECK(dividend.code() % 2 == 0);
3354 #if V8_TARGET_ARCH_S390X
3355   dsgr(dividend, divider);
3356 #else
3357   dr(dividend, divider);
3358 #endif
3359 }
3360 
3361 #define Generate_Div32(instr) \
3362   {                           \
3363     lgfr(r1, src1);           \
3364     instr(r0, src2);          \
3365     LoadlW(dst, r1);          \
3366   }
3367 
3368 void MacroAssembler::Div32(Register dst, Register src1,
3369                            const MemOperand& src2) {
3370   Generate_Div32(dsgf);
3371 }
3372 
3373 void MacroAssembler::Div32(Register dst, Register src1, Register src2) {
3374   Generate_Div32(dsgfr);
3375 }
3376 
3377 void MacroAssembler::Div32(Register dst, Register src1, const Operand& src2) {
3378   USE(dst);
3379   USE(src1);
3380   USE(src2);
3381   UNREACHABLE();
3382 }
3383 
3384 #undef Generate_Div32
3385 
3386 #define Generate_DivU32(instr) \
3387   {                            \
3388     lr(r0, src1);              \
3389     srdl(r0, Operand(32));     \
3390     instr(r0, src2);           \
3391     LoadlW(dst, r1);           \
3392   }
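
// Setup sketch: lr places the dividend in r0, then srdl shifts the (r0, r1)
// pair right by 32, leaving the zero-extended dividend in r1 with r0 cleared.
// dl/dlr divide the pair, producing the quotient in r1 (read here) and the
// remainder in r0 (read by Generate_ModU32 below).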
3393 
3394 void MacroAssembler::DivU32(Register dst, Register src1,
3395                             const MemOperand& src2) {
3396   Generate_DivU32(dl);
3397 }
3398 
3399 void MacroAssembler::DivU32(Register dst, Register src1, Register src2) {
3400   Generate_DivU32(dlr);
3401 }
3402 
3403 void MacroAssembler::DivU32(Register dst, Register src1, const Operand& src2) {
3404   USE(dst);
3405   USE(src1);
3406   USE(src2);
3407   UNREACHABLE();
3408 }
3409 
3410 #undef Generate_DivU32
3411 
3412 #define Generate_Mod32(instr) \
3413   {                           \
3414     lgfr(r1, src1);           \
3415     instr(r0, src2);          \
3416     LoadlW(dst, r0);          \
3417   }
3418 
3419 void MacroAssembler::Mod32(Register dst, Register src1,
3420                            const MemOperand& src2) {
3421   Generate_Mod32(dsgf);
3422 }
3423 
3424 void MacroAssembler::Mod32(Register dst, Register src1, Register src2) {
3425   Generate_Mod32(dsgfr);
3426 }
3427 
3428 void MacroAssembler::Mod32(Register dst, Register src1, const Operand& src2) {
3429   USE(dst);
3430   USE(src1);
3431   USE(src2);
3432   UNREACHABLE();
3433 }
3434 
3435 #undef Generate_Mod32
3436 
3437 #define Generate_ModU32(instr) \
3438   {                            \
3439     lr(r0, src1);              \
3440     srdl(r0, Operand(32));     \
3441     instr(r0, src2);           \
3442     LoadlW(dst, r0);           \
3443   }
3444 
3445 void MacroAssembler::ModU32(Register dst, Register src1,
3446                             const MemOperand& src2) {
3447   Generate_ModU32(dl);
3448 }
3449 
3450 void MacroAssembler::ModU32(Register dst, Register src1, Register src2) {
3451   Generate_ModU32(dlr);
3452 }
3453 
3454 void MacroAssembler::ModU32(Register dst, Register src1, const Operand& src2) {
3455   USE(dst);
3456   USE(src1);
3457   USE(src2);
3458   UNREACHABLE();
3459 }
3460 
3461 #undef Generate_ModU32
3462 
3463 void MacroAssembler::MulP(Register dst, const Operand& opnd) {
3464 #if V8_TARGET_ARCH_S390X
3465   msgfi(dst, opnd);
3466 #else
3467   msfi(dst, opnd);
3468 #endif
3469 }
3470 
3471 void MacroAssembler::MulP(Register dst, Register src) {
3472 #if V8_TARGET_ARCH_S390X
3473   msgr(dst, src);
3474 #else
3475   msr(dst, src);
3476 #endif
3477 }
3478 
3479 void MacroAssembler::MulPWithCondition(Register dst, Register src1,
3480                                        Register src2) {
3481   CHECK(CpuFeatures::IsSupported(MISC_INSTR_EXT2));
3482 #if V8_TARGET_ARCH_S390X
3483   msgrkc(dst, src1, src2);
3484 #else
3485   msrkc(dst, src1, src2);
3486 #endif
3487 }
3488 
3489 void MacroAssembler::MulP(Register dst, const MemOperand& opnd) {
3490 #if V8_TARGET_ARCH_S390X
3491   if (is_int20(opnd.offset())) {
3492     msg(dst, opnd);
3493   } else {
3494     UNIMPLEMENTED();
3495   }
3496 #else
3497   if (is_uint12(opnd.offset())) {
3498     ms(dst, opnd);
3499   } else if (is_int20(opnd.offset())) {
3500     msy(dst, opnd);
3501   } else {
3502     UNIMPLEMENTED();
3503   }
3504 #endif
3505 }
3506 
3507 void MacroAssembler::Sqrt(DoubleRegister result, DoubleRegister input) {
3508   sqdbr(result, input);
3509 }
3510 void MacroAssembler::Sqrt(DoubleRegister result, const MemOperand& input) {
3511   if (is_uint12(input.offset())) {
3512     sqdb(result, input);
3513   } else {
3514     ldy(result, input);
3515     sqdbr(result, result);
3516   }
3517 }
3518 //----------------------------------------------------------------------------
3519 //  Add Instructions
3520 //----------------------------------------------------------------------------
3521 
3522 // Add 32-bit (Register dst = Register dst + Immediate opnd)
3523 void MacroAssembler::Add32(Register dst, const Operand& opnd) {
3524   if (is_int16(opnd.immediate()))
3525     ahi(dst, opnd);
3526   else
3527     afi(dst, opnd);
3528 }
3529 
3530 // Add 32-bit (Register dst = Register dst + Immediate opnd)
3531 void MacroAssembler::Add32_RI(Register dst, const Operand& opnd) {
3532   // Just a wrapper for above
3533   Add32(dst, opnd);
3534 }
3535 
3536 // Add Pointer Size (Register dst = Register dst + Immediate opnd)
3537 void MacroAssembler::AddP(Register dst, const Operand& opnd) {
3538 #if V8_TARGET_ARCH_S390X
3539   if (is_int16(opnd.immediate()))
3540     aghi(dst, opnd);
3541   else
3542     agfi(dst, opnd);
3543 #else
3544   Add32(dst, opnd);
3545 #endif
3546 }
3547 
3548 // Add 32-bit (Register dst = Register src + Immediate opnd)
3549 void MacroAssembler::Add32(Register dst, Register src, const Operand& opnd) {
3550   if (!dst.is(src)) {
3551     if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
3552       ahik(dst, src, opnd);
3553       return;
3554     }
3555     lr(dst, src);
3556   }
3557   Add32(dst, opnd);
3558 }
3559 
3560 // Add 32-bit (Register dst = Register src + Immediate opnd)
3561 void MacroAssembler::Add32_RRI(Register dst, Register src,
3562                                const Operand& opnd) {
3563   // Just a wrapper for above
3564   Add32(dst, src, opnd);
3565 }
3566 
3567 // Add Pointer Size (Register dst = Register src + Immediate opnd)
3568 void MacroAssembler::AddP(Register dst, Register src, const Operand& opnd) {
3569   if (!dst.is(src)) {
3570     if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
3571       AddPImm_RRI(dst, src, opnd);
3572       return;
3573     }
3574     LoadRR(dst, src);
3575   }
3576   AddP(dst, opnd);
3577 }
3578 
3579 // Add 32-bit (Register dst = Register dst + Register src)
3580 void MacroAssembler::Add32(Register dst, Register src) { ar(dst, src); }
3581 
3582 // Add Pointer Size (Register dst = Register dst + Register src)
3583 void MacroAssembler::AddP(Register dst, Register src) { AddRR(dst, src); }
3584 
3585 // Add Pointer Size with src extension
3586 //     (Register dst(ptr) = Register dst (ptr) + Register src (32 | 32->64))
3587 // src is treated as a 32-bit signed integer, which is sign extended to
3588 // 64-bit if necessary.
3589 void MacroAssembler::AddP_ExtendSrc(Register dst, Register src) {
3590 #if V8_TARGET_ARCH_S390X
3591   agfr(dst, src);
3592 #else
3593   ar(dst, src);
3594 #endif
3595 }
3596 
3597 // Add 32-bit (Register dst = Register src1 + Register src2)
3598 void MacroAssembler::Add32(Register dst, Register src1, Register src2) {
3599   if (!dst.is(src1) && !dst.is(src2)) {
3600     // We prefer to generate AR/AGR over the non-clobbering ARK/AGRK,
3601     // as AR is a smaller instruction.
3602     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3603       ark(dst, src1, src2);
3604       return;
3605     } else {
3606       lr(dst, src1);
3607     }
3608   } else if (dst.is(src2)) {
3609     src2 = src1;
3610   }
3611   ar(dst, src2);
3612 }
3613 
3614 // Add Pointer Size (Register dst = Register src1 + Register src2)
3615 void MacroAssembler::AddP(Register dst, Register src1, Register src2) {
3616   if (!dst.is(src1) && !dst.is(src2)) {
3617     // We prefer to generate AR/AGR over the non-clobbering ARK/AGRK,
3618     // as AR is a smaller instruction.
3619     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3620       AddP_RRR(dst, src1, src2);
3621       return;
3622     } else {
3623       LoadRR(dst, src1);
3624     }
3625   } else if (dst.is(src2)) {
3626     src2 = src1;
3627   }
3628   AddRR(dst, src2);
3629 }
3630 
3631 // Add Pointer Size with src extension
3632 //      (Register dst (ptr) = Register dst (ptr) + Register src1 (ptr) +
3633 //                            Register src2 (32 | 32->64))
3634 // src is treated as a 32-bit signed integer, which is sign extended to
3635 // 64-bit if necessary.
3636 void MacroAssembler::AddP_ExtendSrc(Register dst, Register src1,
3637                                     Register src2) {
3638 #if V8_TARGET_ARCH_S390X
3639   if (dst.is(src2)) {
3640     // The source we need to sign extend is the same as result.
3641     lgfr(dst, src2);
3642     agr(dst, src1);
3643   } else {
3644     if (!dst.is(src1)) LoadRR(dst, src1);
3645     agfr(dst, src2);
3646   }
3647 #else
3648   AddP(dst, src1, src2);
3649 #endif
3650 }
3651 
3652 // Add 32-bit (Register-Memory)
3653 void MacroAssembler::Add32(Register dst, const MemOperand& opnd) {
3654   DCHECK(is_int20(opnd.offset()));
3655   if (is_uint12(opnd.offset()))
3656     a(dst, opnd);
3657   else
3658     ay(dst, opnd);
3659 }
3660 
3661 // Add Pointer Size (Register-Memory)
3662 void MacroAssembler::AddP(Register dst, const MemOperand& opnd) {
3663 #if V8_TARGET_ARCH_S390X
3664   DCHECK(is_int20(opnd.offset()));
3665   ag(dst, opnd);
3666 #else
3667   Add32(dst, opnd);
3668 #endif
3669 }
3670 
3671 // Add Pointer Size with src extension
3672 //      (Register dst (ptr) = Register dst (ptr) + Mem opnd (32 | 32->64))
3673 // src is treated as a 32-bit signed integer, which is sign extended to
3674 // 64-bit if necessary.
3675 void MacroAssembler::AddP_ExtendSrc(Register dst, const MemOperand& opnd) {
3676 #if V8_TARGET_ARCH_S390X
3677   DCHECK(is_int20(opnd.offset()));
3678   agf(dst, opnd);
3679 #else
3680   Add32(dst, opnd);
3681 #endif
3682 }
3683 
3684 // Add 32-bit (Memory - Immediate)
3685 void MacroAssembler::Add32(const MemOperand& opnd, const Operand& imm) {
3686   DCHECK(is_int8(imm.immediate()));
3687   DCHECK(is_int20(opnd.offset()));
3688   DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
3689   asi(opnd, imm);
3690 }
3691 
3692 // Add Pointer-sized (Memory - Immediate)
3693 void MacroAssembler::AddP(const MemOperand& opnd, const Operand& imm) {
3694   DCHECK(is_int8(imm.immediate()));
3695   DCHECK(is_int20(opnd.offset()));
3696   DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
3697 #if V8_TARGET_ARCH_S390X
3698   agsi(opnd, imm);
3699 #else
3700   asi(opnd, imm);
3701 #endif
3702 }
3703 
3704 //----------------------------------------------------------------------------
3705 //  Add Logical Instructions
3706 //----------------------------------------------------------------------------
3707 
3708 // Add Logical With Carry 32-bit (Register dst = Register src1 + Register src2)
3709 void MacroAssembler::AddLogicalWithCarry32(Register dst, Register src1,
3710                                            Register src2) {
3711   if (!dst.is(src2) && !dst.is(src1)) {
3712     lr(dst, src1);
3713     alcr(dst, src2);
3714   } else if (!dst.is(src2)) {
3715     // dst == src1
3716     DCHECK(dst.is(src1));
3717     alcr(dst, src2);
3718   } else {
3719     // dst == src2
3720     DCHECK(dst.is(src2));
3721     alcr(dst, src1);
3722   }
3723 }
3724 
3725 // Add Logical 32-bit (Register dst = Register src1 + Register src2)
3726 void MacroAssembler::AddLogical32(Register dst, Register src1, Register src2) {
3727   if (!dst.is(src2) && !dst.is(src1)) {
3728     lr(dst, src1);
3729     alr(dst, src2);
3730   } else if (!dst.is(src2)) {
3731     // dst == src1
3732     DCHECK(dst.is(src1));
3733     alr(dst, src2);
3734   } else {
3735     // dst == src2
3736     DCHECK(dst.is(src2));
3737     alr(dst, src1);
3738   }
3739 }
3740 
3741 // Add Logical 32-bit (Register dst = Register dst + Immediate opnd)
3742 void MacroAssembler::AddLogical(Register dst, const Operand& imm) {
3743   alfi(dst, imm);
3744 }
3745 
3746 // Add Logical Pointer Size (Register dst = Register dst + Immediate opnd)
3747 void MacroAssembler::AddLogicalP(Register dst, const Operand& imm) {
3748 #ifdef V8_TARGET_ARCH_S390X
3749   algfi(dst, imm);
3750 #else
3751   AddLogical(dst, imm);
3752 #endif
3753 }
3754 
3755 // Add Logical 32-bit (Register-Memory)
3756 void MacroAssembler::AddLogical(Register dst, const MemOperand& opnd) {
3757   DCHECK(is_int20(opnd.offset()));
3758   if (is_uint12(opnd.offset()))
3759     al_z(dst, opnd);
3760   else
3761     aly(dst, opnd);
3762 }
3763 
3764 // Add Logical Pointer Size (Register-Memory)
3765 void MacroAssembler::AddLogicalP(Register dst, const MemOperand& opnd) {
3766 #if V8_TARGET_ARCH_S390X
3767   DCHECK(is_int20(opnd.offset()));
3768   alg(dst, opnd);
3769 #else
3770   AddLogical(dst, opnd);
3771 #endif
3772 }
3773 
3774 //----------------------------------------------------------------------------
3775 //  Subtract Instructions
3776 //----------------------------------------------------------------------------
3777 
3778 // Subtract Logical With Borrow 32-bit (Register dst = Register src1 -
3779 // Register src2)
3780 void MacroAssembler::SubLogicalWithBorrow32(Register dst, Register src1,
3781                                             Register src2) {
3782   if (!dst.is(src2) && !dst.is(src1)) {
3783     lr(dst, src1);
3784     slbr(dst, src2);
3785   } else if (!dst.is(src2)) {
3786     // dst == src1
3787     DCHECK(dst.is(src1));
3788     slbr(dst, src2);
3789   } else {
3790     // dst == src2
3791     DCHECK(dst.is(src2));
3792     lr(r0, dst);
3793     SubLogicalWithBorrow32(dst, src1, r0);
3794   }
3795 }
3796 
3797 // Subtract Logical 32-bit (Register dst = Register src1 - Register src2)
3798 void MacroAssembler::SubLogical32(Register dst, Register src1, Register src2) {
3799   if (!dst.is(src2) && !dst.is(src1)) {
3800     lr(dst, src1);
3801     slr(dst, src2);
3802   } else if (!dst.is(src2)) {
3803     // dst == src1
3804     DCHECK(dst.is(src1));
3805     slr(dst, src2);
3806   } else {
3807     // dst == src2
3808     DCHECK(dst.is(src2));
3809     lr(r0, dst);
3810     SubLogical32(dst, src1, r0);
3811   }
3812 }
3813 
3814 // Subtract 32-bit (Register dst = Register dst - Immediate opnd)
3815 void MacroAssembler::Sub32(Register dst, const Operand& imm) {
3816   Add32(dst, Operand(-(imm.imm_)));
3817 }
3818 
3819 // Subtract Pointer Size (Register dst = Register dst - Immediate opnd)
3820 void MacroAssembler::SubP(Register dst, const Operand& imm) {
3821   AddP(dst, Operand(-(imm.imm_)));
3822 }
3823 
3824 // Subtract 32-bit (Register dst = Register src - Immediate opnd)
3825 void MacroAssembler::Sub32(Register dst, Register src, const Operand& imm) {
3826   Add32(dst, src, Operand(-(imm.imm_)));
3827 }
3828 
3829 // Subtract Pointer Sized (Register dst = Register src - Immediate opnd)
3830 void MacroAssembler::SubP(Register dst, Register src, const Operand& imm) {
3831   AddP(dst, src, Operand(-(imm.imm_)));
3832 }
3833 
3834 // Subtract 32-bit (Register dst = Register dst - Register src)
3835 void MacroAssembler::Sub32(Register dst, Register src) { sr(dst, src); }
3836 
3837 // Subtract Pointer Size (Register dst = Register dst - Register src)
3838 void MacroAssembler::SubP(Register dst, Register src) { SubRR(dst, src); }
3839 
3840 // Subtract Pointer Size with src extension
3841 //     (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
3842 // src is treated as a 32-bit signed integer, which is sign extended to
3843 // 64-bit if necessary.
3844 void MacroAssembler::SubP_ExtendSrc(Register dst, Register src) {
3845 #if V8_TARGET_ARCH_S390X
3846   sgfr(dst, src);
3847 #else
3848   sr(dst, src);
3849 #endif
3850 }
3851 
3852 // Subtract 32-bit (Register = Register - Register)
3853 void MacroAssembler::Sub32(Register dst, Register src1, Register src2) {
3854   // Use non-clobbering version if possible
3855   if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3856     srk(dst, src1, src2);
3857     return;
3858   }
3859   if (!dst.is(src1) && !dst.is(src2)) lr(dst, src1);
3860   // In scenario where we have dst = src - dst, we need to swap and negate
3861   if (!dst.is(src1) && dst.is(src2)) {
3862     Label done;
3863     lcr(dst, dst);  // dst = -dst
3864     b(overflow, &done);
3865     ar(dst, src1);  // dst = dst + src
3866     bind(&done);
3867   } else {
3868     sr(dst, src2);
3869   }
3870 }
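
// When dst aliases src2, dst = src1 - dst is rewritten as dst = -dst + src1.
// lcr sets the overflow condition only for INT_MIN, whose negation is not
// representable; in that case the add is skipped so the overflow indication
// is preserved for callers that branch on it.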
3871 
3872 // Subtract Pointer Sized (Register = Register - Register)
3873 void MacroAssembler::SubP(Register dst, Register src1, Register src2) {
3874   // Use non-clobbering version if possible
3875   if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3876     SubP_RRR(dst, src1, src2);
3877     return;
3878   }
3879   if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);
3880   // In scenario where we have dst = src - dst, we need to swap and negate
3881   if (!dst.is(src1) && dst.is(src2)) {
3882     Label done;
3883     LoadComplementRR(dst, dst);  // dst = -dst
3884     b(overflow, &done);
3885     AddP(dst, src1);  // dst = dst + src
3886     bind(&done);
3887   } else {
3888     SubP(dst, src2);
3889   }
3890 }
3891 
3892 // Subtract Pointer Size with src extension
3893 //     (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
3894 // src is treated as a 32-bit signed integer, which is sign extended to
3895 // 64-bit if necessary.
3896 void MacroAssembler::SubP_ExtendSrc(Register dst, Register src1,
3897                                     Register src2) {
3898 #if V8_TARGET_ARCH_S390X
3899   if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);
3900 
3901   // In scenario where we have dst = src - dst, we need to swap and negate
3902   if (!dst.is(src1) && dst.is(src2)) {
3903     lgfr(dst, dst);              // Sign extend this operand first.
3904     LoadComplementRR(dst, dst);  // dst = -dst
3905     AddP(dst, src1);             // dst = -dst + src
3906   } else {
3907     sgfr(dst, src2);
3908   }
3909 #else
3910   SubP(dst, src1, src2);
3911 #endif
3912 }
3913 
3914 // Subtract 32-bit (Register-Memory)
3915 void MacroAssembler::Sub32(Register dst, const MemOperand& opnd) {
3916   DCHECK(is_int20(opnd.offset()));
3917   if (is_uint12(opnd.offset()))
3918     s(dst, opnd);
3919   else
3920     sy(dst, opnd);
3921 }
3922 
3923 // Subtract Pointer Sized (Register - Memory)
3924 void MacroAssembler::SubP(Register dst, const MemOperand& opnd) {
3925 #if V8_TARGET_ARCH_S390X
3926   sg(dst, opnd);
3927 #else
3928   Sub32(dst, opnd);
3929 #endif
3930 }
3931 
3932 void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
3933   sllg(r0, src, Operand(32));
3934   ldgr(dst, r0);
3935 }
3936 
3937 void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
3938   lgdr(dst, src);
3939   srlg(dst, dst, Operand(32));
3940 }
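
// Both helpers transfer whole 8-byte FPR images with ldgr/lgdr; a 32-bit
// float occupies the high half of the floating-point register, hence the
// 32-bit shifts on either side of the move.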
3941 
3942 void MacroAssembler::SubP_ExtendSrc(Register dst, const MemOperand& opnd) {
3943 #if V8_TARGET_ARCH_S390X
3944   DCHECK(is_int20(opnd.offset()));
3945   sgf(dst, opnd);
3946 #else
3947   Sub32(dst, opnd);
3948 #endif
3949 }
3950 
3951 //----------------------------------------------------------------------------
3952 //  Subtract Logical Instructions
3953 //----------------------------------------------------------------------------
3954 
3955 // Subtract Logical 32-bit (Register - Memory)
3956 void MacroAssembler::SubLogical(Register dst, const MemOperand& opnd) {
3957   DCHECK(is_int20(opnd.offset()));
3958   if (is_uint12(opnd.offset()))
3959     sl(dst, opnd);
3960   else
3961     sly(dst, opnd);
3962 }
3963 
3964 // Subtract Logical Pointer Sized (Register - Memory)
3965 void MacroAssembler::SubLogicalP(Register dst, const MemOperand& opnd) {
3966   DCHECK(is_int20(opnd.offset()));
3967 #if V8_TARGET_ARCH_S390X
3968   slgf(dst, opnd);
3969 #else
3970   SubLogical(dst, opnd);
3971 #endif
3972 }
3973 
3974 // Subtract Logical Pointer Size with src extension
3975 //      (Register dst (ptr) = Register dst (ptr) - Mem opnd (32 | 32->64))
3976 // src is treated as a 32-bit signed integer, which is sign extended to
3977 // 64-bit if necessary.
3978 void MacroAssembler::SubLogicalP_ExtendSrc(Register dst,
3979                                            const MemOperand& opnd) {
3980 #if V8_TARGET_ARCH_S390X
3981   DCHECK(is_int20(opnd.offset()));
3982   slgf(dst, opnd);
3983 #else
3984   SubLogical(dst, opnd);
3985 #endif
3986 }
3987 
3988 //----------------------------------------------------------------------------
3989 //  Bitwise Operations
3990 //----------------------------------------------------------------------------
3991 
3992 // AND 32-bit - dst = dst & src
3993 void MacroAssembler::And(Register dst, Register src) { nr(dst, src); }
3994 
3995 // AND Pointer Size - dst = dst & src
3996 void MacroAssembler::AndP(Register dst, Register src) { AndRR(dst, src); }
3997 
3998 // Non-clobbering AND 32-bit - dst = src1 & src2
3999 void MacroAssembler::And(Register dst, Register src1, Register src2) {
4000   if (!dst.is(src1) && !dst.is(src2)) {
4001     // We prefer to generate NR/NGR over the non-clobbering NRK/NGRK,
4002     // as NR is a smaller instruction.
4003     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4004       nrk(dst, src1, src2);
4005       return;
4006     } else {
4007       lr(dst, src1);
4008     }
4009   } else if (dst.is(src2)) {
4010     src2 = src1;
4011   }
4012   And(dst, src2);
4013 }
4014 
4015 // Non-clobbering AND pointer size - dst = src1 & src2
4016 void MacroAssembler::AndP(Register dst, Register src1, Register src2) {
4017   if (!dst.is(src1) && !dst.is(src2)) {
4018     // We prefer to generate NR/NGR over the non-clobbering NRK/NGRK,
4019     // as NR is a smaller instruction.
4020     if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4021       AndP_RRR(dst, src1, src2);
4022       return;
4023     } else {
4024       LoadRR(dst, src1);
4025     }
4026   } else if (dst.is(src2)) {
4027     src2 = src1;
4028   }
4029   AndP(dst, src2);
4030 }
4031 
4032 // AND 32-bit (Reg - Mem)
4033 void MacroAssembler::And(Register dst, const MemOperand& opnd) {
4034   DCHECK(is_int20(opnd.offset()));
4035   if (is_uint12(opnd.offset()))
4036     n(dst, opnd);
4037   else
4038     ny(dst, opnd);
4039 }
4040 
4041 // AND Pointer Size (Reg - Mem)
4042 void MacroAssembler::AndP(Register dst, const MemOperand& opnd) {
4043   DCHECK(is_int20(opnd.offset()));
4044 #if V8_TARGET_ARCH_S390X
4045   ng(dst, opnd);
4046 #else
4047   And(dst, opnd);
4048 #endif
4049 }
4050 
4051 // AND 32-bit - dst = dst & imm
4052 void MacroAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }
4053 
4054 // AND Pointer Size - dst = dst & imm
4055 void MacroAssembler::AndP(Register dst, const Operand& opnd) {
4056 #if V8_TARGET_ARCH_S390X
4057   intptr_t value = opnd.imm_;
4058   if (value >> 32 != -1) {
4059     // this may not work b/c condition code won't be set correctly
4060     nihf(dst, Operand(value >> 32));
4061   }
4062   nilf(dst, Operand(value & 0xFFFFFFFF));
4063 #else
4064   And(dst, opnd);
4065 #endif
4066 }
4067 
// AND 32-bit - dst = src & imm
void MacroAssembler::And(Register dst, Register src, const Operand& opnd) {
  if (!dst.is(src)) lr(dst, src);
  nilf(dst, opnd);
}

// AND Pointer Size - dst = src & imm
void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) {
  // Try to exploit RISBG first.
  intptr_t value = opnd.imm_;
  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
    intptr_t shifted_value = value;
    int trailing_zeros = 0;

    // Count the trailing zeros at the end of the value.
    while ((0 != shifted_value) && (0 == (shifted_value & 1))) {
      trailing_zeros++;
      shifted_value >>= 1;
    }

    // If shifted_value (the value with the right-most run of zeros shifted
    // out) is 1 less than a power of 2, the mask is a run of consecutive
    // 1-bits and RISBG can select exactly those bits.
    // Special case: if shifted_value is zero, we cannot use RISBG, as it
    //               requires selection of at least 1 bit.
    if ((0 != shifted_value) && base::bits::IsPowerOfTwo64(shifted_value + 1)) {
      int startBit =
          base::bits::CountLeadingZeros64(shifted_value) - trailing_zeros;
      int endBit = 63 - trailing_zeros;
      // Start: startBit, End: endBit, Shift = 0, true = zero unselected bits.
      risbg(dst, src, Operand(startBit), Operand(endBit), Operand::Zero(),
            true);
      return;
    } else if (-1 == shifted_value) {
      // Special case in which all top bits up to the MSB are 1's.  In this
      // case, we can set startBit to 0.
      int endBit = 63 - trailing_zeros;
      risbg(dst, src, Operand::Zero(), Operand(endBit), Operand::Zero(), true);
      return;
    }
  }

  // If we are AND'ing with zero, the AND alone zeroes dst, so skip the copy.
  if (!dst.is(src) && (0 != value)) LoadRR(dst, src);
  AndP(dst, opnd);
}

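// Worked example for the RISBG path above (illustrative values only):
// AndP(r3, r4, Operand(0xFF0)): the mask 0xFF0 has 4 trailing zeros and
// shifted_value = 0xFF; 0xFF + 1 is a power of 2, so the mask is one run of
// consecutive ones.  startBit = 56 - 4 = 52 and endBit = 63 - 4 = 59 (IBM
// numbering, bit 0 = MSB), so the emitted risbg selects bits 52..59 of r4
// and zeroes the rest, i.e. r3 = r4 & 0xFF0 in a single instruction.
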
// OR 32-bit - dst = dst | src
void MacroAssembler::Or(Register dst, Register src) { or_z(dst, src); }

// OR Pointer Size - dst = dst | src
void MacroAssembler::OrP(Register dst, Register src) { OrRR(dst, src); }

// Non-clobbering OR 32-bit - dst = src1 | src2
void MacroAssembler::Or(Register dst, Register src1, Register src2) {
  if (!dst.is(src1) && !dst.is(src2)) {
    // We prefer to generate OR/OGR over the non-clobbering ORK/OGRK
    // as OR is a smaller instruction.
    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
      ork(dst, src1, src2);
      return;
    } else {
      lr(dst, src1);
    }
  } else if (dst.is(src2)) {
    src2 = src1;
  }
  Or(dst, src2);
}

// Non-clobbering OR pointer size - dst = src1 | src2
void MacroAssembler::OrP(Register dst, Register src1, Register src2) {
  if (!dst.is(src1) && !dst.is(src2)) {
    // We prefer to generate OR/OGR over the non-clobbering ORK/OGRK
    // as OR is a smaller instruction.
    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
      OrP_RRR(dst, src1, src2);
      return;
    } else {
      LoadRR(dst, src1);
    }
  } else if (dst.is(src2)) {
    src2 = src1;
  }
  OrP(dst, src2);
}

// OR 32-bit (Reg - Mem)
void MacroAssembler::Or(Register dst, const MemOperand& opnd) {
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
    o(dst, opnd);
  else
    oy(dst, opnd);
}

// OR Pointer Size (Reg - Mem)
void MacroAssembler::OrP(Register dst, const MemOperand& opnd) {
  DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
  og(dst, opnd);
#else
  Or(dst, opnd);
#endif
}

// OR 32-bit - dst = dst | imm
void MacroAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }

// OR Pointer Size - dst = dst | imm
void MacroAssembler::OrP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
  intptr_t value = opnd.imm_;
  if (value >> 32 != 0) {
    // This may not work correctly because the condition code is not set
    // based on the full 64-bit result.
    oihf(dst, Operand(value >> 32));
  }
  oilf(dst, Operand(value & 0xFFFFFFFF));
#else
  Or(dst, opnd);
#endif
}

// OR 32-bit - dst = src | imm
void MacroAssembler::Or(Register dst, Register src, const Operand& opnd) {
  if (!dst.is(src)) lr(dst, src);
  oilf(dst, opnd);
}

// OR Pointer Size - dst = src | imm
void MacroAssembler::OrP(Register dst, Register src, const Operand& opnd) {
  if (!dst.is(src)) LoadRR(dst, src);
  OrP(dst, opnd);
}

// XOR 32-bit - dst = dst ^ src
void MacroAssembler::Xor(Register dst, Register src) { xr(dst, src); }

// XOR Pointer Size - dst = dst ^ src
void MacroAssembler::XorP(Register dst, Register src) { XorRR(dst, src); }

// Non-clobbering XOR 32-bit - dst = src1 ^ src2
void MacroAssembler::Xor(Register dst, Register src1, Register src2) {
  if (!dst.is(src1) && !dst.is(src2)) {
    // We prefer to generate XR/XGR over the non-clobbering XRK/XGRK
    // as XR is a smaller instruction.
    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
      xrk(dst, src1, src2);
      return;
    } else {
      lr(dst, src1);
    }
  } else if (dst.is(src2)) {
    src2 = src1;
  }
  Xor(dst, src2);
}

// Non-clobbering XOR pointer size - dst = src1 ^ src2
void MacroAssembler::XorP(Register dst, Register src1, Register src2) {
  if (!dst.is(src1) && !dst.is(src2)) {
    // We prefer to generate XR/XGR over the non-clobbering XRK/XGRK
    // as XR is a smaller instruction.
    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
      XorP_RRR(dst, src1, src2);
      return;
    } else {
      LoadRR(dst, src1);
    }
  } else if (dst.is(src2)) {
    src2 = src1;
  }
  XorP(dst, src2);
}

// XOR 32-bit (Reg - Mem)
void MacroAssembler::Xor(Register dst, const MemOperand& opnd) {
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
    x(dst, opnd);
  else
    xy(dst, opnd);
}

// XOR Pointer Size (Reg - Mem)
void MacroAssembler::XorP(Register dst, const MemOperand& opnd) {
  DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
  xg(dst, opnd);
#else
  Xor(dst, opnd);
#endif
}

// XOR 32-bit - dst = dst ^ imm
void MacroAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }

// XOR Pointer Size - dst = dst ^ imm
void MacroAssembler::XorP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
  intptr_t value = opnd.imm_;
  xihf(dst, Operand(value >> 32));
  xilf(dst, Operand(value & 0xFFFFFFFF));
#else
  Xor(dst, opnd);
#endif
}

// XOR 32-bit - dst = src ^ imm
void MacroAssembler::Xor(Register dst, Register src, const Operand& opnd) {
  if (!dst.is(src)) lr(dst, src);
  xilf(dst, opnd);
}

// XOR Pointer Size - dst = src ^ imm
void MacroAssembler::XorP(Register dst, Register src, const Operand& opnd) {
  if (!dst.is(src)) LoadRR(dst, src);
  XorP(dst, opnd);
}

void MacroAssembler::Not32(Register dst, Register src) {
  if (!src.is(no_reg) && !src.is(dst)) lr(dst, src);
  xilf(dst, Operand(0xFFFFFFFF));
}

void MacroAssembler::Not64(Register dst, Register src) {
  if (!src.is(no_reg) && !src.is(dst)) lgr(dst, src);
  xihf(dst, Operand(0xFFFFFFFF));
  xilf(dst, Operand(0xFFFFFFFF));
}

void MacroAssembler::NotP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  Not64(dst, src);
#else
  Not32(dst, src);
#endif
}

// Works the same as mov.
void MacroAssembler::Load(Register dst, const Operand& opnd) {
  intptr_t value = opnd.immediate();
  if (is_int16(value)) {
#if V8_TARGET_ARCH_S390X
    lghi(dst, opnd);
#else
    lhi(dst, opnd);
#endif
  } else if (is_int32(value)) {
#if V8_TARGET_ARCH_S390X
    lgfi(dst, opnd);
#else
    iilf(dst, opnd);
#endif
  } else if (is_uint32(value)) {
#if V8_TARGET_ARCH_S390X
    llilf(dst, opnd);
#else
    iilf(dst, opnd);
#endif
  } else {
    int32_t hi_32 = static_cast<int64_t>(value) >> 32;
    int32_t lo_32 = static_cast<int32_t>(value);

    iihf(dst, Operand(hi_32));
    iilf(dst, Operand(lo_32));
  }
}

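// Instruction selection above, by example (illustrative values only): on
// 64-bit targets, Load(r3, Operand(-5)) fits in 16 bits and emits LGHI;
// 100000 fits in a signed 32-bit immediate and emits LGFI; 0x80000000 is
// representable only as an unsigned 32-bit value and emits LLILF; a wider
// value such as 0x123456789A falls through to the IIHF/IILF pair, which
// writes the high and low register halves separately.
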
void MacroAssembler::Load(Register dst, const MemOperand& opnd) {
  DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
  lgf(dst, opnd);  // 64<-32
#else
  if (is_uint12(opnd.offset())) {
    l(dst, opnd);
  } else {
    ly(dst, opnd);
  }
#endif
}

void MacroAssembler::LoadPositiveP(Register result, Register input) {
#if V8_TARGET_ARCH_S390X
  lpgr(result, input);
#else
  lpr(result, input);
#endif
}

void MacroAssembler::LoadPositive32(Register result, Register input) {
  lpr(result, input);
  lgfr(result, result);
}

//-----------------------------------------------------------------------------
//  Compare Helpers
//-----------------------------------------------------------------------------

// Compare 32-bit Register vs Register
void MacroAssembler::Cmp32(Register src1, Register src2) { cr_z(src1, src2); }

// Compare Pointer Sized Register vs Register
void MacroAssembler::CmpP(Register src1, Register src2) {
#if V8_TARGET_ARCH_S390X
  cgr(src1, src2);
#else
  Cmp32(src1, src2);
#endif
}

// Compare 32-bit Register vs Immediate
// This helper will set up proper relocation entries if required.
void MacroAssembler::Cmp32(Register dst, const Operand& opnd) {
  if (opnd.rmode_ == kRelocInfo_NONEPTR) {
    intptr_t value = opnd.immediate();
    if (is_int16(value))
      chi(dst, opnd);
    else
      cfi(dst, opnd);
  } else {
    // Need to generate a relocation record here.
    RecordRelocInfo(opnd.rmode_, opnd.imm_);
    cfi(dst, opnd);
  }
}

// Compare Pointer Sized Register vs Immediate
// This helper will set up proper relocation entries if required.
void MacroAssembler::CmpP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
  if (opnd.rmode_ == kRelocInfo_NONEPTR) {
    cgfi(dst, opnd);
  } else {
    mov(r0, opnd);  // Need to generate a 64-bit relocation.
    cgr(dst, r0);
  }
#else
  Cmp32(dst, opnd);
#endif
}

// Compare 32-bit Register vs Memory
void MacroAssembler::Cmp32(Register dst, const MemOperand& opnd) {
  // Make sure the offset is within the 20-bit range.
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
    c(dst, opnd);
  else
    cy(dst, opnd);
}

// Compare Pointer Size Register vs Memory
void MacroAssembler::CmpP(Register dst, const MemOperand& opnd) {
  // Make sure the offset is within the 20-bit range.
  DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
  cg(dst, opnd);
#else
  Cmp32(dst, opnd);
#endif
}

//-----------------------------------------------------------------------------
// Compare Logical Helpers
//-----------------------------------------------------------------------------

// Compare Logical 32-bit Register vs Register
void MacroAssembler::CmpLogical32(Register dst, Register src) { clr(dst, src); }

// Compare Logical Pointer Sized Register vs Register
void MacroAssembler::CmpLogicalP(Register dst, Register src) {
#ifdef V8_TARGET_ARCH_S390X
  clgr(dst, src);
#else
  CmpLogical32(dst, src);
#endif
}

// Compare Logical 32-bit Register vs Immediate
void MacroAssembler::CmpLogical32(Register dst, const Operand& opnd) {
  clfi(dst, opnd);
}

// Compare Logical Pointer Sized Register vs Immediate
void MacroAssembler::CmpLogicalP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
  DCHECK(static_cast<uint32_t>(opnd.immediate() >> 32) == 0);
  clgfi(dst, opnd);
#else
  CmpLogical32(dst, opnd);
#endif
}

// Compare Logical 32-bit Register vs Memory
void MacroAssembler::CmpLogical32(Register dst, const MemOperand& opnd) {
  // Make sure the offset is within the 20-bit range.
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
    cl(dst, opnd);
  else
    cly(dst, opnd);
}

// Compare Logical Pointer Sized Register vs Memory
void MacroAssembler::CmpLogicalP(Register dst, const MemOperand& opnd) {
  // Make sure the offset is within the 20-bit range.
  DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
  clg(dst, opnd);
#else
  CmpLogical32(dst, opnd);
#endif
}

// Compare Logical Byte (Mem - Imm)
void MacroAssembler::CmpLogicalByte(const MemOperand& mem, const Operand& imm) {
  DCHECK(is_uint8(imm.immediate()));
  if (is_uint12(mem.offset()))
    cli(mem, imm);
  else
    cliy(mem, imm);
}

void MacroAssembler::Branch(Condition c, const Operand& opnd) {
  intptr_t value = opnd.immediate();
  if (is_int16(value))
    brc(c, opnd);
  else
    brcl(c, opnd);
}

// Branch On Count.  Decrement R1, and branch if R1 != 0.
void MacroAssembler::BranchOnCount(Register r1, Label* l) {
  int32_t offset = branch_offset(l);
  if (is_int16(offset)) {
#if V8_TARGET_ARCH_S390X
    brctg(r1, Operand(offset));
#else
    brct(r1, Operand(offset));
#endif
  } else {
    AddP(r1, Operand(-1));
    Branch(ne, Operand(offset));
  }
}

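// A typical use is a counted loop; a minimal sketch (the register choice and
// loop body are assumptions for illustration):
//   Label loop;
//   LoadImmP(r4, Operand(10));
//   bind(&loop);
//   ... loop body ...
//   BranchOnCount(r4, &loop);  // body runs 10 times; r4 counts down to 0
// BRCT/BRCTG folds the decrement and the conditional branch into a single
// instruction whenever the branch offset fits in 16 bits.
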
void MacroAssembler::LoadIntLiteral(Register dst, int value) {
  Load(dst, Operand(value));
}

void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
  intptr_t value = reinterpret_cast<intptr_t>(smi);
#if V8_TARGET_ARCH_S390X
  DCHECK((value & 0xffffffff) == 0);
  // The smi value is loaded into the upper 32 bits.  The lower 32 bits are
  // zero.
  llihf(dst, Operand(value >> 32));
#else
  llilf(dst, Operand(value));
#endif
}

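// For example (illustrative values): on 64-bit targets Smi::FromInt(5) has
// the raw bit pattern 0x0000000500000000, so the DCHECK holds and a single
// LLIHF with immediate 0x5 materializes it.  On 32-bit targets the same smi
// is the tagged value 0xA (5 << 1) and is loaded directly with LLILF.
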
void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, uint64_t value,
                                       Register scratch) {
  uint32_t hi_32 = value >> 32;
  uint32_t lo_32 = static_cast<uint32_t>(value);

  // Load the 64-bit value into a GPR, then transfer it to an FPR via LDGR.
  if (value == 0) {
    lzdr(result);
  } else if (lo_32 == 0) {
    llihf(scratch, Operand(hi_32));
    ldgr(result, scratch);
  } else {
    iihf(scratch, Operand(hi_32));
    iilf(scratch, Operand(lo_32));
    ldgr(result, scratch);
  }
}

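// For example (illustrative value): the double 1.0 has the IEEE-754 bit
// pattern 0x3FF0000000000000.  Its low word is zero, so the middle path
// applies: LLIHF loads 0x3FF00000 into the scratch GPR's high word (zeroing
// the rest) and LDGR moves the 64 bits into the FPR without touching memory.
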
void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
                                       Register scratch) {
  uint64_t int_val = bit_cast<uint64_t, double>(value);
  LoadDoubleLiteral(result, int_val, scratch);
}

void MacroAssembler::LoadFloat32Literal(DoubleRegister result, float value,
                                        Register scratch) {
  uint64_t int_val = static_cast<uint64_t>(bit_cast<uint32_t, float>(value))
                     << 32;
  LoadDoubleLiteral(result, int_val, scratch);
}

void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
#if V8_TARGET_ARCH_S390X
  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    cih(src1, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
  } else {
    LoadSmiLiteral(scratch, smi);
    cgr(src1, scratch);
  }
#else
  // CFI takes a 32-bit immediate.
  cfi(src1, Operand(smi));
#endif
}

void MacroAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
                                          Register scratch) {
#if V8_TARGET_ARCH_S390X
  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    clih(src1, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
  } else {
    LoadSmiLiteral(scratch, smi);
    clgr(src1, scratch);
  }
#else
  // CLFI takes a 32-bit immediate.
  clfi(src1, Operand(smi));
#endif
}

void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
                                   Register scratch) {
#if V8_TARGET_ARCH_S390X
  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    if (!dst.is(src)) LoadRR(dst, src);
    aih(dst, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
  } else {
    LoadSmiLiteral(scratch, smi);
    AddP(dst, src, scratch);
  }
#else
  AddP(dst, src, Operand(reinterpret_cast<intptr_t>(smi)));
#endif
}

void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
                                   Register scratch) {
#if V8_TARGET_ARCH_S390X
  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    if (!dst.is(src)) LoadRR(dst, src);
    aih(dst, Operand((-reinterpret_cast<intptr_t>(smi)) >> 32));
  } else {
    LoadSmiLiteral(scratch, smi);
    SubP(dst, src, scratch);
  }
#else
  AddP(dst, src, Operand(-(reinterpret_cast<intptr_t>(smi))));
#endif
}

void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
  if (!dst.is(src)) LoadRR(dst, src);
#if V8_TARGET_ARCH_S390X
  DCHECK((reinterpret_cast<intptr_t>(smi) & 0xffffffff) == 0);
  int value = static_cast<int>(reinterpret_cast<intptr_t>(smi) >> 32);
  nihf(dst, Operand(value));
#else
  nilf(dst, Operand(reinterpret_cast<int>(smi)));
#endif
}

// Load a "pointer" sized value from the memory location
void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
                           Register scratch) {
  int offset = mem.offset();

  if (!scratch.is(no_reg) && !is_int20(offset)) {
    /* cannot use d-form */
    LoadIntLiteral(scratch, offset);
#if V8_TARGET_ARCH_S390X
    lg(dst, MemOperand(mem.rb(), scratch));
#else
    l(dst, MemOperand(mem.rb(), scratch));
#endif
  } else {
#if V8_TARGET_ARCH_S390X
    lg(dst, mem);
#else
    if (is_uint12(offset)) {
      l(dst, mem);
    } else {
      ly(dst, mem);
    }
#endif
  }
}

// Store a "pointer" sized value to the memory location
void MacroAssembler::StoreP(Register src, const MemOperand& mem,
                            Register scratch) {
  if (!is_int20(mem.offset())) {
    DCHECK(!scratch.is(no_reg));
    DCHECK(!scratch.is(r0));
    LoadIntLiteral(scratch, mem.offset());
#if V8_TARGET_ARCH_S390X
    stg(src, MemOperand(mem.rb(), scratch));
#else
    st(src, MemOperand(mem.rb(), scratch));
#endif
  } else {
#if V8_TARGET_ARCH_S390X
    stg(src, mem);
#else
    // StoreW will try to generate ST if the offset fits, otherwise
    // it'll generate STY.
    StoreW(src, mem);
#endif
  }
}

// Store a "pointer" sized constant to the memory location
void MacroAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
                            Register scratch) {
  // Relocations are not supported.
  DCHECK(opnd.rmode_ == kRelocInfo_NONEPTR);

  // Try to use MVGHI/MVHI.
  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
      mem.getIndexRegister().is(r0) && is_int16(opnd.imm_)) {
#if V8_TARGET_ARCH_S390X
    mvghi(mem, opnd);
#else
    mvhi(mem, opnd);
#endif
  } else {
    LoadImmP(scratch, opnd);
    StoreP(scratch, mem);
  }
}

void MacroAssembler::LoadMultipleP(Register dst1, Register dst2,
                                   const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
  DCHECK(is_int20(mem.offset()));
  lmg(dst1, dst2, mem);
#else
  if (is_uint12(mem.offset())) {
    lm(dst1, dst2, mem);
  } else {
    DCHECK(is_int20(mem.offset()));
    lmy(dst1, dst2, mem);
  }
#endif
}

void MacroAssembler::StoreMultipleP(Register src1, Register src2,
                                    const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
  DCHECK(is_int20(mem.offset()));
  stmg(src1, src2, mem);
#else
  if (is_uint12(mem.offset())) {
    stm(src1, src2, mem);
  } else {
    DCHECK(is_int20(mem.offset()));
    stmy(src1, src2, mem);
  }
#endif
}

void MacroAssembler::LoadMultipleW(Register dst1, Register dst2,
                                   const MemOperand& mem) {
  if (is_uint12(mem.offset())) {
    lm(dst1, dst2, mem);
  } else {
    DCHECK(is_int20(mem.offset()));
    lmy(dst1, dst2, mem);
  }
}

void MacroAssembler::StoreMultipleW(Register src1, Register src2,
                                    const MemOperand& mem) {
  if (is_uint12(mem.offset())) {
    stm(src1, src2, mem);
  } else {
    DCHECK(is_int20(mem.offset()));
    stmy(src1, src2, mem);
  }
}

// Load 32-bits and sign extend if necessary.
void MacroAssembler::LoadW(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  lgfr(dst, src);
#else
  if (!dst.is(src)) lr(dst, src);
#endif
}

// Load 32-bits and sign extend if necessary.
void MacroAssembler::LoadW(Register dst, const MemOperand& mem,
                           Register scratch) {
  int offset = mem.offset();

  if (!is_int20(offset)) {
    DCHECK(!scratch.is(no_reg));
    LoadIntLiteral(scratch, offset);
#if V8_TARGET_ARCH_S390X
    lgf(dst, MemOperand(mem.rb(), scratch));
#else
    l(dst, MemOperand(mem.rb(), scratch));
#endif
  } else {
#if V8_TARGET_ARCH_S390X
    lgf(dst, mem);
#else
    if (is_uint12(offset)) {
      l(dst, mem);
    } else {
      ly(dst, mem);
    }
#endif
  }
}

// Load 32-bits and zero extend if necessary.
void MacroAssembler::LoadlW(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  llgfr(dst, src);
#else
  if (!dst.is(src)) lr(dst, src);
#endif
}

// Variable length, depending on whether the offset fits into the immediate
// field.  MemOperand is of RX or RXY format.
void MacroAssembler::LoadlW(Register dst, const MemOperand& mem,
                            Register scratch) {
  Register base = mem.rb();
  int offset = mem.offset();

#if V8_TARGET_ARCH_S390X
  if (is_int20(offset)) {
    llgf(dst, mem);
  } else if (!scratch.is(no_reg)) {
    // Materialize offset into scratch register.
    LoadIntLiteral(scratch, offset);
    llgf(dst, MemOperand(base, scratch));
  } else {
    DCHECK(false);
  }
#else
  bool use_RXform = false;
  bool use_RXYform = false;
  if (is_uint12(offset)) {
    // RX-format supports unsigned 12-bit offsets.
    use_RXform = true;
  } else if (is_int20(offset)) {
    // RXY-format supports signed 20-bit offsets.
    use_RXYform = true;
  } else if (!scratch.is(no_reg)) {
    // Materialize offset into scratch register.
    LoadIntLiteral(scratch, offset);
  } else {
    DCHECK(false);
  }

  if (use_RXform) {
    l(dst, mem);
  } else if (use_RXYform) {
    ly(dst, mem);
  } else {
    ly(dst, MemOperand(base, scratch));
  }
#endif
}

void MacroAssembler::LoadLogicalHalfWordP(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
  llgh(dst, mem);
#else
  llh(dst, mem);
#endif
}

void MacroAssembler::LoadLogicalHalfWordP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  llghr(dst, src);
#else
  llhr(dst, src);
#endif
}

void MacroAssembler::LoadB(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
  lgb(dst, mem);
#else
  lb(dst, mem);
#endif
}

void MacroAssembler::LoadB(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  lgbr(dst, src);
#else
  lbr(dst, src);
#endif
}

void MacroAssembler::LoadlB(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
  llgc(dst, mem);
#else
  llc(dst, mem);
#endif
}

void MacroAssembler::LoadlB(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  llgcr(dst, src);
#else
  llcr(dst, src);
#endif
}

void MacroAssembler::LoadLogicalReversedWordP(Register dst,
                                              const MemOperand& mem) {
  lrv(dst, mem);
  LoadlW(dst, dst);
}

void MacroAssembler::LoadLogicalReversedHalfWordP(Register dst,
                                                  const MemOperand& mem) {
  lrvh(dst, mem);
  LoadLogicalHalfWordP(dst, dst);
}

// Load And Test (Reg <- Reg)
void MacroAssembler::LoadAndTest32(Register dst, Register src) {
  ltr(dst, src);
}

// Load And Test
//     (Register dst(ptr) = Register src (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
void MacroAssembler::LoadAndTestP_ExtendSrc(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  ltgfr(dst, src);
#else
  ltr(dst, src);
#endif
}

// Load And Test Pointer Sized (Reg <- Reg)
void MacroAssembler::LoadAndTestP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  ltgr(dst, src);
#else
  ltr(dst, src);
#endif
}

// Load And Test 32-bit (Reg <- Mem)
void MacroAssembler::LoadAndTest32(Register dst, const MemOperand& mem) {
  lt_z(dst, mem);
}

// Load And Test Pointer Sized (Reg <- Mem)
void MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
  ltg(dst, mem);
#else
  lt_z(dst, mem);
#endif
}

// Load On Condition Pointer Sized (Reg <- Reg)
void MacroAssembler::LoadOnConditionP(Condition cond, Register dst,
                                      Register src) {
#if V8_TARGET_ARCH_S390X
  locgr(cond, dst, src);
#else
  locr(cond, dst, src);
#endif
}

// Load Double Precision (64-bit) Floating Point number from memory
void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem) {
  // Both 32-bit and 64-bit modes use 64-bit floating point registers.
  if (is_uint12(mem.offset())) {
    ld(dst, mem);
  } else {
    ldy(dst, mem);
  }
}

// Load Single Precision (32-bit) Floating Point number from memory
void MacroAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem) {
  if (is_uint12(mem.offset())) {
    le_z(dst, mem);
  } else {
    DCHECK(is_int20(mem.offset()));
    ley(dst, mem);
  }
}

// Load Single Precision (32-bit) Floating Point number from memory,
// and convert to Double Precision (64-bit)
void MacroAssembler::LoadFloat32ConvertToDouble(DoubleRegister dst,
                                                const MemOperand& mem) {
  LoadFloat32(dst, mem);
  ldebr(dst, dst);
}

// Store Double Precision (64-bit) Floating Point number to memory
void MacroAssembler::StoreDouble(DoubleRegister dst, const MemOperand& mem) {
  if (is_uint12(mem.offset())) {
    std(dst, mem);
  } else {
    stdy(dst, mem);
  }
}

// Store Single Precision (32-bit) Floating Point number to memory
void MacroAssembler::StoreFloat32(DoubleRegister src, const MemOperand& mem) {
  if (is_uint12(mem.offset())) {
    ste(src, mem);
  } else {
    stey(src, mem);
  }
}

// Convert Double Precision (64-bit) to Single Precision (32-bit)
// and store the resulting Float32 to memory
void MacroAssembler::StoreDoubleAsFloat32(DoubleRegister src,
                                          const MemOperand& mem,
                                          DoubleRegister scratch) {
  ledbr(scratch, src);
  StoreFloat32(scratch, mem);
}

// Variable length, depending on whether the offset fits into the immediate
// field.  MemOperand is of RX or RXY format.
void MacroAssembler::StoreW(Register src, const MemOperand& mem,
                            Register scratch) {
  Register base = mem.rb();
  int offset = mem.offset();

  bool use_RXform = false;
  bool use_RXYform = false;

  if (is_uint12(offset)) {
    // RX-format supports unsigned 12-bit offsets.
    use_RXform = true;
  } else if (is_int20(offset)) {
    // RXY-format supports signed 20-bit offsets.
    use_RXYform = true;
  } else if (!scratch.is(no_reg)) {
    // Materialize offset into scratch register.
    LoadIntLiteral(scratch, offset);
  } else {
    // scratch is no_reg
    DCHECK(false);
  }

  if (use_RXform) {
    st(src, mem);
  } else if (use_RXYform) {
    sty(src, mem);
  } else {
    StoreW(src, MemOperand(base, scratch));
  }
}

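// Encoding choice above, by example (illustrative offsets only): offset 4000
// fits in the unsigned 12-bit field, so the 4-byte RX-format ST is used;
// offset -8 (or 100000) needs the signed 20-bit field of the 6-byte
// RXY-format STY; offset 1 << 20 fits neither, so it is materialized into
// the scratch register and folded in as an index via the recursive call.
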
// Loads a 16-bit halfword value from memory and sign extends it to a
// pointer sized register.
void MacroAssembler::LoadHalfWordP(Register dst, const MemOperand& mem,
                                   Register scratch) {
  Register base = mem.rb();
  int offset = mem.offset();

  if (!is_int20(offset)) {
    DCHECK(!scratch.is(no_reg));
    LoadIntLiteral(scratch, offset);
#if V8_TARGET_ARCH_S390X
    lgh(dst, MemOperand(base, scratch));
#else
    lh(dst, MemOperand(base, scratch));
#endif
  } else {
#if V8_TARGET_ARCH_S390X
    lgh(dst, mem);
#else
    if (is_uint12(offset)) {
      lh(dst, mem);
    } else {
      lhy(dst, mem);
    }
#endif
  }
}

// Variable length, depending on whether the offset fits into the immediate
// field.  MemOperand currently only supports d-form.
void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
                                   Register scratch) {
  Register base = mem.rb();
  int offset = mem.offset();

  if (is_uint12(offset)) {
    sth(src, mem);
  } else if (is_int20(offset)) {
    sthy(src, mem);
  } else {
    DCHECK(!scratch.is(no_reg));
    LoadIntLiteral(scratch, offset);
    sth(src, MemOperand(base, scratch));
  }
}

// Variable length, depending on whether the offset fits into the immediate
// field.  MemOperand currently only supports d-form.
void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
                               Register scratch) {
  Register base = mem.rb();
  int offset = mem.offset();

  if (is_uint12(offset)) {
    stc(src, mem);
  } else if (is_int20(offset)) {
    stcy(src, mem);
  } else {
    DCHECK(!scratch.is(no_reg));
    LoadIntLiteral(scratch, offset);
    stc(src, MemOperand(base, scratch));
  }
}

// Shift left logical for 32-bit integer types.
void MacroAssembler::ShiftLeft(Register dst, Register src, const Operand& val) {
  if (dst.is(src)) {
    sll(dst, val);
  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    sllk(dst, src, val);
  } else {
    lr(dst, src);
    sll(dst, val);
  }
}

// Shift left logical for 32-bit integer types.
void MacroAssembler::ShiftLeft(Register dst, Register src, Register val) {
  if (dst.is(src)) {
    sll(dst, val);
  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    sllk(dst, src, val);
  } else {
    DCHECK(!dst.is(val));  // The lr/sll path clobbers val.
    lr(dst, src);
    sll(dst, val);
  }
}

// Shift right logical for 32-bit integer types.
void MacroAssembler::ShiftRight(Register dst, Register src,
                                const Operand& val) {
  if (dst.is(src)) {
    srl(dst, val);
  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    srlk(dst, src, val);
  } else {
    lr(dst, src);
    srl(dst, val);
  }
}

// Shift right logical for 32-bit integer types.
void MacroAssembler::ShiftRight(Register dst, Register src, Register val) {
  if (dst.is(src)) {
    srl(dst, val);
  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    srlk(dst, src, val);
  } else {
    DCHECK(!dst.is(val));  // The lr/srl path clobbers val.
    lr(dst, src);
    srl(dst, val);
  }
}

// Shift left arithmetic for 32-bit integer types.
void MacroAssembler::ShiftLeftArith(Register dst, Register src,
                                    const Operand& val) {
  if (dst.is(src)) {
    sla(dst, val);
  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    slak(dst, src, val);
  } else {
    lr(dst, src);
    sla(dst, val);
  }
}

// Shift left arithmetic for 32-bit integer types.
void MacroAssembler::ShiftLeftArith(Register dst, Register src, Register val) {
  if (dst.is(src)) {
    sla(dst, val);
  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    slak(dst, src, val);
  } else {
    DCHECK(!dst.is(val));  // The lr/sla path clobbers val.
    lr(dst, src);
    sla(dst, val);
  }
}

// Shift right arithmetic for 32-bit integer types.
void MacroAssembler::ShiftRightArith(Register dst, Register src,
                                     const Operand& val) {
  if (dst.is(src)) {
    sra(dst, val);
  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    srak(dst, src, val);
  } else {
    lr(dst, src);
    sra(dst, val);
  }
}

// Shift right arithmetic for 32-bit integer types.
void MacroAssembler::ShiftRightArith(Register dst, Register src, Register val) {
  if (dst.is(src)) {
    sra(dst, val);
  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    srak(dst, src, val);
  } else {
    DCHECK(!dst.is(val));  // The lr/sra path clobbers val.
    lr(dst, src);
    sra(dst, val);
  }
}

// Clear the rightmost number of bits given by val.
void MacroAssembler::ClearRightImm(Register dst, Register src,
                                   const Operand& val) {
  int numBitsToClear = val.imm_ % (kPointerSize * 8);

  // Try to use RISBG if possible.
  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
    int endBit = 63 - numBitsToClear;
    risbg(dst, src, Operand::Zero(), Operand(endBit), Operand::Zero(), true);
    return;
  }

  uint64_t hexMask = ~((1L << numBitsToClear) - 1);

  // The S390 AND instructions clobber the source.  Make a copy if necessary.
  if (!dst.is(src)) LoadRR(dst, src);

  if (numBitsToClear <= 16) {
    nill(dst, Operand(static_cast<uint16_t>(hexMask)));
  } else if (numBitsToClear <= 32) {
    nilf(dst, Operand(static_cast<uint32_t>(hexMask)));
  } else if (numBitsToClear <= 64) {
    nilf(dst, Operand(static_cast<intptr_t>(0)));
    nihf(dst, Operand(hexMask >> 32));
  }
}

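// For example (illustrative values): ClearRightImm(r3, r4, Operand(3))
// clears the 3 low bits.  With GENERAL_INSTR_EXT it emits
// risbg r3, r4, 0, 60, 0 (keep bits 0..60, zero bits 61..63); otherwise
// hexMask = ~0x7 and, since 3 <= 16, a single NILL with 0xFFF8 suffices.
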
void MacroAssembler::Popcnt32(Register dst, Register src) {
  DCHECK(!src.is(r0));
  DCHECK(!dst.is(r0));

  popcnt(dst, src);
  ShiftRight(r0, dst, Operand(16));
  ar(dst, r0);
  ShiftRight(r0, dst, Operand(8));
  ar(dst, r0);
  LoadlB(dst, dst);
}

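// The POPCNT instruction produces a per-byte population count rather than a
// single total, so the shift/add pairs above fold the byte counts into the
// low byte.  Worked example (illustrative value): for src = 0x10203040 the
// per-byte counts are 0x01010201; adding the >>16 copy gives 0x01010302,
// adding the >>8 copy gives 0x01020405, and LoadlB keeps the low byte 0x05,
// the correct total of 5 set bits.
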
#ifdef V8_TARGET_ARCH_S390X
void MacroAssembler::Popcnt64(Register dst, Register src) {
  DCHECK(!src.is(r0));
  DCHECK(!dst.is(r0));

  popcnt(dst, src);
  ShiftRightP(r0, dst, Operand(32));
  AddP(dst, r0);
  ShiftRightP(r0, dst, Operand(16));
  AddP(dst, r0);
  ShiftRightP(r0, dst, Operand(8));
  AddP(dst, r0);
  LoadlB(dst, dst);
}
#endif

#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
                Register reg5, Register reg6, Register reg7, Register reg8,
                Register reg9, Register reg10) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
                        reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
                        reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
                        reg10.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  if (reg9.is_valid()) regs |= reg9.bit();
  if (reg10.is_valid()) regs |= reg10.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}
#endif

CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size,
                         FlushICache flush_cache)
    : address_(address),
      size_(size),
      masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
      flush_cache_(flush_cache) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate
  // size bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  if (flush_cache_ == FLUSH) {
    Assembler::FlushICache(masm_.isolate(), address_, size_);
  }

  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

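// Typical usage is scope-based (a sketch; the patch size, register, and
// masm() accessor are assumptions here for illustration):
//
//   CodePatcher patcher(isolate, target_address, 4);   // patch 4 bytes
//   patcher.masm()->lghi(r2, Operand(1));              // 4-byte LGHI
//
// When `patcher` goes out of scope, the destructor flushes the icache (if
// requested) and the DCHECKs verify that exactly `size` bytes were emitted.
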
void MacroAssembler::TruncatingDiv(Register result, Register dividend,
                                   int32_t divisor) {
  DCHECK(!dividend.is(result));
  DCHECK(!dividend.is(r0));
  DCHECK(!result.is(r0));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
#ifdef V8_TARGET_ARCH_S390X
  LoadRR(result, dividend);
  MulP(result, Operand(mag.multiplier));
  ShiftRightArithP(result, result, Operand(32));
#else
  lay(sp, MemOperand(sp, -kPointerSize));
  StoreP(r1, MemOperand(sp));

  mov(r1, Operand(mag.multiplier));
  mr_z(r0, dividend);  // r0:r1 = r1 * dividend

  LoadRR(result, r0);
  LoadP(r1, MemOperand(sp));
  la(sp, MemOperand(sp, kPointerSize));
#endif
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) {
    AddP(result, dividend);
  }
  if (divisor < 0 && !neg && mag.multiplier > 0) {
    SubP(result, dividend);
  }
  if (mag.shift > 0) ShiftRightArith(result, result, Operand(mag.shift));
  ExtractBit(r0, dividend, 31);
  AddP(result, r0);
}

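// This is the standard multiply-by-magic-number signed division (Hacker's
// Delight style) that SignedDivisionByConstant computes.  Worked example
// (assuming the usual magic constants for divisor 3: multiplier 0x55555556,
// shift 0): for dividend 7, the high 32 bits of 7 * 0x55555556 are 2, the
// multiplier's sign bit is clear so no correction add/sub fires, and adding
// the dividend's sign bit (0) leaves result = 2 = trunc(7 / 3).  For
// dividend -7 the high word is -3 and the sign-bit add yields -2, matching
// truncation toward zero.
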
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_S390