// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <assert.h>  // For assert
#include <limits.h>  // For LONG_MIN, LONG_MAX.

#if V8_TARGET_ARCH_S390

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
#include "src/instruction-stream.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"

#include "src/s390/macro-assembler-s390.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* isolate,
                               const AssemblerOptions& options, void* buffer,
                               int size, CodeObjectRequired create_code_object)
    : TurboAssembler(isolate, options, buffer, size, create_code_object) {
  if (create_code_object == CodeObjectRequired::kYes) {
    // Unlike TurboAssembler, which can be used off the main thread and may not
    // allocate, macro assembler creates its own copy of the self-reference
    // marker in order to disambiguate between self-references during nested
    // code generation (e.g.: codegen of the current object triggers stub
    // compilation through CodeStub::GetCode()).
    code_object_ = Handle<HeapObject>::New(
        *isolate->factory()->NewSelfReferenceMarker(), isolate);
  }
}

int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                                    Register exclusion1,
                                                    Register exclusion2,
                                                    Register exclusion3) const {
  int bytes = 0;
  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = kJSCallerSaved & ~exclusions;
  bytes += NumRegs(list) * kPointerSize;

  if (fp_mode == kSaveFPRegs) {
    bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
  }

  return bytes;
}

int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                    Register exclusion2, Register exclusion3) {
  int bytes = 0;
  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = kJSCallerSaved & ~exclusions;
  MultiPush(list);
  bytes += NumRegs(list) * kPointerSize;

  if (fp_mode == kSaveFPRegs) {
    MultiPushDoubles(kCallerSavedDoubles);
    bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
  }

  return bytes;
}

int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                   Register exclusion2, Register exclusion3) {
  int bytes = 0;
  if (fp_mode == kSaveFPRegs) {
    MultiPopDoubles(kCallerSavedDoubles);
    bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
  }

  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = kJSCallerSaved & ~exclusions;
  MultiPop(list);
  bytes += NumRegs(list) * kPointerSize;

  return bytes;
}
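
// Usage sketch (illustrative, not taken from the original source): these
// helpers typically bracket a call out to C code while keeping selected
// registers live, e.g.
//
//   int bytes = PushCallerSaved(kSaveFPRegs, r2);  // r2 is excluded
//   ... call into C ...
//   PopCallerSaved(kSaveFPRegs, r2);
//
// RequiredStackSizeForCallerSaved computes the same byte count without
// emitting any code, so frame sizes can be derived ahead of time.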

void TurboAssembler::LoadFromConstantsTable(Register destination,
                                            int constant_index) {
  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
      Heap::kBuiltinsConstantsTableRootIndex));

  const uint32_t offset =
      FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;

  CHECK(is_uint19(offset));
  DCHECK_NE(destination, r0);
  LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
  LoadP(destination, MemOperand(destination, offset), r1);
}

void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
  LoadP(destination, MemOperand(kRootRegister, offset));
}

void TurboAssembler::LoadRootRegisterOffset(Register destination,
                                            intptr_t offset) {
  if (offset == 0) {
    LoadRR(destination, kRootRegister);
  } else if (is_uint12(offset)) {
    la(destination, MemOperand(kRootRegister, offset));
  } else {
    DCHECK(is_int20(offset));
    lay(destination, MemOperand(kRootRegister, offset));
  }
}
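
// Note on the LA/LAY split above: on z/Architecture, LA takes an unsigned
// 12-bit displacement (0..4095) while LAY takes a signed 20-bit displacement,
// so small non-negative offsets get the shorter encoding. Illustrative calls:
//
//   LoadRootRegisterOffset(r3, 8);        // fits is_uint12 -> emits LA
//   LoadRootRegisterOffset(r3, 0x10000);  // needs is_int20 -> emits LAY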

void TurboAssembler::Jump(Register target, Condition cond) { b(cond, target); }

void MacroAssembler::JumpToJSEntry(Register target) {
  Move(ip, target);
  Jump(ip);
}

void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
  Label skip;

  if (cond != al) b(NegateCondition(cond), &skip);

  DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);

  mov(ip, Operand(target, rmode));
  b(ip);

  bind(&skip);
}

void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(static_cast<intptr_t>(target), rmode, cond);
}

void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      Register scratch = r1;
      IndirectLoadConstant(scratch, code);
      la(scratch, MemOperand(scratch, Code::kHeaderSize - kHeapObjectTag));
      b(cond, scratch);
      return;
    } else if (options().inline_offheap_trampolines) {
      int builtin_index = Builtins::kNoBuiltinId;
      if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
          Builtins::IsIsolateIndependent(builtin_index)) {
        // Inline the trampoline.
        RecordCommentForOffHeapTrampoline(builtin_index);
        EmbeddedData d = EmbeddedData::FromBlob();
        Address entry = d.InstructionStartOfBuiltin(builtin_index);
        // Use ip directly instead of using UseScratchRegisterScope, as we do
        // not preserve scratch registers across calls.
        mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
        Jump(ip, cond);
        return;
      }
    }
  }
  jump(code, rmode, cond);
}

void TurboAssembler::Call(Register target) {
  // Branch to target via indirect branch.
  basr(r14, target);
}

void MacroAssembler::CallJSEntry(Register target) {
  DCHECK(target == r4);
  Call(target);
}

int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
                                                   RelocInfo::Mode rmode,
                                                   Condition cond) {
  // S390 Assembler::move sequence is IILF / IIHF.
  int size;
#if V8_TARGET_ARCH_S390X
  size = 14;  // IILF + IIHF + BASR
#else
  size = 8;  // IILF + BASR
#endif
  return size;
}

void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(cond == al);

  mov(ip, Operand(target, rmode));
  basr(r14, ip);
}

void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);

  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      // Use ip directly instead of using UseScratchRegisterScope, as we do not
      // preserve scratch registers across calls.
      IndirectLoadConstant(ip, code);
      la(ip, MemOperand(ip, Code::kHeaderSize - kHeapObjectTag));
      Call(ip);
      return;
    } else if (options().inline_offheap_trampolines) {
      int builtin_index = Builtins::kNoBuiltinId;
      if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
          Builtins::IsIsolateIndependent(builtin_index)) {
        // Inline the trampoline.
        RecordCommentForOffHeapTrampoline(builtin_index);
        DCHECK(Builtins::IsBuiltinId(builtin_index));
        EmbeddedData d = EmbeddedData::FromBlob();
        Address entry = d.InstructionStartOfBuiltin(builtin_index);
        // Use ip directly instead of using UseScratchRegisterScope, as we do
        // not preserve scratch registers across calls.
        mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
        Call(ip);
        return;
      }
    }
  }
  call(code, rmode);
}

void TurboAssembler::Drop(int count) {
  if (count > 0) {
    int total = count * kPointerSize;
    if (is_uint12(total)) {
      la(sp, MemOperand(sp, total));
    } else if (is_int20(total)) {
      lay(sp, MemOperand(sp, total));
    } else {
      AddP(sp, Operand(total));
    }
  }
}
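
// Illustrative usage (the pushes are an assumption for the example): Drop
// releases stack slots without reading them, so
//
//   push(r2);
//   push(r3);
//   Drop(2);  // sp returns to where it was before the two pushes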

void TurboAssembler::Drop(Register count, Register scratch) {
  ShiftLeftP(scratch, count, Operand(kPointerSizeLog2));
  AddP(sp, sp, scratch);
}

void TurboAssembler::Call(Label* target) { b(r14, target); }

void TurboAssembler::Push(Handle<HeapObject> handle) {
  mov(r0, Operand(handle));
  push(r0);
}

void TurboAssembler::Push(Smi* smi) {
  mov(r0, Operand(smi));
  push(r0);
}

void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadConstant(dst, value);
      return;
    }
  }
  mov(dst, Operand(value));
}

void TurboAssembler::Move(Register dst, ExternalReference reference) {
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadExternalReference(dst, reference);
      return;
    }
  }
  mov(dst, Operand(reference));
}

void TurboAssembler::Move(Register dst, Register src, Condition cond) {
  if (dst != src) {
    if (cond == al) {
      LoadRR(dst, src);
    } else {
      LoadOnConditionP(cond, dst, src);
    }
  }
}

void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
  if (dst != src) {
    ldr(dst, src);
  }
}

// Wrapper around Assembler::mvc (SS-a format)
void TurboAssembler::MoveChar(const MemOperand& opnd1, const MemOperand& opnd2,
                              const Operand& length) {
  mvc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
}

// Wrapper around Assembler::clc (SS-a format)
void TurboAssembler::CompareLogicalChar(const MemOperand& opnd1,
                                        const MemOperand& opnd2,
                                        const Operand& length) {
  clc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
}

// Wrapper around Assembler::xc (SS-a format)
void TurboAssembler::ExclusiveOrChar(const MemOperand& opnd1,
                                     const MemOperand& opnd2,
                                     const Operand& length) {
  xc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
}
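
// The SS-format instructions wrapped above (MVC, CLC, XC) encode the operand
// length minus one in their length field, which is why each wrapper passes
// length.immediate() - 1 down to the assembler. Illustrative call:
//
//   MoveChar(MemOperand(sp), MemOperand(r7), Operand(kPointerSize));
//
// copies kPointerSize bytes and encodes a length code of kPointerSize - 1.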

// Wrapper around Assembler::risbg(n) (RIE-f)
void TurboAssembler::RotateInsertSelectBits(Register dst, Register src,
                                            const Operand& startBit,
                                            const Operand& endBit,
                                            const Operand& shiftAmt,
                                            bool zeroBits) {
  if (zeroBits)
    // Set the topmost bit of the I4/EndBit field to zero out any unselected
    // bits.
    risbg(dst, src, startBit,
          Operand(static_cast<intptr_t>(endBit.immediate() | 0x80)), shiftAmt);
  else
    risbg(dst, src, startBit, endBit, shiftAmt);
}
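
// Illustrative use (operand values are assumptions for the example, not from
// the original source): selecting the low byte of src while zeroing the rest,
//
//   RotateInsertSelectBits(dst, src, Operand(56), Operand(63),
//                          Operand::Zero(), true);
//
// rotates by zero, keeps bits 56..63, and clears all other bits of dst.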

void TurboAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc,
                                              Label* L) {
#if V8_TARGET_ARCH_S390X
  brxhg(dst, inc, L);
#else
  brxh(dst, inc, L);
#endif  // V8_TARGET_ARCH_S390X
}

void TurboAssembler::MultiPush(RegList regs, Register location) {
  int16_t num_to_push = base::bits::CountPopulation(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  SubP(location, location, Operand(stack_offset));
  for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      StoreP(ToRegister(i), MemOperand(location, stack_offset));
    }
  }
}

void TurboAssembler::MultiPop(RegList regs, Register location) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < Register::kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      LoadP(ToRegister(i), MemOperand(location, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  AddP(location, location, Operand(stack_offset));
}
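
// Illustrative usage: registers are stored from the highest code downwards,
// so after
//
//   MultiPush(r3.bit() | r5.bit());
//
// r3 sits at the lower stack address (next to sp) and r5 above it; a MultiPop
// with the same RegList restores both. The doubles variants below follow the
// same layout with kDoubleSize slots.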

void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
  int16_t num_to_push = base::bits::CountPopulation(dregs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  SubP(location, location, Operand(stack_offset));
  for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
    if ((dregs & (1 << i)) != 0) {
      DoubleRegister dreg = DoubleRegister::from_code(i);
      stack_offset -= kDoubleSize;
      StoreDouble(dreg, MemOperand(location, stack_offset));
    }
  }
}

void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
    if ((dregs & (1 << i)) != 0) {
      DoubleRegister dreg = DoubleRegister::from_code(i);
      LoadDouble(dreg, MemOperand(location, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  AddP(location, location, Operand(stack_offset));
}

void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
                              Condition) {
  LoadP(destination, MemOperand(kRootRegister, RootRegisterOffset(index)), r0);
}

void MacroAssembler::RecordWriteField(Register object, int offset,
                                      Register value, Register dst,
                                      LinkRegisterStatus lr_status,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip the barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  lay(dst, MemOperand(object, offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    AndP(r0, dst, Operand(kPointerSize - 1));
    beq(&ok, Label::kNear);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
              OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
    mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
  }
}

void TurboAssembler::SaveRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  RegList regs = 0;
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs |= Register::from_code(i).bit();
    }
  }
  MultiPush(regs);
}

void TurboAssembler::RestoreRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  RegList regs = 0;
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs |= Register::from_code(i).bit();
    }
  }
  MultiPop(regs);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
  // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
  // i.e. we always emit the remembered set and save FP registers in
  // RecordWriteStub. If a large performance regression is observed, we should
  // use these values to avoid unnecessary work.

  Callable const callable =
      Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
  RegList registers = callable.descriptor().allocatable_registers();

  SaveRegisters(registers);
  Register object_parameter(callable.descriptor().GetRegisterParameter(
      RecordWriteDescriptor::kObject));
  Register slot_parameter(
      callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
  Register isolate_parameter(callable.descriptor().GetRegisterParameter(
      RecordWriteDescriptor::kIsolate));
  Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
      RecordWriteDescriptor::kRememberedSet));
  Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
      RecordWriteDescriptor::kFPMode));

  Push(object);
  Push(address);

  Pop(slot_parameter);
  Pop(object_parameter);

  Move(isolate_parameter, ExternalReference::isolate_address(isolate()));
  Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
  Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
  Call(callable.code(), RelocInfo::CODE_TARGET);

  RestoreRegisters(registers);
}
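
// Note on the Push/Pop pair above: routing |object| and |address| through the
// stack moves them into the descriptor-assigned parameter registers even when
// the incoming registers alias those parameters (e.g. when |object| already
// lives in the slot parameter's register), which a pair of plain register
// moves could get wrong.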

// Will clobber 4 registers: object, address, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object, Register address,
                                 Register value, LinkRegisterStatus lr_status,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  DCHECK(object != value);
  if (emit_debug_code()) {
    CmpP(value, MemOperand(address));
    Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }
  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    push(r14);
  }
  CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(r14);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
                   value);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
  }
}

void TurboAssembler::PushCommonFrame(Register marker_reg) {
  int fp_delta = 0;
  CleanseP(r14);
  if (marker_reg.is_valid()) {
    Push(r14, fp, marker_reg);
    fp_delta = 1;
  } else {
    Push(r14, fp);
    fp_delta = 0;
  }
  la(fp, MemOperand(sp, fp_delta * kPointerSize));
}

void TurboAssembler::PopCommonFrame(Register marker_reg) {
  if (marker_reg.is_valid()) {
    Pop(r14, fp, marker_reg);
  } else {
    Pop(r14, fp);
  }
}

void TurboAssembler::PushStandardFrame(Register function_reg) {
  int fp_delta = 0;
  CleanseP(r14);
  if (function_reg.is_valid()) {
    Push(r14, fp, cp, function_reg);
    fp_delta = 2;
  } else {
    Push(r14, fp, cp);
    fp_delta = 1;
  }
  la(fp, MemOperand(sp, fp_delta * kPointerSize));
}

void TurboAssembler::RestoreFrameStateForTailCall() {
  // if (FLAG_enable_embedded_constant_pool) {
  //   LoadP(kConstantPoolRegister,
  //         MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
  //   set_constant_pool_available(false);
  // }
  DCHECK(!FLAG_enable_embedded_constant_pool);
  LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
  LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
}

// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK_GE(num_unsaved, 0);
  if (num_unsaved > 0) {
    lay(sp, MemOperand(sp, -(num_unsaved * kPointerSize)));
  }
  MultiPush(kSafepointSavedRegisters);
}

void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  if (num_unsaved > 0) {
    la(sp, MemOperand(sp, num_unsaved * kPointerSize));
  }
}

int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  RegList regs = kSafepointSavedRegisters;
  int index = 0;

  DCHECK(reg_code >= 0 && reg_code < kNumRegisters);

  for (int16_t i = 0; i < reg_code; i++) {
    if ((regs & (1 << i)) != 0) {
      index++;
    }
  }

  return index;
}

void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
                                     const DoubleRegister src) {
  // Turn potential sNaN into qNaN.
  if (dst != src) ldr(dst, src);
  lzdr(kDoubleRegZero);
  sdbr(dst, kDoubleRegZero);
}
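
// Why subtracting zero works: per IEEE 754, an arithmetic operation on a
// signaling NaN delivers the corresponding quiet NaN, while x - (+0.0) leaves
// every other value (including -0.0 under round-to-nearest) unchanged, so the
// SDBR against kDoubleRegZero quiets a potential sNaN without disturbing real
// numbers.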

void TurboAssembler::ConvertIntToDouble(DoubleRegister dst, Register src) {
  cdfbr(dst, src);
}

void TurboAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst,
                                                Register src) {
  if (CpuFeatures::IsSupported(FLOATING_POINT_EXT)) {
    cdlfbr(Condition(5), Condition(0), dst, src);
  } else {
    // Zero-extend src.
    llgfr(src, src);
    // Convert to double.
    cdgbr(dst, src);
  }
}

void TurboAssembler::ConvertIntToFloat(DoubleRegister dst, Register src) {
  cefbra(Condition(4), dst, src);
}

void TurboAssembler::ConvertUnsignedIntToFloat(DoubleRegister dst,
                                               Register src) {
  celfbr(Condition(4), Condition(0), dst, src);
}

void TurboAssembler::ConvertInt64ToFloat(DoubleRegister double_dst,
                                         Register src) {
  cegbr(double_dst, src);
}

void TurboAssembler::ConvertInt64ToDouble(DoubleRegister double_dst,
                                          Register src) {
  cdgbr(double_dst, src);
}

void TurboAssembler::ConvertUnsignedInt64ToFloat(DoubleRegister double_dst,
                                                 Register src) {
  celgbr(Condition(0), Condition(0), double_dst, src);
}

void TurboAssembler::ConvertUnsignedInt64ToDouble(DoubleRegister double_dst,
                                                  Register src) {
  cdlgbr(Condition(0), Condition(0), double_dst, src);
}

void TurboAssembler::ConvertFloat32ToInt64(const Register dst,
                                           const DoubleRegister double_input,
                                           FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      UNIMPLEMENTED();
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
  cgebr(m, dst, double_input);
}
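
// The Condition() values used as rounding masks in the surrounding conversion
// helpers follow the z/Architecture rounding-method encoding: 4 = round to
// nearest with ties to even, 5 = round toward zero, 6 = round toward
// +infinity, and 7 = round toward -infinity; a mask of 0 defers to the
// current rounding mode in the FPC register.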

void TurboAssembler::ConvertDoubleToInt64(const Register dst,
                                          const DoubleRegister double_input,
                                          FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      UNIMPLEMENTED();
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
  cgdbr(m, dst, double_input);
}

void TurboAssembler::ConvertDoubleToInt32(const Register dst,
                                          const DoubleRegister double_input,
                                          FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      m = Condition(4);
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
#ifdef V8_TARGET_ARCH_S390X
  lghi(dst, Operand::Zero());
#endif
  cfdbr(m, dst, double_input);
}

void TurboAssembler::ConvertFloat32ToInt32(const Register result,
                                           const DoubleRegister double_input,
                                           FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      m = Condition(4);
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
#ifdef V8_TARGET_ARCH_S390X
  lghi(result, Operand::Zero());
#endif
  cfebr(m, result, double_input);
}

void TurboAssembler::ConvertFloat32ToUnsignedInt32(
    const Register result, const DoubleRegister double_input,
    FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      UNIMPLEMENTED();
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
#ifdef V8_TARGET_ARCH_S390X
  lghi(result, Operand::Zero());
#endif
  clfebr(m, Condition(0), result, double_input);
}

void TurboAssembler::ConvertFloat32ToUnsignedInt64(
    const Register result, const DoubleRegister double_input,
    FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      UNIMPLEMENTED();
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
  clgebr(m, Condition(0), result, double_input);
}

void TurboAssembler::ConvertDoubleToUnsignedInt64(
    const Register dst, const DoubleRegister double_input,
    FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      UNIMPLEMENTED();
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
  clgdbr(m, Condition(0), dst, double_input);
}

void TurboAssembler::ConvertDoubleToUnsignedInt32(
    const Register dst, const DoubleRegister double_input,
    FPRoundingMode rounding_mode) {
  Condition m = Condition(0);
  switch (rounding_mode) {
    case kRoundToZero:
      m = Condition(5);
      break;
    case kRoundToNearest:
      UNIMPLEMENTED();
      break;
    case kRoundToPlusInf:
      m = Condition(6);
      break;
    case kRoundToMinusInf:
      m = Condition(7);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
#ifdef V8_TARGET_ARCH_S390X
  lghi(dst, Operand::Zero());
#endif
  clfdbr(m, Condition(0), dst, double_input);
}

#if !V8_TARGET_ARCH_S390X
void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
                                   Register src_low, Register src_high,
                                   Register scratch, Register shift) {
  LoadRR(r0, src_high);
  LoadRR(r1, src_low);
  sldl(r0, shift, Operand::Zero());
  LoadRR(dst_high, r0);
  LoadRR(dst_low, r1);
}

void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
                                   Register src_low, Register src_high,
                                   uint32_t shift) {
  LoadRR(r0, src_high);
  LoadRR(r1, src_low);
  sldl(r0, r0, Operand(shift));
  LoadRR(dst_high, r0);
  LoadRR(dst_low, r1);
}

void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
                                    Register src_low, Register src_high,
                                    Register scratch, Register shift) {
  LoadRR(r0, src_high);
  LoadRR(r1, src_low);
  srdl(r0, shift, Operand::Zero());
  LoadRR(dst_high, r0);
  LoadRR(dst_low, r1);
}

void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
                                    Register src_low, Register src_high,
                                    uint32_t shift) {
  LoadRR(r0, src_high);
  LoadRR(r1, src_low);
  srdl(r0, Operand(shift));
  LoadRR(dst_high, r0);
  LoadRR(dst_low, r1);
}

void TurboAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
                                         Register src_low, Register src_high,
                                         Register scratch, Register shift) {
  LoadRR(r0, src_high);
  LoadRR(r1, src_low);
  srda(r0, shift, Operand::Zero());
  LoadRR(dst_high, r0);
  LoadRR(dst_low, r1);
}

void TurboAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
                                         Register src_low, Register src_high,
                                         uint32_t shift) {
  LoadRR(r0, src_high);
  LoadRR(r1, src_low);
  srda(r0, r0, Operand(shift));
  LoadRR(dst_high, r0);
  LoadRR(dst_low, r1);
}
#endif
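
// The pair-shift helpers above (32-bit build only) stage their operands in
// r0/r1 because SLDL, SRDL and SRDA operate on an even-odd general register
// pair: the high word travels in the even register (r0), the low word in the
// odd register (r1), and the results are copied back out afterwards.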

void TurboAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
  lgdr(dst, src);
}

void TurboAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) {
  ldgr(dst, src);
}

void TurboAssembler::StubPrologue(StackFrame::Type type, Register base,
                                  int prologue_offset) {
  {
    ConstantPoolUnavailableScope constant_pool_unavailable(this);
    Load(r1, Operand(StackFrame::TypeToMarker(type)));
    PushCommonFrame(r1);
  }
}

void TurboAssembler::Prologue(Register base, int prologue_offset) {
  DCHECK(base != no_reg);
  PushStandardFrame(r3);
}

void TurboAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  // We create a stack frame with:
  //    Return Addr <-- old sp
  //    Old FP      <-- new fp
  //    CP
  //    type
  //    CodeObject  <-- new sp

  Load(ip, Operand(StackFrame::TypeToMarker(type)));
  PushCommonFrame(ip);
}

int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer, return address and constant pool pointer.
  LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
  if (is_int20(StandardFrameConstants::kCallerSPOffset + stack_adjustment)) {
    lay(r1, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
                               stack_adjustment));
  } else {
    AddP(r1, fp,
         Operand(StandardFrameConstants::kCallerSPOffset + stack_adjustment));
  }
  LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  LoadRR(sp, r1);
  int frame_ends = pc_offset();
  return frame_ends;
}

// ExitFrame layout (probably wrong; needs updating)
//
//  SP -> previousSP
//        LK reserved
//        code
//        sp_on_exit (for debug?)
// oldSP->prev SP
//        LK
//        <parameters on stack>

// Prior to calling EnterExitFrame, we've got a bunch of parameters
// on the stack that we need to wrap a real frame around, so first
// we reserve a slot for LK and push the previous SP, which is captured
// in the fp register (r11).
// Then we buy a new frame:

//    r14
//    oldFP <- newFP
//    SP
//    Code
//    Floats
//    gaps
//    Args
//    ABIRes <- newSP
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
                                    StackFrame::Type frame_type) {
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT);
  // Set up the frame structure on the stack.
  DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  DCHECK_GT(stack_space, 0);

  // This is an opportunity to build a frame to wrap
  // all of the pushes that have happened inside of V8
  // since we were called from C code.
  CleanseP(r14);
  Load(r1, Operand(StackFrame::TypeToMarker(frame_type)));
  PushCommonFrame(r1);
  // Reserve room for the saved entry sp and the code object.
  lay(sp, MemOperand(fp, -ExitFrameConstants::kFixedFrameSizeFromFp));

  if (emit_debug_code()) {
    StoreP(MemOperand(fp, ExitFrameConstants::kSPOffset), Operand::Zero(), r1);
  }
  Move(r1, CodeObject());
  StoreP(r1, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  Move(r1, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                     isolate()));
  StoreP(fp, MemOperand(r1));
  Move(r1,
       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
  StoreP(cp, MemOperand(r1));

  // Optionally save all volatile double registers.
  if (save_doubles) {
    MultiPushDoubles(kCallerSavedDoubles);
    // Note that d0 will be accessible at
    //   fp - ExitFrameConstants::kFixedFrameSizeFromFp -
    //   kNumCallerSavedDoubles * kDoubleSize,
    // since the sp slot and code slot were pushed after the fp.
  }

  lay(sp, MemOperand(sp, -stack_space * kPointerSize));

  // Allocate and align the frame preparing for calling the runtime
  // function.
  const int frame_alignment = TurboAssembler::ActivationFrameAlignment();
  if (frame_alignment > 0) {
    DCHECK_EQ(frame_alignment, 8);
    ClearRightImm(sp, sp, Operand(3));  // equivalent to &= -8
  }

  lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
  StoreP(MemOperand(sp), Operand::Zero(), r0);
  // Set the exit frame sp value to point just before the return address
  // location.
  lay(r1, MemOperand(sp, kStackFrameSPSlot * kPointerSize));
  StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
}

int TurboAssembler::ActivationFrameAlignment() {
#if !defined(USE_SIMULATOR)
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one S390
  // platform for another S390 platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // Simulated
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif
}

void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
                                    bool argument_count_is_length) {
  // Optionally restore all double registers.
  if (save_doubles) {
    // Calculate the stack location of the saved doubles and restore them.
    const int kNumRegs = kNumCallerSavedDoubles;
    lay(r5, MemOperand(fp, -(ExitFrameConstants::kFixedFrameSizeFromFp +
                             kNumRegs * kDoubleSize)));
    MultiPopDoubles(kCallerSavedDoubles, r5);
  }

  // Clear top frame.
  Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                     isolate()));
  StoreP(MemOperand(ip), Operand(0, RelocInfo::NONE), r0);

  // Restore current context from top and clear it in debug mode.
  Move(ip,
       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
  LoadP(cp, MemOperand(ip));

#ifdef DEBUG
  mov(r1, Operand(Context::kInvalidContext));
  Move(ip,
       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
  StoreP(r1, MemOperand(ip));
#endif

  // Tear down the exit frame, pop the arguments, and return.
  LeaveFrame(StackFrame::EXIT);

  if (argument_count.is_valid()) {
    if (!argument_count_is_length) {
      ShiftLeftP(argument_count, argument_count, Operand(kPointerSizeLog2));
    }
    la(sp, MemOperand(sp, argument_count));
  }
}

void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
  Move(dst, d0);
}

void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
  Move(dst, d0);
}

void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
                                        Register caller_args_count_reg,
                                        Register scratch0, Register scratch1) {
#if DEBUG
  if (callee_args_count.is_reg()) {
    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
                       scratch1));
  } else {
    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
  }
#endif

  // Calculate the end of the destination area where we will put the arguments
  // after we drop the current frame. We add kPointerSize to count the receiver
  // argument, which is not included in the formal parameters count.
  Register dst_reg = scratch0;
  ShiftLeftP(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
  AddP(dst_reg, fp, dst_reg);
  AddP(dst_reg, dst_reg,
       Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));

  Register src_reg = caller_args_count_reg;
  // Calculate the end of the source area. +kPointerSize is for the receiver.
  if (callee_args_count.is_reg()) {
    ShiftLeftP(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
    AddP(src_reg, sp, src_reg);
    AddP(src_reg, src_reg, Operand(kPointerSize));
  } else {
    mov(src_reg, Operand((callee_args_count.immediate() + 1) * kPointerSize));
    AddP(src_reg, src_reg, sp);
  }

  if (FLAG_debug_code) {
    CmpLogicalP(src_reg, dst_reg);
    Check(lt, AbortReason::kStackAccessBelowStackPointer);
  }

  // Restore caller's frame pointer and return address now as they will be
  // overwritten by the copying loop.
  RestoreFrameStateForTailCall();

  // Now copy callee arguments to the caller frame going backwards to avoid
  // callee arguments corruption (source and destination areas could overlap).

  // Both src_reg and dst_reg are pointing to the word after the one to copy,
  // so they must be pre-decremented in the loop.
  Register tmp_reg = scratch1;
  Label loop;
  if (callee_args_count.is_reg()) {
    AddP(tmp_reg, callee_args_count.reg(), Operand(1));  // +1 for receiver
  } else {
    mov(tmp_reg, Operand(callee_args_count.immediate() + 1));
  }
  LoadRR(r1, tmp_reg);
  bind(&loop);
  LoadP(tmp_reg, MemOperand(src_reg, -kPointerSize));
  StoreP(tmp_reg, MemOperand(dst_reg, -kPointerSize));
  lay(src_reg, MemOperand(src_reg, -kPointerSize));
  lay(dst_reg, MemOperand(dst_reg, -kPointerSize));
  BranchOnCount(r1, &loop);

  // Leave the current frame.
  LoadRR(sp, dst_reg);
}

void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual, Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // set up registers according to the contract with
  // ArgumentsAdaptorTrampoline:
  //   r2: actual arguments count
  //   r3: function (passed through to callee)
  //   r4: expected arguments count

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.

  // ARM has some sanity checks as per below; consider adding them for S390.
  DCHECK(actual.is_immediate() || actual.reg() == r2);
  DCHECK(expected.is_immediate() || expected.reg() == r4);

  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    mov(r2, Operand(actual.immediate()));
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        mov(r4, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      mov(r2, Operand(actual.immediate()));
      CmpPH(expected.reg(), Operand(actual.immediate()));
      beq(&regular_invoke);
    } else {
      CmpP(expected.reg(), actual.reg());
      beq(&regular_invoke);
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
    if (flag == CALL_FUNCTION) {
      Call(adaptor);
      if (!*definitely_mismatches) {
        b(done);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}

void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual) {
  Label skip_hook;

  ExternalReference debug_hook_active =
      ExternalReference::debug_hook_on_function_call_address(isolate());
  Move(r6, debug_hook_active);
  tm(MemOperand(r6), Operand::Zero());
  bne(&skip_hook);

  {
    // Load the receiver to pass it later to the DebugOnFunctionCall hook.
    if (actual.is_reg()) {
      LoadRR(r6, actual.reg());
    } else {
      mov(r6, Operand(actual.immediate()));
    }
    ShiftLeftP(r6, r6, Operand(kPointerSizeLog2));
    LoadP(r6, MemOperand(sp, r6));
    FrameScope frame(this,
                     has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
    if (expected.is_reg()) {
      SmiTag(expected.reg());
      Push(expected.reg());
    }
    if (actual.is_reg()) {
      SmiTag(actual.reg());
      Push(actual.reg());
    }
    if (new_target.is_valid()) {
      Push(new_target);
    }
    Push(fun, fun, r6);
    CallRuntime(Runtime::kDebugOnFunctionCall);
    Pop(fun);
    if (new_target.is_valid()) {
      Pop(new_target);
    }
    if (actual.is_reg()) {
      Pop(actual.reg());
      SmiUntag(actual.reg());
    }
    if (expected.is_reg()) {
      Pop(expected.reg());
      SmiUntag(expected.reg());
    }
  }
  bind(&skip_hook);
}

void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
                                        const ParameterCount& expected,
                                        const ParameterCount& actual,
                                        InvokeFlag flag) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  DCHECK(function == r3);
  DCHECK_IMPLIES(new_target.is_valid(), new_target == r5);

  // On function call, call into the debugger if necessary.
  CheckDebugHook(function, new_target, expected, actual);

  // Clear the new.target register if not given.
  if (!new_target.is_valid()) {
    LoadRoot(r5, Heap::kUndefinedValueRootIndex);
  }

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, &done, &definitely_mismatches, flag);
  if (!definitely_mismatches) {
    // We call indirectly through the code field in the function to
    // allow recompilation to take effect without changing any of the
    // call sites.
    Register code = kJavaScriptCallCodeStartRegister;
    LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
    AddP(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
    if (flag == CALL_FUNCTION) {
      CallJSEntry(code);
    } else {
      DCHECK(flag == JUMP_FUNCTION);
      JumpToJSEntry(code);
    }

    // Continue here if InvokePrologue does handle the invocation due to
    // mismatched parameter counts.
    bind(&done);
  }
}

void MacroAssembler::InvokeFunction(Register fun, Register new_target,
                                    const ParameterCount& actual,
                                    InvokeFlag flag) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in r3.
  DCHECK(fun == r3);

  Register expected_reg = r4;
  Register temp_reg = r6;
  LoadP(temp_reg, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
  LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
  LoadLogicalHalfWordP(
      expected_reg,
      FieldMemOperand(temp_reg,
                      SharedFunctionInfo::kFormalParameterCountOffset));

  ParameterCount expected(expected_reg);
  InvokeFunctionCode(fun, new_target, expected, actual, flag);
}

void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in r3.
  DCHECK(function == r3);

  // Get the function and set up the context.
  LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));

  InvokeFunctionCode(r3, no_reg, expected, actual, flag);
}

void MacroAssembler::MaybeDropFrames() {
  // Check whether we need to drop frames to restart a function on the stack.
  ExternalReference restart_fp =
      ExternalReference::debug_restart_fp_address(isolate());
  Move(r3, restart_fp);
  LoadP(r3, MemOperand(r3));
  CmpP(r3, Operand::Zero());
  Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
       ne);
}

void MacroAssembler::PushStackHandler() {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);

  // Link the current handler as the next handler.
  mov(r7, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
                                            isolate())));

  // Buy the full stack frame for the handler (StackHandlerConstants::kSize).
  lay(sp, MemOperand(sp, -StackHandlerConstants::kSize));

  // Store padding.
  mov(r0, Operand(Smi::kZero));
  StoreP(r0, MemOperand(sp));  // Padding.

  // Copy the old handler into the next handler slot.
  MoveChar(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7),
           Operand(kPointerSize));
  // Set this new handler as the current one.
  StoreP(sp, MemOperand(r7));
}

void MacroAssembler::PopStackHandler() {
  STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);

  // Pop the next handler into r3 and store it into the handler address
  // reference.
  Pop(r3);
  mov(ip, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
                                            isolate())));
  StoreP(r3, MemOperand(ip));

  Drop(1);  // Drop padding.
}

void MacroAssembler::CompareObjectType(Register object, Register map,
                                       Register type_reg, InstanceType type) {
  const Register temp = type_reg == no_reg ? r0 : type_reg;

  LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
  CompareInstanceType(map, temp, type);
}

void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
                                         InstanceType type) {
  STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
  STATIC_ASSERT(LAST_TYPE <= 0xFFFF);
  LoadHalfWordP(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
  CmpP(type_reg, Operand(type));
}

void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
  CmpP(obj, MemOperand(kRootRegister, RootRegisterOffset(index)));
}

void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}

void TurboAssembler::CallStubDelayed(CodeStub* stub) {
  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
  call(stub);
}

void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}

bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame_ || !stub->SometimesSetsUpAFrame();
}

void MacroAssembler::TryDoubleToInt32Exact(Register result,
                                           DoubleRegister double_input,
                                           Register scratch,
                                           DoubleRegister double_scratch) {
  Label done;
  DCHECK(double_input != double_scratch);

  ConvertDoubleToInt64(result, double_input);

  TestIfInt32(result);
  bne(&done);

  // Convert back and compare.
  cdfbr(double_scratch, result);
  cdbr(double_scratch, double_input);
  bind(&done);
}
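
// Caller contract (inferred from the code above rather than documented): on
// the fall-through path the final cdbr sets the condition code to eq exactly
// when the round-tripped value matches double_input, and on the early-out
// path the failed TestIfInt32 leaves ne, so callers test for eq to detect an
// exact int32 conversion.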
1557
TruncateDoubleToI(Isolate * isolate,Zone * zone,Register result,DoubleRegister double_input,StubCallMode stub_mode)1558 void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
1559 Register result,
1560 DoubleRegister double_input,
1561 StubCallMode stub_mode) {
1562 Label done;
1563
1564 TryInlineTruncateDoubleToI(result, double_input, &done);
1565
1566 // If we fell through then inline version didn't succeed - call stub instead.
1567 push(r14);
1568 // Put input on stack.
1569 lay(sp, MemOperand(sp, -kDoubleSize));
1570 StoreDouble(double_input, MemOperand(sp));
1571
1572 if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
1573 Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
1574 } else {
1575 Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
1576 }
1577
1578 LoadP(result, MemOperand(sp, 0));
1579 la(sp, MemOperand(sp, kDoubleSize));
1580 pop(r14);
1581
1582 bind(&done);
1583 }
1584
1585 void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
1586 DoubleRegister double_input,
1587 Label* done) {
1588 ConvertDoubleToInt64(result, double_input);
1589
1590 // Test for overflow
1591 TestIfInt32(result);
1592 beq(done);
1593 }
1594
1595 void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
1596 Register centry) {
1597 const Runtime::Function* f = Runtime::FunctionForId(fid);
1598 mov(r2, Operand(f->nargs));
1599 Move(r3, ExternalReference::Create(f));
1600 DCHECK(!AreAliased(centry, r2, r3));
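  // Compute the code entry address: step over the Code object header and
  // clear the heap-object tag.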
1601 la(centry, MemOperand(centry, Code::kHeaderSize - kHeapObjectTag));
1602 Call(centry);
1603 }
1604
1605 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
1606 SaveFPRegsMode save_doubles) {
1607 // All parameters are on the stack. r2 has the return value after call.
1608
1609 // If the expected number of arguments of the runtime function is
1610   // constant, we check that the actual number of arguments matches the
1611 // expectation.
1612 CHECK(f->nargs < 0 || f->nargs == num_arguments);
1613
1614 // TODO(1236192): Most runtime routines don't need the number of
1615 // arguments passed in because it is constant. At some point we
1616 // should remove this need and make the runtime routine entry code
1617 // smarter.
1618 mov(r2, Operand(num_arguments));
1619 Move(r3, ExternalReference::Create(f));
1620 #if V8_TARGET_ARCH_S390X
1621 Handle<Code> code =
1622 CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
1623 #else
1624 Handle<Code> code = CodeFactory::CEntry(isolate(), 1, save_doubles);
1625 #endif
1626
1627 Call(code, RelocInfo::CODE_TARGET);
1628 }
1629
1630 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1631 const Runtime::Function* function = Runtime::FunctionForId(fid);
1632 DCHECK_EQ(1, function->result_size);
1633 if (function->nargs >= 0) {
1634 mov(r2, Operand(function->nargs));
1635 }
1636 JumpToExternalReference(ExternalReference::Create(fid));
1637 }
1638
1639 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
1640 bool builtin_exit_frame) {
1641 Move(r3, builtin);
1642 Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
1643 kArgvOnStack, builtin_exit_frame);
1644 Jump(code, RelocInfo::CODE_TARGET);
1645 }
1646
1647 void MacroAssembler::JumpToInstructionStream(Address entry) {
1648 mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
1649 Jump(kOffHeapTrampolineRegister);
1650 }
1651
1652 void MacroAssembler::LoadWeakValue(Register out, Register in,
1653 Label* target_if_cleared) {
1654 CmpP(in, Operand(kClearedWeakHeapObject));
1655 beq(target_if_cleared);
1656
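  // Otherwise strip the weak tag bits to recover a strong reference.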
1657 AndP(out, in, Operand(~kWeakHeapObjectMask));
1658 }
1659
1660 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
1661 Register scratch1, Register scratch2) {
1662 DCHECK(value > 0 && is_int8(value));
1663 if (FLAG_native_code_counters && counter->Enabled()) {
1664 Move(scratch2, ExternalReference::Create(counter));
1665 // @TODO(john.yan): can be optimized by asi()
1666 LoadW(scratch1, MemOperand(scratch2));
1667 AddP(scratch1, Operand(value));
1668 StoreW(scratch1, MemOperand(scratch2));
1669 }
1670 }
1671
1672 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
1673 Register scratch1, Register scratch2) {
1674 DCHECK(value > 0 && is_int8(value));
1675 if (FLAG_native_code_counters && counter->Enabled()) {
1676 Move(scratch2, ExternalReference::Create(counter));
1677 // @TODO(john.yan): can be optimized by asi()
1678 LoadW(scratch1, MemOperand(scratch2));
1679 AddP(scratch1, Operand(-value));
1680 StoreW(scratch1, MemOperand(scratch2));
1681 }
1682 }
1683
1684 void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
1685 if (emit_debug_code()) Check(cond, reason, cr);
1686 }
1687
1688 void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
1689 Label L;
1690 b(cond, &L);
1691 Abort(reason);
1692 // will not return here
1693 bind(&L);
1694 }
1695
1696 void TurboAssembler::Abort(AbortReason reason) {
1697 Label abort_start;
1698 bind(&abort_start);
1699 const char* msg = GetAbortReason(reason);
1700 #ifdef DEBUG
1701 RecordComment("Abort message: ");
1702 RecordComment(msg);
1703 #endif
1704
1705 // Avoid emitting call to builtin if requested.
1706 if (trap_on_abort()) {
1707 stop(msg);
1708 return;
1709 }
1710
1711 if (should_abort_hard()) {
1712 // We don't care if we constructed a frame. Just pretend we did.
1713 FrameScope assume_frame(this, StackFrame::NONE);
1714 lgfi(r2, Operand(static_cast<int>(reason)));
1715 PrepareCallCFunction(1, 0, r3);
1716 Move(r3, ExternalReference::abort_with_reason());
1717 // Use Call directly to avoid any unneeded overhead. The function won't
1718 // return anyway.
1719 Call(r3);
1720 return;
1721 }
1722
1723 LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(reason)));
1724
1725 // Disable stub call restrictions to always allow calls to abort.
1726 if (!has_frame_) {
1727 // We don't actually want to generate a pile of code for this, so just
1728 // claim there is a stack frame, without generating one.
1729 FrameScope scope(this, StackFrame::NONE);
1730 Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
1731 } else {
1732 Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
1733 }
1734 // will not return here
1735 }
1736
1737 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
1738 LoadP(dst, NativeContextMemOperand());
1739 LoadP(dst, ContextMemOperand(dst, index));
1740 }
1741
1742 void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
1743 Label* smi_case) {
1744 STATIC_ASSERT(kSmiTag == 0);
1745 STATIC_ASSERT(kSmiTagSize == 1);
1746 // this won't work if src == dst
1747 DCHECK(src.code() != dst.code());
1748 SmiUntag(dst, src);
1749 TestIfSmi(src);
1750 beq(smi_case);
1751 }
1752
1753 void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
1754 Label* on_either_smi) {
1755 STATIC_ASSERT(kSmiTag == 0);
1756 JumpIfSmi(reg1, on_either_smi);
1757 JumpIfSmi(reg2, on_either_smi);
1758 }
1759
1760 void MacroAssembler::AssertNotSmi(Register object) {
1761 if (emit_debug_code()) {
1762 STATIC_ASSERT(kSmiTag == 0);
1763 TestIfSmi(object);
1764 Check(ne, AbortReason::kOperandIsASmi, cr0);
1765 }
1766 }
1767
1768 void MacroAssembler::AssertSmi(Register object) {
1769 if (emit_debug_code()) {
1770 STATIC_ASSERT(kSmiTag == 0);
1771 TestIfSmi(object);
1772 Check(eq, AbortReason::kOperandIsNotASmi, cr0);
1773 }
1774 }
1775
1776 void MacroAssembler::AssertConstructor(Register object, Register scratch) {
1777 if (emit_debug_code()) {
1778 STATIC_ASSERT(kSmiTag == 0);
1779 TestIfSmi(object);
1780 Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
1781 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1782 tm(FieldMemOperand(scratch, Map::kBitFieldOffset),
1783 Operand(Map::IsConstructorBit::kMask));
1784 Check(ne, AbortReason::kOperandIsNotAConstructor);
1785 }
1786 }
1787
1788 void MacroAssembler::AssertFunction(Register object) {
1789 if (emit_debug_code()) {
1790 STATIC_ASSERT(kSmiTag == 0);
1791 TestIfSmi(object);
1792 Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
1793 push(object);
1794 CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
1795 pop(object);
1796 Check(eq, AbortReason::kOperandIsNotAFunction);
1797 }
1798 }
1799
1800 void MacroAssembler::AssertBoundFunction(Register object) {
1801 if (emit_debug_code()) {
1802 STATIC_ASSERT(kSmiTag == 0);
1803 TestIfSmi(object);
1804 Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
1805 push(object);
1806 CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
1807 pop(object);
1808 Check(eq, AbortReason::kOperandIsNotABoundFunction);
1809 }
1810 }
1811
1812 void MacroAssembler::AssertGeneratorObject(Register object) {
1813 if (!emit_debug_code()) return;
1814 TestIfSmi(object);
1815 Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
1816
1817 // Load map
1818 Register map = object;
1819 push(object);
1820 LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
1821
1822 // Check if JSGeneratorObject
1823 Label do_check;
1824 Register instance_type = object;
1825 CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
1826 beq(&do_check);
1827
1828 // Check if JSAsyncGeneratorObject (See MacroAssembler::CompareInstanceType)
1829 CmpP(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
1830
1831 bind(&do_check);
1832 // Restore generator object to register and perform assertion
1833 pop(object);
1834 Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
1835 }
1836
1837 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
1838 Register scratch) {
1839 if (emit_debug_code()) {
1840 Label done_checking;
1841 AssertNotSmi(object);
1842 CompareRoot(object, Heap::kUndefinedValueRootIndex);
1843 beq(&done_checking, Label::kNear);
1844 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1845 CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
1846 Assert(eq, AbortReason::kExpectedUndefinedOrCell);
1847 bind(&done_checking);
1848 }
1849 }
1850
1851 static const int kRegisterPassedArguments = 5;
1852
1853 int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
1854 int num_double_arguments) {
1855 int stack_passed_words = 0;
1856 if (num_double_arguments > DoubleRegister::kNumRegisters) {
1857 stack_passed_words +=
1858 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
1859 }
1860 // Up to five simple arguments are passed in registers r2..r6
1861 if (num_reg_arguments > kRegisterPassedArguments) {
1862 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
1863 }
1864 return stack_passed_words;
1865 }
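// Example (illustrative): 7 general arguments and no doubles yields
// 7 - kRegisterPassedArguments = 2 stack words; each double beyond the FP
// register file costs two more words, per the computation above.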
1866
1867 void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
1868 int num_double_arguments,
1869 Register scratch) {
1870 int frame_alignment = ActivationFrameAlignment();
1871 int stack_passed_arguments =
1872 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
1873 int stack_space = kNumRequiredStackFrameSlots;
1874 if (frame_alignment > kPointerSize) {
1875 // Make stack end at alignment and make room for stack arguments
1876 // -- preserving original value of sp.
1877 LoadRR(scratch, sp);
1878 lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kPointerSize));
1879 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
1880 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
1881 StoreP(scratch, MemOperand(sp, (stack_passed_arguments)*kPointerSize));
1882 } else {
1883 stack_space += stack_passed_arguments;
1884 }
1885 lay(sp, MemOperand(sp, (-stack_space) * kPointerSize));
1886 }
1887
1888 void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
1889 Register scratch) {
1890 PrepareCallCFunction(num_reg_arguments, 0, scratch);
1891 }
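// Typical usage: PrepareCallCFunction(n, 0, scratch), materialize the
// arguments, then CallCFunction(fn, n); CallCFunctionHelper below unwinds
// the stack space reserved here after the call returns.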
1892
1893 void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); }
1894
1895 void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); }
1896
1897 void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
1898 DoubleRegister src2) {
1899 if (src2 == d0) {
1900 DCHECK(src1 != d2);
1901 Move(d2, src2);
1902 Move(d0, src1);
1903 } else {
1904 Move(d0, src1);
1905 Move(d2, src2);
1906 }
1907 }
1908
1909 void TurboAssembler::CallCFunction(ExternalReference function,
1910 int num_reg_arguments,
1911 int num_double_arguments) {
1912 Move(ip, function);
1913 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
1914 }
1915
1916 void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
1917 int num_double_arguments) {
1918 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
1919 }
1920
1921 void TurboAssembler::CallCFunction(ExternalReference function,
1922 int num_arguments) {
1923 CallCFunction(function, num_arguments, 0);
1924 }
1925
1926 void TurboAssembler::CallCFunction(Register function, int num_arguments) {
1927 CallCFunction(function, num_arguments, 0);
1928 }
1929
1930 void TurboAssembler::CallCFunctionHelper(Register function,
1931 int num_reg_arguments,
1932 int num_double_arguments) {
1933 DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
1934 DCHECK(has_frame());
1935
1936 // Just call directly. The function called cannot cause a GC, or
1937 // allow preemption, so the return address in the link register
1938 // stays correct.
1939 Register dest = function;
1940 if (ABI_CALL_VIA_IP) {
1941 Move(ip, function);
1942 dest = ip;
1943 }
1944
1945 Call(dest);
1946
1947 int stack_passed_arguments =
1948 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
1949 int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
1950 if (ActivationFrameAlignment() > kPointerSize) {
1951 // Load the original stack pointer (pre-alignment) from the stack
1952 LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
1953 } else {
1954 la(sp, MemOperand(sp, stack_space * kPointerSize));
1955 }
1956 }
1957
1958 void TurboAssembler::CheckPageFlag(
1959 Register object,
1960 Register scratch, // scratch may be same register as object
1961 int mask, Condition cc, Label* condition_met) {
1962 DCHECK(cc == ne || cc == eq);
1963 ClearRightImm(scratch, object, Operand(kPageSizeBits));
1964
1965 if (base::bits::IsPowerOfTwo(mask)) {
1966 // If it's a power of two, we can use Test-Under-Mask Memory-Imm form
1967 // which allows testing of a single byte in memory.
1968 int32_t byte_offset = 4;
1969 uint32_t shifted_mask = mask;
1970 // Determine the byte offset to be tested
1971 if (mask <= 0x80) {
1972 byte_offset = kPointerSize - 1;
1973 } else if (mask < 0x8000) {
1974 byte_offset = kPointerSize - 2;
1975 shifted_mask = mask >> 8;
1976 } else if (mask < 0x800000) {
1977 byte_offset = kPointerSize - 3;
1978 shifted_mask = mask >> 16;
1979 } else {
1980 byte_offset = kPointerSize - 4;
1981 shifted_mask = mask >> 24;
1982 }
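    // e.g. mask = 0x100 lands in the second-lowest byte, so byte_offset
    // becomes kPointerSize - 2 and shifted_mask becomes 0x1.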
1983 #if V8_TARGET_LITTLE_ENDIAN
1984     // Reverse the byte_offset if emulating on a little endian platform
1985 byte_offset = kPointerSize - byte_offset - 1;
1986 #endif
1987 tm(MemOperand(scratch, MemoryChunk::kFlagsOffset + byte_offset),
1988 Operand(shifted_mask));
1989 } else {
1990 LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
1991 AndP(r0, scratch, Operand(mask));
1992 }
1993 // Should be okay to remove rc
1994
1995 if (cc == ne) {
1996 bne(condition_met);
1997 }
1998 if (cc == eq) {
1999 beq(condition_met);
2000 }
2001 }
2002
2003 ////////////////////////////////////////////////////////////////////////////////
2004 //
2005 // New MacroAssembler Interfaces added for S390
2006 //
2007 ////////////////////////////////////////////////////////////////////////////////
2008 // Primarily used for loading constants.
2009 // This should really move to macro-assembler, as it is really a
2010 // pseudo instruction.
2011 // Some usages of this intend for a FIXED_SEQUENCE to be used.
2012 // @TODO - break this dependency so we can optimize mov() in general
2013 // and only use the generic version when we require a fixed sequence.
2014 void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
2015 Representation r, Register scratch) {
2016 DCHECK(!r.IsDouble());
2017 if (r.IsInteger8()) {
2018 LoadB(dst, mem);
2019 } else if (r.IsUInteger8()) {
2020 LoadlB(dst, mem);
2021 } else if (r.IsInteger16()) {
2022 LoadHalfWordP(dst, mem, scratch);
2023 } else if (r.IsUInteger16()) {
2024 LoadHalfWordP(dst, mem, scratch);
2025 #if V8_TARGET_ARCH_S390X
2026 } else if (r.IsInteger32()) {
2027 LoadW(dst, mem, scratch);
2028 #endif
2029 } else {
2030 LoadP(dst, mem, scratch);
2031 }
2032 }
2033
2034 void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
2035 Representation r, Register scratch) {
2036 DCHECK(!r.IsDouble());
2037 if (r.IsInteger8() || r.IsUInteger8()) {
2038 StoreByte(src, mem, scratch);
2039 } else if (r.IsInteger16() || r.IsUInteger16()) {
2040 StoreHalfWord(src, mem, scratch);
2041 #if V8_TARGET_ARCH_S390X
2042 } else if (r.IsInteger32()) {
2043 StoreW(src, mem, scratch);
2044 #endif
2045 } else {
2046 if (r.IsHeapObject()) {
2047 AssertNotSmi(src);
2048 } else if (r.IsSmi()) {
2049 AssertSmi(src);
2050 }
2051 StoreP(src, mem, scratch);
2052 }
2053 }
2054
2055 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
2056 Register reg4, Register reg5,
2057 Register reg6) {
2058 RegList regs = 0;
2059 if (reg1.is_valid()) regs |= reg1.bit();
2060 if (reg2.is_valid()) regs |= reg2.bit();
2061 if (reg3.is_valid()) regs |= reg3.bit();
2062 if (reg4.is_valid()) regs |= reg4.bit();
2063 if (reg5.is_valid()) regs |= reg5.bit();
2064 if (reg6.is_valid()) regs |= reg6.bit();
2065
2066 const RegisterConfiguration* config = RegisterConfiguration::Default();
2067 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
2068 int code = config->GetAllocatableGeneralCode(i);
2069 Register candidate = Register::from_code(code);
2070 if (regs & candidate.bit()) continue;
2071 return candidate;
2072 }
2073 UNREACHABLE();
2074 }
2075
2076 void TurboAssembler::mov(Register dst, const Operand& src) {
2077 #if V8_TARGET_ARCH_S390X
2078 int64_t value;
2079 #else
2080 int value;
2081 #endif
2082 if (src.is_heap_object_request()) {
2083 RequestHeapObject(src.heap_object_request());
2084 value = 0;
2085 } else {
2086 value = src.immediate();
2087 }
2088
2089 if (src.rmode() != RelocInfo::NONE) {
2090 // some form of relocation needed
2091 RecordRelocInfo(src.rmode(), value);
2092 }
2093
2094 #if V8_TARGET_ARCH_S390X
2095 int32_t hi_32 = static_cast<int64_t>(value) >> 32;
2096 int32_t lo_32 = static_cast<int32_t>(value);
2097
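  // Split the 64-bit immediate into 32-bit halves, e.g. 0x0123456789ABCDEF:
  // iihf inserts 0x01234567 into the high word, iilf 0x89ABCDEF into the low.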
2098 iihf(dst, Operand(hi_32));
2099 iilf(dst, Operand(lo_32));
2100 #else
2101 iilf(dst, Operand(value));
2102 #endif
2103 }
2104
2105 void TurboAssembler::Mul32(Register dst, const MemOperand& src1) {
2106 if (is_uint12(src1.offset())) {
2107 ms(dst, src1);
2108 } else if (is_int20(src1.offset())) {
2109 msy(dst, src1);
2110 } else {
2111 UNIMPLEMENTED();
2112 }
2113 }
2114
2115 void TurboAssembler::Mul32(Register dst, Register src1) { msr(dst, src1); }
2116
2117 void TurboAssembler::Mul32(Register dst, const Operand& src1) {
2118 msfi(dst, src1);
2119 }
2120
2121 #define Generate_MulHigh32(instr) \
2122 { \
2123 lgfr(dst, src1); \
2124 instr(dst, src2); \
2125 srlg(dst, dst, Operand(32)); \
2126 }
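// The sequence sign-extends src1 to 64 bits, multiplies, then shifts the
// 64-bit product right by 32 so that dst receives the high word.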
2127
2128 void TurboAssembler::MulHigh32(Register dst, Register src1,
2129 const MemOperand& src2) {
2130 Generate_MulHigh32(msgf);
2131 }
2132
2133 void TurboAssembler::MulHigh32(Register dst, Register src1, Register src2) {
2134 if (dst == src2) {
2135 std::swap(src1, src2);
2136 }
2137 Generate_MulHigh32(msgfr);
2138 }
2139
2140 void TurboAssembler::MulHigh32(Register dst, Register src1,
2141 const Operand& src2) {
2142 Generate_MulHigh32(msgfi);
2143 }
2144
2145 #undef Generate_MulHigh32
2146
2147 #define Generate_MulHighU32(instr) \
2148 { \
2149 lr(r1, src1); \
2150 instr(r0, src2); \
2151 LoadlW(dst, r0); \
2152 }
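// ml/mlr multiply through the r0:r1 even/odd pair: the multiplicand sits in
// r1 and the 64-bit product lands in r0:r1, so the high word is read from r0.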
2153
2154 void TurboAssembler::MulHighU32(Register dst, Register src1,
2155 const MemOperand& src2) {
2156 Generate_MulHighU32(ml);
2157 }
2158
2159 void TurboAssembler::MulHighU32(Register dst, Register src1, Register src2) {
2160 Generate_MulHighU32(mlr);
2161 }
2162
2163 void TurboAssembler::MulHighU32(Register dst, Register src1,
2164 const Operand& src2) {
2165 USE(dst);
2166 USE(src1);
2167 USE(src2);
2168 UNREACHABLE();
2169 }
2170
2171 #undef Generate_MulHighU32
2172
2173 #define Generate_Mul32WithOverflowIfCCUnequal(instr) \
2174 { \
2175 lgfr(dst, src1); \
2176 instr(dst, src2); \
2177 cgfr(dst, dst); \
2178 }
2179
2180 void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
2181 const MemOperand& src2) {
2182 Register result = dst;
2183 if (src2.rx() == dst || src2.rb() == dst) dst = r0;
2184 Generate_Mul32WithOverflowIfCCUnequal(msgf);
2185 if (result != dst) llgfr(result, dst);
2186 }
2187
2188 void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
2189 Register src2) {
2190 if (dst == src2) {
2191 std::swap(src1, src2);
2192 }
2193 Generate_Mul32WithOverflowIfCCUnequal(msgfr);
2194 }
2195
2196 void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
2197 const Operand& src2) {
2198 Generate_Mul32WithOverflowIfCCUnequal(msgfi);
2199 }
2200
2201 #undef Generate_Mul32WithOverflowIfCCUnequal
2202
2203 void TurboAssembler::Mul64(Register dst, const MemOperand& src1) {
2204 if (is_int20(src1.offset())) {
2205 msg(dst, src1);
2206 } else {
2207 UNIMPLEMENTED();
2208 }
2209 }
2210
2211 void TurboAssembler::Mul64(Register dst, Register src1) { msgr(dst, src1); }
2212
2213 void TurboAssembler::Mul64(Register dst, const Operand& src1) {
2214 msgfi(dst, src1);
2215 }
2216
2217 void TurboAssembler::Mul(Register dst, Register src1, Register src2) {
2218 if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
2219 MulPWithCondition(dst, src1, src2);
2220 } else {
2221 if (dst == src2) {
2222 MulP(dst, src1);
2223 } else if (dst == src1) {
2224 MulP(dst, src2);
2225 } else {
2226 Move(dst, src1);
2227 MulP(dst, src2);
2228 }
2229 }
2230 }
2231
2232 void TurboAssembler::DivP(Register dividend, Register divider) {
2233   // The dividend must be the even register of an even/odd register pair.
2234 DCHECK_EQ(dividend.code() % 2, 0);
2235 #if V8_TARGET_ARCH_S390X
2236 dsgr(dividend, divider);
2237 #else
2238 dr(dividend, divider);
2239 #endif
2240 }
2241
2242 #define Generate_Div32(instr) \
2243 { \
2244 lgfr(r1, src1); \
2245 instr(r0, src2); \
2246 LoadlW(dst, r1); \
2247 }
2248
2249 void TurboAssembler::Div32(Register dst, Register src1,
2250 const MemOperand& src2) {
2251 Generate_Div32(dsgf);
2252 }
2253
2254 void TurboAssembler::Div32(Register dst, Register src1, Register src2) {
2255 Generate_Div32(dsgfr);
2256 }
2257
2258 #undef Generate_Div32
2259
2260 #define Generate_DivU32(instr) \
2261 { \
2262 lr(r0, src1); \
2263 srdl(r0, Operand(32)); \
2264 instr(r0, src2); \
2265 LoadlW(dst, r1); \
2266 }
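// lr + srdl(32) place src1 in r1 with r0 zeroed, forming a 64-bit unsigned
// dividend in the r0:r1 pair; dl/dlr leave the quotient in r1 and the
// remainder in r0.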
2267
2268 void TurboAssembler::DivU32(Register dst, Register src1,
2269 const MemOperand& src2) {
2270 Generate_DivU32(dl);
2271 }
2272
2273 void TurboAssembler::DivU32(Register dst, Register src1, Register src2) {
2274 Generate_DivU32(dlr);
2275 }
2276
2277 #undef Generate_DivU32
2278
2279 #define Generate_Div64(instr) \
2280 { \
2281 lgr(r1, src1); \
2282 instr(r0, src2); \
2283 lgr(dst, r1); \
2284 }
2285
2286 void TurboAssembler::Div64(Register dst, Register src1,
2287 const MemOperand& src2) {
2288 Generate_Div64(dsg);
2289 }
2290
2291 void TurboAssembler::Div64(Register dst, Register src1, Register src2) {
2292 Generate_Div64(dsgr);
2293 }
2294
2295 #undef Generate_Div64
2296
2297 #define Generate_DivU64(instr) \
2298 { \
2299 lgr(r1, src1); \
2300 lghi(r0, Operand::Zero()); \
2301 instr(r0, src2); \
2302 lgr(dst, r1); \
2303 }
2304
2305 void TurboAssembler::DivU64(Register dst, Register src1,
2306 const MemOperand& src2) {
2307 Generate_DivU64(dlg);
2308 }
2309
2310 void TurboAssembler::DivU64(Register dst, Register src1, Register src2) {
2311 Generate_DivU64(dlgr);
2312 }
2313
2314 #undef Generate_DivU64
2315
2316 #define Generate_Mod32(instr) \
2317 { \
2318 lgfr(r1, src1); \
2319 instr(r0, src2); \
2320 LoadlW(dst, r0); \
2321 }
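// dsgf/dsgfr leave the quotient in r1 and the remainder in r0; in contrast
// to Div32 above, the result here is taken from r0.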
2322
2323 void TurboAssembler::Mod32(Register dst, Register src1,
2324 const MemOperand& src2) {
2325 Generate_Mod32(dsgf);
2326 }
2327
2328 void TurboAssembler::Mod32(Register dst, Register src1, Register src2) {
2329 Generate_Mod32(dsgfr);
2330 }
2331
2332 #undef Generate_Mod32
2333
2334 #define Generate_ModU32(instr) \
2335 { \
2336 lr(r0, src1); \
2337 srdl(r0, Operand(32)); \
2338 instr(r0, src2); \
2339 LoadlW(dst, r0); \
2340 }
2341
2342 void TurboAssembler::ModU32(Register dst, Register src1,
2343 const MemOperand& src2) {
2344 Generate_ModU32(dl);
2345 }
2346
2347 void TurboAssembler::ModU32(Register dst, Register src1, Register src2) {
2348 Generate_ModU32(dlr);
2349 }
2350
2351 #undef Generate_ModU32
2352
2353 #define Generate_Mod64(instr) \
2354 { \
2355 lgr(r1, src1); \
2356 instr(r0, src2); \
2357 lgr(dst, r0); \
2358 }
2359
2360 void TurboAssembler::Mod64(Register dst, Register src1,
2361 const MemOperand& src2) {
2362 Generate_Mod64(dsg);
2363 }
2364
2365 void TurboAssembler::Mod64(Register dst, Register src1, Register src2) {
2366 Generate_Mod64(dsgr);
2367 }
2368
2369 #undef Generate_Mod64
2370
2371 #define Generate_ModU64(instr) \
2372 { \
2373 lgr(r1, src1); \
2374 lghi(r0, Operand::Zero()); \
2375 instr(r0, src2); \
2376 lgr(dst, r0); \
2377 }
2378
2379 void TurboAssembler::ModU64(Register dst, Register src1,
2380 const MemOperand& src2) {
2381 Generate_ModU64(dlg);
2382 }
2383
2384 void TurboAssembler::ModU64(Register dst, Register src1, Register src2) {
2385 Generate_ModU64(dlgr);
2386 }
2387
2388 #undef Generate_ModU64
2389
2390 void TurboAssembler::MulP(Register dst, const Operand& opnd) {
2391 #if V8_TARGET_ARCH_S390X
2392 msgfi(dst, opnd);
2393 #else
2394 msfi(dst, opnd);
2395 #endif
2396 }
2397
2398 void TurboAssembler::MulP(Register dst, Register src) {
2399 #if V8_TARGET_ARCH_S390X
2400 msgr(dst, src);
2401 #else
2402 msr(dst, src);
2403 #endif
2404 }
2405
2406 void TurboAssembler::MulPWithCondition(Register dst, Register src1,
2407 Register src2) {
2408 CHECK(CpuFeatures::IsSupported(MISC_INSTR_EXT2));
2409 #if V8_TARGET_ARCH_S390X
2410 msgrkc(dst, src1, src2);
2411 #else
2412 msrkc(dst, src1, src2);
2413 #endif
2414 }
2415
2416 void TurboAssembler::MulP(Register dst, const MemOperand& opnd) {
2417 #if V8_TARGET_ARCH_S390X
2418   if (is_int20(opnd.offset())) {
2419     msg(dst, opnd);
2420   } else {
2421     UNIMPLEMENTED();
2422   }
2423 #else
2424   if (is_uint12(opnd.offset())) {
2425     ms(dst, opnd);
2426   } else if (is_int20(opnd.offset())) {
2427     msy(dst, opnd);
2428   } else {
2429     UNIMPLEMENTED();
2430   }
2431 #endif
2432 }
2433
2434 void TurboAssembler::Sqrt(DoubleRegister result, DoubleRegister input) {
2435 sqdbr(result, input);
2436 }
2437 void TurboAssembler::Sqrt(DoubleRegister result, const MemOperand& input) {
2438 if (is_uint12(input.offset())) {
2439 sqdb(result, input);
2440 } else {
2441 ldy(result, input);
2442 sqdbr(result, result);
2443 }
2444 }
2445 //----------------------------------------------------------------------------
2446 // Add Instructions
2447 //----------------------------------------------------------------------------
2448
2449 // Add 32-bit (Register dst = Register dst + Immediate opnd)
2450 void TurboAssembler::Add32(Register dst, const Operand& opnd) {
2451 if (is_int16(opnd.immediate()))
2452 ahi(dst, opnd);
2453 else
2454 afi(dst, opnd);
2455 }
2456
2457 // Add 32-bit (Register dst = Register dst + Immediate opnd)
2458 void TurboAssembler::Add32_RI(Register dst, const Operand& opnd) {
2459 // Just a wrapper for above
2460 Add32(dst, opnd);
2461 }
2462
2463 // Add Pointer Size (Register dst = Register dst + Immediate opnd)
2464 void TurboAssembler::AddP(Register dst, const Operand& opnd) {
2465 #if V8_TARGET_ARCH_S390X
2466 if (is_int16(opnd.immediate()))
2467 aghi(dst, opnd);
2468 else
2469 agfi(dst, opnd);
2470 #else
2471 Add32(dst, opnd);
2472 #endif
2473 }
2474
2475 // Add 32-bit (Register dst = Register src + Immediate opnd)
2476 void TurboAssembler::Add32(Register dst, Register src, const Operand& opnd) {
2477 if (dst != src) {
2478 if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
2479 ahik(dst, src, opnd);
2480 return;
2481 }
2482 lr(dst, src);
2483 }
2484 Add32(dst, opnd);
2485 }
2486
2487 // Add 32-bit (Register dst = Register src + Immediate opnd)
2488 void TurboAssembler::Add32_RRI(Register dst, Register src,
2489 const Operand& opnd) {
2490 // Just a wrapper for above
2491 Add32(dst, src, opnd);
2492 }
2493
2494 // Add Pointer Size (Register dst = Register src + Immediate opnd)
2495 void TurboAssembler::AddP(Register dst, Register src, const Operand& opnd) {
2496 if (dst != src) {
2497 if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
2498 AddPImm_RRI(dst, src, opnd);
2499 return;
2500 }
2501 LoadRR(dst, src);
2502 }
2503 AddP(dst, opnd);
2504 }
2505
2506 // Add 32-bit (Register dst = Register dst + Register src)
2507 void TurboAssembler::Add32(Register dst, Register src) { ar(dst, src); }
2508
2509 // Add Pointer Size (Register dst = Register dst + Register src)
2510 void TurboAssembler::AddP(Register dst, Register src) { AddRR(dst, src); }
2511
2512 // Add Pointer Size with src extension
2513 // (Register dst(ptr) = Register dst (ptr) + Register src (32 | 32->64))
2514 // src is treated as a 32-bit signed integer, which is sign extended to
2515 // 64-bit if necessary.
2516 void TurboAssembler::AddP_ExtendSrc(Register dst, Register src) {
2517 #if V8_TARGET_ARCH_S390X
2518 agfr(dst, src);
2519 #else
2520 ar(dst, src);
2521 #endif
2522 }
2523
2524 // Add 32-bit (Register dst = Register src1 + Register src2)
2525 void TurboAssembler::Add32(Register dst, Register src1, Register src2) {
2526 if (dst != src1 && dst != src2) {
2527 // We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
2528 // as AR is a smaller instruction
2529 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
2530 ark(dst, src1, src2);
2531 return;
2532 } else {
2533 lr(dst, src1);
2534 }
2535 } else if (dst == src2) {
2536 src2 = src1;
2537 }
2538 ar(dst, src2);
2539 }
2540
2541 // Add Pointer Size (Register dst = Register src1 + Register src2)
2542 void TurboAssembler::AddP(Register dst, Register src1, Register src2) {
2543 if (dst != src1 && dst != src2) {
2544 // We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
2545 // as AR is a smaller instruction
2546 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
2547 AddP_RRR(dst, src1, src2);
2548 return;
2549 } else {
2550 LoadRR(dst, src1);
2551 }
2552 } else if (dst == src2) {
2553 src2 = src1;
2554 }
2555 AddRR(dst, src2);
2556 }
2557
2558 // Add Pointer Size with src extension
2559 // (Register dst (ptr) = Register dst (ptr) + Register src1 (ptr) +
2560 // Register src2 (32 | 32->64))
2561 // src is treated as a 32-bit signed integer, which is sign extended to
2562 // 64-bit if necessary.
2563 void TurboAssembler::AddP_ExtendSrc(Register dst, Register src1,
2564 Register src2) {
2565 #if V8_TARGET_ARCH_S390X
2566 if (dst == src2) {
2567 // The source we need to sign extend is the same as result.
2568 lgfr(dst, src2);
2569 agr(dst, src1);
2570 } else {
2571 if (dst != src1) LoadRR(dst, src1);
2572 agfr(dst, src2);
2573 }
2574 #else
2575 AddP(dst, src1, src2);
2576 #endif
2577 }
2578
2579 // Add 32-bit (Register-Memory)
2580 void TurboAssembler::Add32(Register dst, const MemOperand& opnd) {
2581 DCHECK(is_int20(opnd.offset()));
2582 if (is_uint12(opnd.offset()))
2583 a(dst, opnd);
2584 else
2585 ay(dst, opnd);
2586 }
2587
2588 // Add Pointer Size (Register-Memory)
2589 void TurboAssembler::AddP(Register dst, const MemOperand& opnd) {
2590 #if V8_TARGET_ARCH_S390X
2591 DCHECK(is_int20(opnd.offset()));
2592 ag(dst, opnd);
2593 #else
2594 Add32(dst, opnd);
2595 #endif
2596 }
2597
2598 // Add Pointer Size with src extension
2599 // (Register dst (ptr) = Register dst (ptr) + Mem opnd (32 | 32->64))
2600 // src is treated as a 32-bit signed integer, which is sign extended to
2601 // 64-bit if necessary.
2602 void TurboAssembler::AddP_ExtendSrc(Register dst, const MemOperand& opnd) {
2603 #if V8_TARGET_ARCH_S390X
2604 DCHECK(is_int20(opnd.offset()));
2605 agf(dst, opnd);
2606 #else
2607 Add32(dst, opnd);
2608 #endif
2609 }
2610
2611 // Add 32-bit (Memory - Immediate)
2612 void TurboAssembler::Add32(const MemOperand& opnd, const Operand& imm) {
2613 DCHECK(is_int8(imm.immediate()));
2614 DCHECK(is_int20(opnd.offset()));
2615 DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
2616 asi(opnd, imm);
2617 }
2618
2619 // Add Pointer-sized (Memory - Immediate)
2620 void TurboAssembler::AddP(const MemOperand& opnd, const Operand& imm) {
2621 DCHECK(is_int8(imm.immediate()));
2622 DCHECK(is_int20(opnd.offset()));
2623 DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
2624 #if V8_TARGET_ARCH_S390X
2625 agsi(opnd, imm);
2626 #else
2627 asi(opnd, imm);
2628 #endif
2629 }
2630
2631 //----------------------------------------------------------------------------
2632 // Add Logical Instructions
2633 //----------------------------------------------------------------------------
2634
2635 // Add Logical With Carry 32-bit (Register dst = Register src1 + Register src2)
2636 void TurboAssembler::AddLogicalWithCarry32(Register dst, Register src1,
2637 Register src2) {
2638 if (dst != src2 && dst != src1) {
2639 lr(dst, src1);
2640 alcr(dst, src2);
2641 } else if (dst != src2) {
2642 // dst == src1
2643 DCHECK(dst == src1);
2644 alcr(dst, src2);
2645 } else {
2646 // dst == src2
2647 DCHECK(dst == src2);
2648 alcr(dst, src1);
2649 }
2650 }
2651
2652 // Add Logical 32-bit (Register dst = Register src1 + Register src2)
2653 void TurboAssembler::AddLogical32(Register dst, Register src1, Register src2) {
2654 if (dst != src2 && dst != src1) {
2655 lr(dst, src1);
2656 alr(dst, src2);
2657 } else if (dst != src2) {
2658 // dst == src1
2659 DCHECK(dst == src1);
2660 alr(dst, src2);
2661 } else {
2662 // dst == src2
2663 DCHECK(dst == src2);
2664 alr(dst, src1);
2665 }
2666 }
2667
2668 // Add Logical 32-bit (Register dst = Register dst + Immediate opnd)
2669 void TurboAssembler::AddLogical(Register dst, const Operand& imm) {
2670 alfi(dst, imm);
2671 }
2672
2673 // Add Logical Pointer Size (Register dst = Register dst + Immediate opnd)
2674 void TurboAssembler::AddLogicalP(Register dst, const Operand& imm) {
2675 #ifdef V8_TARGET_ARCH_S390X
2676 algfi(dst, imm);
2677 #else
2678 AddLogical(dst, imm);
2679 #endif
2680 }
2681
2682 // Add Logical 32-bit (Register-Memory)
2683 void TurboAssembler::AddLogical(Register dst, const MemOperand& opnd) {
2684 DCHECK(is_int20(opnd.offset()));
2685 if (is_uint12(opnd.offset()))
2686 al_z(dst, opnd);
2687 else
2688 aly(dst, opnd);
2689 }
2690
2691 // Add Logical Pointer Size (Register-Memory)
2692 void TurboAssembler::AddLogicalP(Register dst, const MemOperand& opnd) {
2693 #if V8_TARGET_ARCH_S390X
2694 DCHECK(is_int20(opnd.offset()));
2695 alg(dst, opnd);
2696 #else
2697 AddLogical(dst, opnd);
2698 #endif
2699 }
2700
2701 //----------------------------------------------------------------------------
2702 // Subtract Instructions
2703 //----------------------------------------------------------------------------
2704
2705 // Subtract Logical With Borrow 32-bit (Register dst = Register src1 - Register
2706 // src2)
2707 void TurboAssembler::SubLogicalWithBorrow32(Register dst, Register src1,
2708 Register src2) {
2709 if (dst != src2 && dst != src1) {
2710 lr(dst, src1);
2711 slbr(dst, src2);
2712 } else if (dst != src2) {
2713 // dst == src1
2714 DCHECK(dst == src1);
2715 slbr(dst, src2);
2716 } else {
2717 // dst == src2
2718 DCHECK(dst == src2);
2719 lr(r0, dst);
2720 SubLogicalWithBorrow32(dst, src1, r0);
2721 }
2722 }
2723
2724 // Subtract Logical 32-bit (Register dst = Register src1 - Register src2)
2725 void TurboAssembler::SubLogical32(Register dst, Register src1, Register src2) {
2726 if (dst != src2 && dst != src1) {
2727 lr(dst, src1);
2728 slr(dst, src2);
2729 } else if (dst != src2) {
2730 // dst == src1
2731 DCHECK(dst == src1);
2732 slr(dst, src2);
2733 } else {
2734 // dst == src2
2735 DCHECK(dst == src2);
2736 lr(r0, dst);
2737 SubLogical32(dst, src1, r0);
2738 }
2739 }
2740
2741 // Subtract 32-bit (Register dst = Register dst - Immediate opnd)
2742 void TurboAssembler::Sub32(Register dst, const Operand& imm) {
2743 Add32(dst, Operand(-(imm.immediate())));
2744 }
2745
2746 // Subtract Pointer Size (Register dst = Register dst - Immediate opnd)
2747 void TurboAssembler::SubP(Register dst, const Operand& imm) {
2748 AddP(dst, Operand(-(imm.immediate())));
2749 }
2750
2751 // Subtract 32-bit (Register dst = Register src - Immediate opnd)
2752 void TurboAssembler::Sub32(Register dst, Register src, const Operand& imm) {
2753 Add32(dst, src, Operand(-(imm.immediate())));
2754 }
2755
2756 // Subtract Pointer Sized (Register dst = Register src - Immediate opnd)
2757 void TurboAssembler::SubP(Register dst, Register src, const Operand& imm) {
2758 AddP(dst, src, Operand(-(imm.immediate())));
2759 }
2760
2761 // Subtract 32-bit (Register dst = Register dst - Register src)
2762 void TurboAssembler::Sub32(Register dst, Register src) { sr(dst, src); }
2763
2764 // Subtract Pointer Size (Register dst = Register dst - Register src)
2765 void TurboAssembler::SubP(Register dst, Register src) { SubRR(dst, src); }
2766
2767 // Subtract Pointer Size with src extension
2768 // (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
2769 // src is treated as a 32-bit signed integer, which is sign extended to
2770 // 64-bit if necessary.
2771 void TurboAssembler::SubP_ExtendSrc(Register dst, Register src) {
2772 #if V8_TARGET_ARCH_S390X
2773 sgfr(dst, src);
2774 #else
2775 sr(dst, src);
2776 #endif
2777 }
2778
2779 // Subtract 32-bit (Register = Register - Register)
2780 void TurboAssembler::Sub32(Register dst, Register src1, Register src2) {
2781 // Use non-clobbering version if possible
2782 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
2783 srk(dst, src1, src2);
2784 return;
2785 }
2786 if (dst != src1 && dst != src2) lr(dst, src1);
2787   // In the scenario where dst = src - dst, we need to swap and negate
2788 if (dst != src1 && dst == src2) {
2789 Label done;
2790 lcr(dst, dst); // dst = -dst
2791 b(overflow, &done);
2792 ar(dst, src1); // dst = dst + src
2793 bind(&done);
2794 } else {
2795 sr(dst, src2);
2796 }
2797 }
2798
2799 // Subtract Pointer Sized (Register = Register - Register)
2800 void TurboAssembler::SubP(Register dst, Register src1, Register src2) {
2801 // Use non-clobbering version if possible
2802 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
2803 SubP_RRR(dst, src1, src2);
2804 return;
2805 }
2806 if (dst != src1 && dst != src2) LoadRR(dst, src1);
2807   // In the scenario where dst = src - dst, we need to swap and negate
2808 if (dst != src1 && dst == src2) {
2809 Label done;
2810 LoadComplementRR(dst, dst); // dst = -dst
2811 b(overflow, &done);
2812 AddP(dst, src1); // dst = dst + src
2813 bind(&done);
2814 } else {
2815 SubP(dst, src2);
2816 }
2817 }
2818
2819 // Subtract Pointer Size with src extension
2820 // (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
2821 // src is treated as a 32-bit signed integer, which is sign extended to
2822 // 64-bit if necessary.
2823 void TurboAssembler::SubP_ExtendSrc(Register dst, Register src1,
2824 Register src2) {
2825 #if V8_TARGET_ARCH_S390X
2826 if (dst != src1 && dst != src2) LoadRR(dst, src1);
2827
2828   // In the scenario where dst = src - dst, we need to swap and negate
2829 if (dst != src1 && dst == src2) {
2830 lgfr(dst, dst); // Sign extend this operand first.
2831 LoadComplementRR(dst, dst); // dst = -dst
2832 AddP(dst, src1); // dst = -dst + src
2833 } else {
2834 sgfr(dst, src2);
2835 }
2836 #else
2837 SubP(dst, src1, src2);
2838 #endif
2839 }
2840
2841 // Subtract 32-bit (Register-Memory)
2842 void TurboAssembler::Sub32(Register dst, const MemOperand& opnd) {
2843 DCHECK(is_int20(opnd.offset()));
2844 if (is_uint12(opnd.offset()))
2845 s(dst, opnd);
2846 else
2847 sy(dst, opnd);
2848 }
2849
2850 // Subtract Pointer Sized (Register - Memory)
2851 void TurboAssembler::SubP(Register dst, const MemOperand& opnd) {
2852 #if V8_TARGET_ARCH_S390X
2853 sg(dst, opnd);
2854 #else
2855 Sub32(dst, opnd);
2856 #endif
2857 }
2858
2859 void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
2860 sllg(r0, src, Operand(32));
2861 ldgr(dst, r0);
2862 }
2863
2864 void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
2865 lgdr(dst, src);
2866 srlg(dst, dst, Operand(32));
2867 }
2868
2869 void TurboAssembler::SubP_ExtendSrc(Register dst, const MemOperand& opnd) {
2870 #if V8_TARGET_ARCH_S390X
2871 DCHECK(is_int20(opnd.offset()));
2872 sgf(dst, opnd);
2873 #else
2874 Sub32(dst, opnd);
2875 #endif
2876 }
2877
2878 // Load And Subtract 32-bit (similar to laa/lan/lao/lax)
2879 void TurboAssembler::LoadAndSub32(Register dst, Register src,
2880 const MemOperand& opnd) {
2881 lcr(dst, src);
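  // laa atomically adds the negated value to memory and loads the memory's
  // previous contents into dst, yielding a load-and-subtract.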
2882 laa(dst, dst, opnd);
2883 }
2884
2885 //----------------------------------------------------------------------------
2886 // Subtract Logical Instructions
2887 //----------------------------------------------------------------------------
2888
2889 // Subtract Logical 32-bit (Register - Memory)
2890 void TurboAssembler::SubLogical(Register dst, const MemOperand& opnd) {
2891 DCHECK(is_int20(opnd.offset()));
2892 if (is_uint12(opnd.offset()))
2893 sl(dst, opnd);
2894 else
2895 sly(dst, opnd);
2896 }
2897
2898 // Subtract Logical Pointer Sized (Register - Memory)
2899 void TurboAssembler::SubLogicalP(Register dst, const MemOperand& opnd) {
2900 DCHECK(is_int20(opnd.offset()));
2901 #if V8_TARGET_ARCH_S390X
2902 slgf(dst, opnd);
2903 #else
2904 SubLogical(dst, opnd);
2905 #endif
2906 }
2907
2908 // Subtract Logical Pointer Size with src extension
2909 // (Register dst (ptr) = Register dst (ptr) - Mem opnd (32 | 32->64))
2910 // src is treated as a 32-bit signed integer, which is sign extended to
2911 // 64-bit if necessary.
2912 void TurboAssembler::SubLogicalP_ExtendSrc(Register dst,
2913 const MemOperand& opnd) {
2914 #if V8_TARGET_ARCH_S390X
2915 DCHECK(is_int20(opnd.offset()));
2916 slgf(dst, opnd);
2917 #else
2918 SubLogical(dst, opnd);
2919 #endif
2920 }
2921
2922 //----------------------------------------------------------------------------
2923 // Bitwise Operations
2924 //----------------------------------------------------------------------------
2925
2926 // AND 32-bit - dst = dst & src
2927 void TurboAssembler::And(Register dst, Register src) { nr(dst, src); }
2928
2929 // AND Pointer Size - dst = dst & src
2930 void TurboAssembler::AndP(Register dst, Register src) { AndRR(dst, src); }
2931
2932 // Non-clobbering AND 32-bit - dst = src1 & src2
2933 void TurboAssembler::And(Register dst, Register src1, Register src2) {
2934 if (dst != src1 && dst != src2) {
2935     // We prefer to generate NR/NGR over the non-clobbering NRK/NGRK
2936     // as NR is a smaller instruction
2937 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
2938 nrk(dst, src1, src2);
2939 return;
2940 } else {
2941 lr(dst, src1);
2942 }
2943 } else if (dst == src2) {
2944 src2 = src1;
2945 }
2946 And(dst, src2);
2947 }
2948
2949 // Non-clobbering AND pointer size - dst = src1 & src2
2950 void TurboAssembler::AndP(Register dst, Register src1, Register src2) {
2951 if (dst != src1 && dst != src2) {
2952     // We prefer to generate NR/NGR over the non-clobbering NRK/NGRK
2953     // as NR is a smaller instruction
2954 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
2955 AndP_RRR(dst, src1, src2);
2956 return;
2957 } else {
2958 LoadRR(dst, src1);
2959 }
2960 } else if (dst == src2) {
2961 src2 = src1;
2962 }
2963 AndP(dst, src2);
2964 }
2965
2966 // AND 32-bit (Reg - Mem)
2967 void TurboAssembler::And(Register dst, const MemOperand& opnd) {
2968 DCHECK(is_int20(opnd.offset()));
2969 if (is_uint12(opnd.offset()))
2970 n(dst, opnd);
2971 else
2972 ny(dst, opnd);
2973 }
2974
2975 // AND Pointer Size (Reg - Mem)
2976 void TurboAssembler::AndP(Register dst, const MemOperand& opnd) {
2977 DCHECK(is_int20(opnd.offset()));
2978 #if V8_TARGET_ARCH_S390X
2979 ng(dst, opnd);
2980 #else
2981 And(dst, opnd);
2982 #endif
2983 }
2984
2985 // AND 32-bit - dst = dst & imm
2986 void TurboAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }
2987
2988 // AND Pointer Size - dst = dst & imm
2989 void TurboAssembler::AndP(Register dst, const Operand& opnd) {
2990 #if V8_TARGET_ARCH_S390X
2991 intptr_t value = opnd.immediate();
2992 if (value >> 32 != -1) {
2993 // this may not work b/c condition code won't be set correctly
2994 nihf(dst, Operand(value >> 32));
2995 }
2996 nilf(dst, Operand(value & 0xFFFFFFFF));
2997 #else
2998 And(dst, opnd);
2999 #endif
3000 }
3001
3002 // AND 32-bit - dst = src & imm
3003 void TurboAssembler::And(Register dst, Register src, const Operand& opnd) {
3004 if (dst != src) lr(dst, src);
3005 nilf(dst, opnd);
3006 }
3007
3008 // AND Pointer Size - dst = src & imm
3009 void TurboAssembler::AndP(Register dst, Register src, const Operand& opnd) {
3010 // Try to exploit RISBG first
3011 intptr_t value = opnd.immediate();
3012 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
3013 intptr_t shifted_value = value;
3014 int trailing_zeros = 0;
3015
3016     // Start by counting how many trailing zeros there are.
3017 while ((0 != shifted_value) && (0 == (shifted_value & 1))) {
3018 trailing_zeros++;
3019 shifted_value >>= 1;
3020 }
3021
3022     // If shifted_value (the value with its right-most run of zeros shifted
3023     // out) is 1 less than a power of 2, the mask is a block of consecutive
3024     // 1 bits. Special case: if shifted_value is zero, we cannot use RISBG,
3025     // as it requires selection of at least 1 bit.
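    // Worked example: opnd = 0x0FF0 gives trailing_zeros = 4 and
    // shifted_value = 0xFF = 2^8 - 1, so startBit = 56 - 4 = 52 and
    // endBit = 63 - 4 = 59; RISBG selects bits 52..59 (bit 0 = MSB),
    // which is exactly the 0x0FF0 mask.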
3026 if ((0 != shifted_value) && base::bits::IsPowerOfTwo(shifted_value + 1)) {
3027 int startBit =
3028 base::bits::CountLeadingZeros64(shifted_value) - trailing_zeros;
3029 int endBit = 63 - trailing_zeros;
3030 // Start: startBit, End: endBit, Shift = 0, true = zero unselected bits.
3031 RotateInsertSelectBits(dst, src, Operand(startBit), Operand(endBit),
3032 Operand::Zero(), true);
3033 return;
3034 } else if (-1 == shifted_value) {
3035 // A Special case in which all top bits up to MSB are 1's. In this case,
3036 // we can set startBit to be 0.
3037 int endBit = 63 - trailing_zeros;
3038 RotateInsertSelectBits(dst, src, Operand::Zero(), Operand(endBit),
3039 Operand::Zero(), true);
3040 return;
3041 }
3042 }
3043
3044   // If we are AND'ing with zero, skip the copy; the AND below zeroes dst.
3045 if (dst != src && (0 != value)) LoadRR(dst, src);
3046 AndP(dst, opnd);
3047 }
3048
3049 // OR 32-bit - dst = dst | src
3050 void TurboAssembler::Or(Register dst, Register src) { or_z(dst, src); }
3051
3052 // OR Pointer Size - dst = dst | src
3053 void TurboAssembler::OrP(Register dst, Register src) { OrRR(dst, src); }
3054
3055 // Non-clobbering OR 32-bit - dst = src1 | src2
3056 void TurboAssembler::Or(Register dst, Register src1, Register src2) {
3057 if (dst != src1 && dst != src2) {
3058     // We prefer to generate OR/OGR over the non-clobbering ORK/OGRK
3059     // as OR is a smaller instruction
3060 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3061 ork(dst, src1, src2);
3062 return;
3063 } else {
3064 lr(dst, src1);
3065 }
3066 } else if (dst == src2) {
3067 src2 = src1;
3068 }
3069 Or(dst, src2);
3070 }
3071
3072 // Non-clobbering OR pointer size - dst = src1 | src2
3073 void TurboAssembler::OrP(Register dst, Register src1, Register src2) {
3074 if (dst != src1 && dst != src2) {
3075     // We prefer to generate OR/OGR over the non-clobbering ORK/OGRK
3076     // as OR is a smaller instruction
3077 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3078 OrP_RRR(dst, src1, src2);
3079 return;
3080 } else {
3081 LoadRR(dst, src1);
3082 }
3083 } else if (dst == src2) {
3084 src2 = src1;
3085 }
3086 OrP(dst, src2);
3087 }
3088
3089 // OR 32-bit (Reg - Mem)
3090 void TurboAssembler::Or(Register dst, const MemOperand& opnd) {
3091 DCHECK(is_int20(opnd.offset()));
3092 if (is_uint12(opnd.offset()))
3093 o(dst, opnd);
3094 else
3095 oy(dst, opnd);
3096 }
3097
3098 // OR Pointer Size (Reg - Mem)
3099 void TurboAssembler::OrP(Register dst, const MemOperand& opnd) {
3100 DCHECK(is_int20(opnd.offset()));
3101 #if V8_TARGET_ARCH_S390X
3102 og(dst, opnd);
3103 #else
3104 Or(dst, opnd);
3105 #endif
3106 }
3107
3108 // OR 32-bit - dst = dst | imm
3109 void TurboAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }
3110
3111 // OR Pointer Size - dst = dst | imm
3112 void TurboAssembler::OrP(Register dst, const Operand& opnd) {
3113 #if V8_TARGET_ARCH_S390X
3114 intptr_t value = opnd.immediate();
3115 if (value >> 32 != 0) {
3116 // this may not work b/c condition code won't be set correctly
3117 oihf(dst, Operand(value >> 32));
3118 }
3119 oilf(dst, Operand(value & 0xFFFFFFFF));
3120 #else
3121 Or(dst, opnd);
3122 #endif
3123 }
3124
3125 // OR 32-bit - dst = src | imm
3126 void TurboAssembler::Or(Register dst, Register src, const Operand& opnd) {
3127 if (dst != src) lr(dst, src);
3128 oilf(dst, opnd);
3129 }
3130
3131 // OR Pointer Size - dst = src | imm
3132 void TurboAssembler::OrP(Register dst, Register src, const Operand& opnd) {
3133 if (dst != src) LoadRR(dst, src);
3134 OrP(dst, opnd);
3135 }
3136
3137 // XOR 32-bit - dst = dst ^ src
3138 void TurboAssembler::Xor(Register dst, Register src) { xr(dst, src); }
3139
3140 // XOR Pointer Size - dst = dst ^ src
3141 void TurboAssembler::XorP(Register dst, Register src) { XorRR(dst, src); }
3142
3143 // Non-clobbering XOR 32-bit - dst = src1 ^ src2
3144 void TurboAssembler::Xor(Register dst, Register src1, Register src2) {
3145 if (dst != src1 && dst != src2) {
3146 // We prefer to generate XR/XGR, over the non clobbering XRK/XRK
3147 // as XR is a smaller instruction
3148 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3149 xrk(dst, src1, src2);
3150 return;
3151 } else {
3152 lr(dst, src1);
3153 }
3154 } else if (dst == src2) {
3155 src2 = src1;
3156 }
3157 Xor(dst, src2);
3158 }
3159
3160 // Non-clobbering XOR pointer size - dst = src1 & src1
XorP(Register dst,Register src1,Register src2)3161 void TurboAssembler::XorP(Register dst, Register src1, Register src2) {
3162 if (dst != src1 && dst != src2) {
3163 // We prefer to generate XR/XGR, over the non clobbering XRK/XRK
3164 // as XR is a smaller instruction
3165 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3166 XorP_RRR(dst, src1, src2);
3167 return;
3168 } else {
3169 LoadRR(dst, src1);
3170 }
3171 } else if (dst == src2) {
3172 src2 = src1;
3173 }
3174 XorP(dst, src2);
3175 }
3176
3177 // XOR 32-bit (Reg - Mem)
Xor(Register dst,const MemOperand & opnd)3178 void TurboAssembler::Xor(Register dst, const MemOperand& opnd) {
3179 DCHECK(is_int20(opnd.offset()));
3180 if (is_uint12(opnd.offset()))
3181 x(dst, opnd);
3182 else
3183 xy(dst, opnd);
3184 }
3185
3186 // XOR Pointer Size (Reg - Mem)
XorP(Register dst,const MemOperand & opnd)3187 void TurboAssembler::XorP(Register dst, const MemOperand& opnd) {
3188 DCHECK(is_int20(opnd.offset()));
3189 #if V8_TARGET_ARCH_S390X
3190 xg(dst, opnd);
3191 #else
3192 Xor(dst, opnd);
3193 #endif
3194 }
3195
3196 // XOR 32-bit - dst = dst & imm
Xor(Register dst,const Operand & opnd)3197 void TurboAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }
3198
3199 // XOR Pointer Size - dst = dst & imm
XorP(Register dst,const Operand & opnd)3200 void TurboAssembler::XorP(Register dst, const Operand& opnd) {
3201 #if V8_TARGET_ARCH_S390X
3202 intptr_t value = opnd.immediate();
3203 xihf(dst, Operand(value >> 32));
3204 xilf(dst, Operand(value & 0xFFFFFFFF));
3205 #else
3206 Xor(dst, opnd);
3207 #endif
3208 }
3209
3210 // XOR 32-bit - dst = src & imm
Xor(Register dst,Register src,const Operand & opnd)3211 void TurboAssembler::Xor(Register dst, Register src, const Operand& opnd) {
3212 if (dst != src) lr(dst, src);
3213 xilf(dst, opnd);
3214 }
3215
3216 // XOR Pointer Size - dst = src & imm
XorP(Register dst,Register src,const Operand & opnd)3217 void TurboAssembler::XorP(Register dst, Register src, const Operand& opnd) {
3218 if (dst != src) LoadRR(dst, src);
3219 XorP(dst, opnd);
3220 }
3221
Not32(Register dst,Register src)3222 void TurboAssembler::Not32(Register dst, Register src) {
3223 if (src != no_reg && src != dst) lr(dst, src);
3224 xilf(dst, Operand(0xFFFFFFFF));
3225 }
3226
Not64(Register dst,Register src)3227 void TurboAssembler::Not64(Register dst, Register src) {
3228 if (src != no_reg && src != dst) lgr(dst, src);
3229 xihf(dst, Operand(0xFFFFFFFF));
3230 xilf(dst, Operand(0xFFFFFFFF));
3231 }
3232
NotP(Register dst,Register src)3233 void TurboAssembler::NotP(Register dst, Register src) {
3234 #if V8_TARGET_ARCH_S390X
3235 Not64(dst, src);
3236 #else
3237 Not32(dst, src);
3238 #endif
3239 }
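
// z/Architecture has no bitwise-NOT instruction, so Not32/Not64 XOR with
// all ones instead; the 64-bit variant needs both XIHF and XILF because
// each immediate covers only 32 bits of the register.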

// Works the same as mov.
void TurboAssembler::Load(Register dst, const Operand& opnd) {
  intptr_t value = opnd.immediate();
  if (is_int16(value)) {
#if V8_TARGET_ARCH_S390X
    lghi(dst, opnd);
#else
    lhi(dst, opnd);
#endif
  } else if (is_int32(value)) {
#if V8_TARGET_ARCH_S390X
    lgfi(dst, opnd);
#else
    iilf(dst, opnd);
#endif
  } else if (is_uint32(value)) {
#if V8_TARGET_ARCH_S390X
    llilf(dst, opnd);
#else
    iilf(dst, opnd);
#endif
  } else {
    int32_t hi_32 = static_cast<int64_t>(value) >> 32;
    int32_t lo_32 = static_cast<int32_t>(value);

    iihf(dst, Operand(hi_32));
    iilf(dst, Operand(lo_32));
  }
}
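
// Instruction selection above, roughly: a 16-bit signed immediate uses
// LGHI (LHI on the 32-bit build), a 32-bit signed one LGFI (or IILF), a
// 32-bit unsigned one LLILF, and anything wider falls back to the
// IIHF + IILF pair that writes the two register halves separately.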

void TurboAssembler::Load(Register dst, const MemOperand& opnd) {
  DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
  lgf(dst, opnd);  // 64<-32
#else
  if (is_uint12(opnd.offset())) {
    l(dst, opnd);
  } else {
    ly(dst, opnd);
  }
#endif
}

void TurboAssembler::LoadPositiveP(Register result, Register input) {
#if V8_TARGET_ARCH_S390X
  lpgr(result, input);
#else
  lpr(result, input);
#endif
}

void TurboAssembler::LoadPositive32(Register result, Register input) {
  lpr(result, input);
  lgfr(result, result);
}
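
// LoadPositive32 takes the 32-bit absolute value (LPR) and then
// sign-extends to 64 bits (LGFR); for any result representable in 31 bits
// the intermediate is non-negative, so the extension zero-fills the upper
// word.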

//-----------------------------------------------------------------------------
// Compare Helpers
//-----------------------------------------------------------------------------

// Compare 32-bit Register vs Register
void TurboAssembler::Cmp32(Register src1, Register src2) { cr_z(src1, src2); }

// Compare Pointer Sized Register vs Register
void TurboAssembler::CmpP(Register src1, Register src2) {
#if V8_TARGET_ARCH_S390X
  cgr(src1, src2);
#else
  Cmp32(src1, src2);
#endif
}

// Compare 32-bit Register vs Immediate
// This helper will set up proper relocation entries if required.
void TurboAssembler::Cmp32(Register dst, const Operand& opnd) {
  if (opnd.rmode() == RelocInfo::NONE) {
    intptr_t value = opnd.immediate();
    if (is_int16(value))
      chi(dst, opnd);
    else
      cfi(dst, opnd);
  } else {
    // Need to generate relocation record here
    RecordRelocInfo(opnd.rmode(), opnd.immediate());
    cfi(dst, opnd);
  }
}

// Compare Pointer Sized Register vs Immediate
// This helper will set up proper relocation entries if required.
void TurboAssembler::CmpP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
  if (opnd.rmode() == RelocInfo::NONE) {
    cgfi(dst, opnd);
  } else {
    mov(r0, opnd);  // Need to generate 64-bit relocation
    cgr(dst, r0);
  }
#else
  Cmp32(dst, opnd);
#endif
}

// Compare 32-bit Register vs Memory
void TurboAssembler::Cmp32(Register dst, const MemOperand& opnd) {
  // make sure offset is within 20 bit range
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
    c(dst, opnd);
  else
    cy(dst, opnd);
}

// Compare Pointer Size Register vs Memory
void TurboAssembler::CmpP(Register dst, const MemOperand& opnd) {
  // make sure offset is within 20 bit range
  DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
  cg(dst, opnd);
#else
  Cmp32(dst, opnd);
#endif
}

// Using cs or csy based on the offset
void TurboAssembler::CmpAndSwap(Register old_val, Register new_val,
                                const MemOperand& opnd) {
  if (is_uint12(opnd.offset())) {
    cs(old_val, new_val, opnd);
  } else {
    csy(old_val, new_val, opnd);
  }
}
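
// COMPARE AND SWAP is the architecture's atomic primitive: it compares
// old_val with the word at opnd and, if equal, stores new_val there;
// otherwise the memory value is loaded into old_val. The condition code
// tells the caller which of the two happened.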

//-----------------------------------------------------------------------------
// Compare Logical Helpers
//-----------------------------------------------------------------------------

// Compare Logical 32-bit Register vs Register
void TurboAssembler::CmpLogical32(Register dst, Register src) { clr(dst, src); }

// Compare Logical Pointer Sized Register vs Register
void TurboAssembler::CmpLogicalP(Register dst, Register src) {
#ifdef V8_TARGET_ARCH_S390X
  clgr(dst, src);
#else
  CmpLogical32(dst, src);
#endif
}

// Compare Logical 32-bit Register vs Immediate
void TurboAssembler::CmpLogical32(Register dst, const Operand& opnd) {
  clfi(dst, opnd);
}

// Compare Logical Pointer Sized Register vs Immediate
void TurboAssembler::CmpLogicalP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
  DCHECK_EQ(static_cast<uint32_t>(opnd.immediate() >> 32), 0);
  clgfi(dst, opnd);
#else
  CmpLogical32(dst, opnd);
#endif
}

// Compare Logical 32-bit Register vs Memory
void TurboAssembler::CmpLogical32(Register dst, const MemOperand& opnd) {
  // make sure offset is within 20 bit range
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
    cl(dst, opnd);
  else
    cly(dst, opnd);
}

// Compare Logical Pointer Sized Register vs Memory
void TurboAssembler::CmpLogicalP(Register dst, const MemOperand& opnd) {
  // make sure offset is within 20 bit range
  DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
  clg(dst, opnd);
#else
  CmpLogical32(dst, opnd);
#endif
}

// Compare Logical Byte (Mem - Imm)
void TurboAssembler::CmpLogicalByte(const MemOperand& mem, const Operand& imm) {
  DCHECK(is_uint8(imm.immediate()));
  if (is_uint12(mem.offset()))
    cli(mem, imm);
  else
    cliy(mem, imm);
}

void TurboAssembler::Branch(Condition c, const Operand& opnd) {
  intptr_t value = opnd.immediate();
  if (is_int16(value))
    brc(c, opnd);
  else
    brcl(c, opnd);
}

// Branch On Count. Decrement R1, and branch if R1 != 0.
void TurboAssembler::BranchOnCount(Register r1, Label* l) {
  int32_t offset = branch_offset(l);
  if (is_int16(offset)) {
#if V8_TARGET_ARCH_S390X
    brctg(r1, Operand(offset));
#else
    brct(r1, Operand(offset));
#endif
  } else {
    AddP(r1, Operand(-1));
    Branch(ne, Operand(offset));
  }
}
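
// BRCT/BRCTG fuse the decrement and the conditional branch into one
// instruction, but their relative offset field is only 16 bits; for
// farther targets the helper falls back to an explicit AddP(-1) followed
// by a branch-not-equal.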

void TurboAssembler::LoadIntLiteral(Register dst, int value) {
  Load(dst, Operand(value));
}

void TurboAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
  intptr_t value = reinterpret_cast<intptr_t>(smi);
#if V8_TARGET_ARCH_S390X
  DCHECK_EQ(value & 0xFFFFFFFF, 0);
  // The smi value is loaded into the upper 32 bits. The lower 32 bits are
  // zeros.
  llihf(dst, Operand(value >> 32));
#else
  llilf(dst, Operand(value));
#endif
}
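
// On 64-bit targets a Smi keeps its payload in the upper word, so, for
// example, the Smi 5 is the bit pattern 0x0000000500000000 and can be
// materialized with a single LLIHF of the high half.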

void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, uint64_t value,
                                       Register scratch) {
  uint32_t hi_32 = value >> 32;
  uint32_t lo_32 = static_cast<uint32_t>(value);

  // Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
  if (value == 0) {
    lzdr(result);
  } else if (lo_32 == 0) {
    llihf(scratch, Operand(hi_32));
    ldgr(result, scratch);
  } else {
    iihf(scratch, Operand(hi_32));
    iilf(scratch, Operand(lo_32));
    ldgr(result, scratch);
  }
}
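
// LDGR copies the raw 64-bit pattern from the GPR into the FPR without any
// format conversion, which is exactly what a bit-cast literal needs. The
// zero case uses LZDR and so touches no scratch GPR, and a value with a
// zero low word needs only one immediate load.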

void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
                                       Register scratch) {
  uint64_t int_val = bit_cast<uint64_t, double>(value);
  LoadDoubleLiteral(result, int_val, scratch);
}

void TurboAssembler::LoadFloat32Literal(DoubleRegister result, float value,
                                        Register scratch) {
  uint64_t int_val = static_cast<uint64_t>(bit_cast<uint32_t, float>(value))
                     << 32;
  LoadDoubleLiteral(result, int_val, scratch);
}

void TurboAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
#if V8_TARGET_ARCH_S390X
  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    cih(src1, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
  } else {
    LoadSmiLiteral(scratch, smi);
    cgr(src1, scratch);
  }
#else
  // CFI takes 32-bit immediate.
  cfi(src1, Operand(smi));
#endif
}

void TurboAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
                                          Register scratch) {
#if V8_TARGET_ARCH_S390X
  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    clih(src1, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
  } else {
    LoadSmiLiteral(scratch, smi);
    clgr(src1, scratch);
  }
#else
  // CLFI takes 32-bit immediate.
  clfi(src1, Operand(smi));
#endif
}

void TurboAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
                                   Register scratch) {
#if V8_TARGET_ARCH_S390X
  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    if (dst != src) LoadRR(dst, src);
    aih(dst, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
  } else {
    LoadSmiLiteral(scratch, smi);
    AddP(dst, src, scratch);
  }
#else
  AddP(dst, src, Operand(reinterpret_cast<intptr_t>(smi)));
#endif
}

void TurboAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
                                   Register scratch) {
#if V8_TARGET_ARCH_S390X
  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    if (dst != src) LoadRR(dst, src);
    aih(dst, Operand((-reinterpret_cast<intptr_t>(smi)) >> 32));
  } else {
    LoadSmiLiteral(scratch, smi);
    SubP(dst, src, scratch);
  }
#else
  AddP(dst, src, Operand(-(reinterpret_cast<intptr_t>(smi))));
#endif
}

void TurboAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
  if (dst != src) LoadRR(dst, src);
#if V8_TARGET_ARCH_S390X
  DCHECK_EQ(reinterpret_cast<intptr_t>(smi) & 0xFFFFFFFF, 0);
  int value = static_cast<int>(reinterpret_cast<intptr_t>(smi) >> 32);
  nihf(dst, Operand(value));
#else
  nilf(dst, Operand(reinterpret_cast<int>(smi)));
#endif
}
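
// All of the 64-bit Smi helpers above lean on the same layout invariant:
// the payload occupies the upper word and the lower word is zero, so a
// single high-half instruction (CIH, CLIH, AIH, NIHF) can compare, add, or
// mask a Smi literal without materializing it in a scratch register first.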

// Load a "pointer" sized value from the memory location
void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
                           Register scratch) {
  int offset = mem.offset();

#if V8_TARGET_ARCH_S390X
  MemOperand src = mem;
  if (!is_int20(offset)) {
    DCHECK(scratch != no_reg && scratch != r0 && mem.rx() == r0);
    DCHECK(scratch != mem.rb());
    LoadIntLiteral(scratch, offset);
    src = MemOperand(mem.rb(), scratch);
  }
  lg(dst, src);
#else
  if (is_uint12(offset)) {
    l(dst, mem);
  } else if (is_int20(offset)) {
    ly(dst, mem);
  } else {
    DCHECK(scratch != no_reg && scratch != r0 && mem.rx() == r0);
    DCHECK(scratch != mem.rb());
    LoadIntLiteral(scratch, offset);
    l(dst, MemOperand(mem.rb(), scratch));
  }
#endif
}

// Store a "pointer" sized value to the memory location
void TurboAssembler::StoreP(Register src, const MemOperand& mem,
                            Register scratch) {
  if (!is_int20(mem.offset())) {
    DCHECK(scratch != no_reg);
    DCHECK(scratch != r0);
    LoadIntLiteral(scratch, mem.offset());
#if V8_TARGET_ARCH_S390X
    stg(src, MemOperand(mem.rb(), scratch));
#else
    st(src, MemOperand(mem.rb(), scratch));
#endif
  } else {
#if V8_TARGET_ARCH_S390X
    stg(src, mem);
#else
    // StoreW will try to generate ST if offset fits, otherwise
    // it'll generate STY.
    StoreW(src, mem);
#endif
  }
}

// Store a "pointer" sized constant to the memory location
void TurboAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
                            Register scratch) {
  // Relocations not supported
  DCHECK_EQ(opnd.rmode(), RelocInfo::NONE);

  // Try to use MVGHI/MVHI
  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
      mem.getIndexRegister() == r0 && is_int16(opnd.immediate())) {
#if V8_TARGET_ARCH_S390X
    mvghi(mem, opnd);
#else
    mvhi(mem, opnd);
#endif
  } else {
    LoadImmP(scratch, opnd);
    StoreP(scratch, mem);
  }
}
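
// MVGHI/MVHI store a 16-bit signed immediate straight to memory, so the
// fast path above avoids touching a scratch register entirely; it only
// applies with GENERAL_INSTR_EXT, a 12-bit unsigned displacement, and no
// index register.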

void TurboAssembler::LoadMultipleP(Register dst1, Register dst2,
                                   const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
  DCHECK(is_int20(mem.offset()));
  lmg(dst1, dst2, mem);
#else
  if (is_uint12(mem.offset())) {
    lm(dst1, dst2, mem);
  } else {
    DCHECK(is_int20(mem.offset()));
    lmy(dst1, dst2, mem);
  }
#endif
}

void TurboAssembler::StoreMultipleP(Register src1, Register src2,
                                    const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
  DCHECK(is_int20(mem.offset()));
  stmg(src1, src2, mem);
#else
  if (is_uint12(mem.offset())) {
    stm(src1, src2, mem);
  } else {
    DCHECK(is_int20(mem.offset()));
    stmy(src1, src2, mem);
  }
#endif
}

void TurboAssembler::LoadMultipleW(Register dst1, Register dst2,
                                   const MemOperand& mem) {
  if (is_uint12(mem.offset())) {
    lm(dst1, dst2, mem);
  } else {
    DCHECK(is_int20(mem.offset()));
    lmy(dst1, dst2, mem);
  }
}

void TurboAssembler::StoreMultipleW(Register src1, Register src2,
                                    const MemOperand& mem) {
  if (is_uint12(mem.offset())) {
    stm(src1, src2, mem);
  } else {
    DCHECK(is_int20(mem.offset()));
    stmy(src1, src2, mem);
  }
}
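
// The load/store-multiple instructions (LM/LMG/STM/STMG and their
// long-displacement variants) transfer the consecutive register range from
// the first register through the second to or from successive memory
// words.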

// Load 32-bits and sign extend if necessary.
void TurboAssembler::LoadW(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  lgfr(dst, src);
#else
  if (dst != src) lr(dst, src);
#endif
}

// Load 32-bits and sign extend if necessary.
void TurboAssembler::LoadW(Register dst, const MemOperand& mem,
                           Register scratch) {
  int offset = mem.offset();

  if (!is_int20(offset)) {
    DCHECK(scratch != no_reg);
    LoadIntLiteral(scratch, offset);
#if V8_TARGET_ARCH_S390X
    lgf(dst, MemOperand(mem.rb(), scratch));
#else
    l(dst, MemOperand(mem.rb(), scratch));
#endif
  } else {
#if V8_TARGET_ARCH_S390X
    lgf(dst, mem);
#else
    if (is_uint12(offset)) {
      l(dst, mem);
    } else {
      ly(dst, mem);
    }
#endif
  }
}

// Load 32-bits and zero extend if necessary.
void TurboAssembler::LoadlW(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  llgfr(dst, src);
#else
  if (dst != src) lr(dst, src);
#endif
}

// Variable length depending on whether the offset fits into the immediate
// field. MemOperand is of RX or RXY format.
void TurboAssembler::LoadlW(Register dst, const MemOperand& mem,
                            Register scratch) {
  Register base = mem.rb();
  int offset = mem.offset();

#if V8_TARGET_ARCH_S390X
  if (is_int20(offset)) {
    llgf(dst, mem);
  } else if (scratch != no_reg) {
    // Materialize offset into scratch register.
    LoadIntLiteral(scratch, offset);
    llgf(dst, MemOperand(base, scratch));
  } else {
    DCHECK(false);
  }
#else
  bool use_RXform = false;
  bool use_RXYform = false;
  if (is_uint12(offset)) {
    // RX-format supports unsigned 12-bits offset.
    use_RXform = true;
  } else if (is_int20(offset)) {
    // RXY-format supports signed 20-bits offset.
    use_RXYform = true;
  } else if (scratch != no_reg) {
    // Materialize offset into scratch register.
    LoadIntLiteral(scratch, offset);
  } else {
    DCHECK(false);
  }

  if (use_RXform) {
    l(dst, mem);
  } else if (use_RXYform) {
    ly(dst, mem);
  } else {
    ly(dst, MemOperand(base, scratch));
  }
#endif
}

void TurboAssembler::LoadLogicalHalfWordP(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
  llgh(dst, mem);
#else
  llh(dst, mem);
#endif
}

void TurboAssembler::LoadLogicalHalfWordP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  llghr(dst, src);
#else
  llhr(dst, src);
#endif
}

void TurboAssembler::LoadB(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
  lgb(dst, mem);
#else
  lb(dst, mem);
#endif
}

void TurboAssembler::LoadB(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  lgbr(dst, src);
#else
  lbr(dst, src);
#endif
}

void TurboAssembler::LoadlB(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
  llgc(dst, mem);
#else
  llc(dst, mem);
#endif
}

void TurboAssembler::LoadlB(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  llgcr(dst, src);
#else
  llcr(dst, src);
#endif
}

void TurboAssembler::LoadLogicalReversedWordP(Register dst,
                                              const MemOperand& mem) {
  lrv(dst, mem);
  LoadlW(dst, dst);
}

void TurboAssembler::LoadLogicalReversedHalfWordP(Register dst,
                                                  const MemOperand& mem) {
  lrvh(dst, mem);
  LoadLogicalHalfWordP(dst, dst);
}
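
// LRV/LRVH load the operand byte-reversed but only define the low word or
// halfword of the register, so each helper follows up with a zero
// extension to leave the full register in a canonical state.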

// Load And Test (Reg <- Reg)
void TurboAssembler::LoadAndTest32(Register dst, Register src) {
  ltr(dst, src);
}

// Load And Test (Register dst(ptr) <- Register src (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
void TurboAssembler::LoadAndTestP_ExtendSrc(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  ltgfr(dst, src);
#else
  ltr(dst, src);
#endif
}

// Load And Test Pointer Sized (Reg <- Reg)
void TurboAssembler::LoadAndTestP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  ltgr(dst, src);
#else
  ltr(dst, src);
#endif
}

// Load And Test 32-bit (Reg <- Mem)
void TurboAssembler::LoadAndTest32(Register dst, const MemOperand& mem) {
  lt_z(dst, mem);
}

// Load And Test Pointer Sized (Reg <- Mem)
void TurboAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
  ltg(dst, mem);
#else
  lt_z(dst, mem);
#endif
}

// Load On Condition Pointer Sized (Reg <- Reg)
void TurboAssembler::LoadOnConditionP(Condition cond, Register dst,
                                      Register src) {
#if V8_TARGET_ARCH_S390X
  locgr(cond, dst, src);
#else
  locr(cond, dst, src);
#endif
}
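
// LOCR/LOCGR copy src into dst only when the current condition code
// matches cond, which lets callers select a value without emitting a
// branch.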

// Load Double Precision (64-bit) Floating Point number from memory
void TurboAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem) {
  // On both 32-bit and 64-bit targets we use 64-bit floating point regs.
  if (is_uint12(mem.offset())) {
    ld(dst, mem);
  } else {
    ldy(dst, mem);
  }
}

// Load Single Precision (32-bit) Floating Point number from memory
void TurboAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem) {
  if (is_uint12(mem.offset())) {
    le_z(dst, mem);
  } else {
    DCHECK(is_int20(mem.offset()));
    ley(dst, mem);
  }
}

// Load Single Precision (32-bit) Floating Point number from memory,
// and convert to Double Precision (64-bit)
void TurboAssembler::LoadFloat32ConvertToDouble(DoubleRegister dst,
                                                const MemOperand& mem) {
  LoadFloat32(dst, mem);
  ldebr(dst, dst);
}

// Store Double Precision (64-bit) Floating Point number to memory
void TurboAssembler::StoreDouble(DoubleRegister dst, const MemOperand& mem) {
  if (is_uint12(mem.offset())) {
    std(dst, mem);
  } else {
    stdy(dst, mem);
  }
}

// Store Single Precision (32-bit) Floating Point number to memory
void TurboAssembler::StoreFloat32(DoubleRegister src, const MemOperand& mem) {
  if (is_uint12(mem.offset())) {
    ste(src, mem);
  } else {
    stey(src, mem);
  }
}

// Convert Double precision (64-bit) to Single Precision (32-bit)
// and store resulting Float32 to memory
void TurboAssembler::StoreDoubleAsFloat32(DoubleRegister src,
                                          const MemOperand& mem,
                                          DoubleRegister scratch) {
  ledbr(scratch, src);
  StoreFloat32(scratch, mem);
}

void TurboAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd,
                                DoubleRegister scratch) {
  if (is_uint12(opnd.offset())) {
    aeb(dst, opnd);
  } else {
    ley(scratch, opnd);
    aebr(dst, scratch);
  }
}

void TurboAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd,
                                DoubleRegister scratch) {
  if (is_uint12(opnd.offset())) {
    adb(dst, opnd);
  } else {
    ldy(scratch, opnd);
    adbr(dst, scratch);
  }
}

void TurboAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd,
                                DoubleRegister scratch) {
  if (is_uint12(opnd.offset())) {
    seb(dst, opnd);
  } else {
    ley(scratch, opnd);
    sebr(dst, scratch);
  }
}

void TurboAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd,
                                DoubleRegister scratch) {
  if (is_uint12(opnd.offset())) {
    sdb(dst, opnd);
  } else {
    ldy(scratch, opnd);
    sdbr(dst, scratch);
  }
}

void TurboAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd,
                                DoubleRegister scratch) {
  if (is_uint12(opnd.offset())) {
    meeb(dst, opnd);
  } else {
    ley(scratch, opnd);
    meebr(dst, scratch);
  }
}

void TurboAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd,
                                DoubleRegister scratch) {
  if (is_uint12(opnd.offset())) {
    mdb(dst, opnd);
  } else {
    ldy(scratch, opnd);
    mdbr(dst, scratch);
  }
}

void TurboAssembler::DivFloat32(DoubleRegister dst, const MemOperand& opnd,
                                DoubleRegister scratch) {
  if (is_uint12(opnd.offset())) {
    deb(dst, opnd);
  } else {
    ley(scratch, opnd);
    debr(dst, scratch);
  }
}

void TurboAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd,
                                DoubleRegister scratch) {
  if (is_uint12(opnd.offset())) {
    ddb(dst, opnd);
  } else {
    ldy(scratch, opnd);
    ddbr(dst, scratch);
  }
}

void TurboAssembler::LoadFloat32ToDouble(DoubleRegister dst,
                                         const MemOperand& opnd,
                                         DoubleRegister scratch) {
  if (is_uint12(opnd.offset())) {
    ldeb(dst, opnd);
  } else {
    ley(scratch, opnd);
    ldebr(dst, scratch);
  }
}
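
// The memory-operand FP helpers above share one pattern: the RXE-format
// instructions (AEB, ADB, SEB, MEEB, DEB, LDEB, ...) only accept a 12-bit
// unsigned displacement, so larger offsets are handled by loading the
// operand into the scratch FPR and using the register-register form.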

// Variable length depending on whether the offset fits into the immediate
// field. MemOperand is of RX or RXY format.
void TurboAssembler::StoreW(Register src, const MemOperand& mem,
                            Register scratch) {
  Register base = mem.rb();
  int offset = mem.offset();

  bool use_RXform = false;
  bool use_RXYform = false;

  if (is_uint12(offset)) {
    // RX-format supports unsigned 12-bits offset.
    use_RXform = true;
  } else if (is_int20(offset)) {
    // RXY-format supports signed 20-bits offset.
    use_RXYform = true;
  } else if (scratch != no_reg) {
    // Materialize offset into scratch register.
    LoadIntLiteral(scratch, offset);
  } else {
    // scratch is no_reg
    DCHECK(false);
  }

  if (use_RXform) {
    st(src, mem);
  } else if (use_RXYform) {
    sty(src, mem);
  } else {
    StoreW(src, MemOperand(base, scratch));
  }
}

void TurboAssembler::LoadHalfWordP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  lghr(dst, src);
#else
  lhr(dst, src);
#endif
}

// Loads a 16-bit half-word value from memory and sign extends to a pointer
// sized register
void TurboAssembler::LoadHalfWordP(Register dst, const MemOperand& mem,
                                   Register scratch) {
  Register base = mem.rb();
  int offset = mem.offset();

  if (!is_int20(offset)) {
    DCHECK(scratch != no_reg);
    LoadIntLiteral(scratch, offset);
#if V8_TARGET_ARCH_S390X
    lgh(dst, MemOperand(base, scratch));
#else
    lh(dst, MemOperand(base, scratch));
#endif
  } else {
#if V8_TARGET_ARCH_S390X
    lgh(dst, mem);
#else
    if (is_uint12(offset)) {
      lh(dst, mem);
    } else {
      lhy(dst, mem);
    }
#endif
  }
}

// Variable length depending on whether the offset fits into the immediate
// field. MemOperand currently only supports d-form.
void TurboAssembler::StoreHalfWord(Register src, const MemOperand& mem,
                                   Register scratch) {
  Register base = mem.rb();
  int offset = mem.offset();

  if (is_uint12(offset)) {
    sth(src, mem);
  } else if (is_int20(offset)) {
    sthy(src, mem);
  } else {
    DCHECK(scratch != no_reg);
    LoadIntLiteral(scratch, offset);
    sth(src, MemOperand(base, scratch));
  }
}

// Variable length depending on whether the offset fits into the immediate
// field. MemOperand currently only supports d-form.
void TurboAssembler::StoreByte(Register src, const MemOperand& mem,
                               Register scratch) {
  Register base = mem.rb();
  int offset = mem.offset();

  if (is_uint12(offset)) {
    stc(src, mem);
  } else if (is_int20(offset)) {
    stcy(src, mem);
  } else {
    DCHECK(scratch != no_reg);
    LoadIntLiteral(scratch, offset);
    stc(src, MemOperand(base, scratch));
  }
}

// Shift left logical for 32-bit integer types.
void TurboAssembler::ShiftLeft(Register dst, Register src, const Operand& val) {
  if (dst == src) {
    sll(dst, val);
  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    sllk(dst, src, val);
  } else {
    lr(dst, src);
    sll(dst, val);
  }
}

// Shift left logical for 32-bit integer types.
void TurboAssembler::ShiftLeft(Register dst, Register src, Register val) {
  if (dst == src) {
    sll(dst, val);
  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    sllk(dst, src, val);
  } else {
    DCHECK(dst != val);  // The lr/sll path clobbers val.
    lr(dst, src);
    sll(dst, val);
  }
}

// Shift right logical for 32-bit integer types.
void TurboAssembler::ShiftRight(Register dst, Register src,
                                const Operand& val) {
  if (dst == src) {
    srl(dst, val);
  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    srlk(dst, src, val);
  } else {
    lr(dst, src);
    srl(dst, val);
  }
}

// Shift right logical for 32-bit integer types.
void TurboAssembler::ShiftRight(Register dst, Register src, Register val) {
  if (dst == src) {
    srl(dst, val);
  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    srlk(dst, src, val);
  } else {
    DCHECK(dst != val);  // The lr/srl path clobbers val.
    lr(dst, src);
    srl(dst, val);
  }
}

// Shift left arithmetic for 32-bit integer types.
void TurboAssembler::ShiftLeftArith(Register dst, Register src,
                                    const Operand& val) {
  if (dst == src) {
    sla(dst, val);
  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    slak(dst, src, val);
  } else {
    lr(dst, src);
    sla(dst, val);
  }
}

// Shift left arithmetic for 32-bit integer types.
void TurboAssembler::ShiftLeftArith(Register dst, Register src, Register val) {
  if (dst == src) {
    sla(dst, val);
  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    slak(dst, src, val);
  } else {
    DCHECK(dst != val);  // The lr/sla path clobbers val.
    lr(dst, src);
    sla(dst, val);
  }
}

// Shift right arithmetic for 32-bit integer types.
void TurboAssembler::ShiftRightArith(Register dst, Register src,
                                     const Operand& val) {
  if (dst == src) {
    sra(dst, val);
  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    srak(dst, src, val);
  } else {
    lr(dst, src);
    sra(dst, val);
  }
}

// Shift right arithmetic for 32-bit integer types.
void TurboAssembler::ShiftRightArith(Register dst, Register src, Register val) {
  if (dst == src) {
    sra(dst, val);
  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    srak(dst, src, val);
  } else {
    DCHECK(dst != val);  // The lr/sra path clobbers val.
    lr(dst, src);
    sra(dst, val);
  }
}
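
// Every shift helper above follows the same three-way dispatch: shift in
// place when dst aliases src, use the distinct-operands form
// (SLLK/SRLK/SLAK/SRAK) when available, and otherwise copy src into dst
// before shifting with the classic two-operand instruction.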

// Clear the rightmost # of bits.
void TurboAssembler::ClearRightImm(Register dst, Register src,
                                   const Operand& val) {
  int numBitsToClear = val.immediate() % (kPointerSize * 8);

  // Try to use RISBG if possible
  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
    int endBit = 63 - numBitsToClear;
    RotateInsertSelectBits(dst, src, Operand::Zero(), Operand(endBit),
                           Operand::Zero(), true);
    return;
  }

  // Use an unsigned 64-bit one to avoid shifting a 32-bit long on 31-bit
  // targets.
  uint64_t hexMask = ~((static_cast<uint64_t>(1) << numBitsToClear) - 1);

  // S390 AND instr clobbers source. Make a copy if necessary
  if (dst != src) LoadRR(dst, src);

  if (numBitsToClear <= 16) {
    nill(dst, Operand(static_cast<uint16_t>(hexMask)));
  } else if (numBitsToClear <= 32) {
    nilf(dst, Operand(static_cast<uint32_t>(hexMask)));
  } else if (numBitsToClear <= 64) {
    nilf(dst, Operand(static_cast<intptr_t>(0)));
    nihf(dst, Operand(hexMask >> 32));
  }
}
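
// With RISBG, clearing the low n bits is a single instruction: rotate by
// zero, select bits 0 through 63 - n, and zero everything outside the
// selection. For instance, ClearRightImm(dst, src, Operand(3)) keeps bits
// 0..60 of src and zeroes the low three bits.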

void TurboAssembler::Popcnt32(Register dst, Register src) {
  DCHECK(src != r0);
  DCHECK(dst != r0);

  popcnt(dst, src);
  ShiftRight(r0, dst, Operand(16));
  ar(dst, r0);
  ShiftRight(r0, dst, Operand(8));
  ar(dst, r0);
  llgcr(dst, dst);
}
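
// POPCNT on z/Architecture computes a population count per byte rather
// than for the whole register. The shift-and-add sequence above folds the
// four byte counts into the lowest byte (the 16-bit shift adds the upper
// halfword's bytes, the 8-bit shift adds the remaining byte), and LLGCR
// then zero-extends that byte into dst. Popcnt64 below does the same over
// eight bytes.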

#ifdef V8_TARGET_ARCH_S390X
void TurboAssembler::Popcnt64(Register dst, Register src) {
  DCHECK(src != r0);
  DCHECK(dst != r0);

  popcnt(dst, src);
  ShiftRightP(r0, dst, Operand(32));
  AddP(dst, r0);
  ShiftRightP(r0, dst, Operand(16));
  AddP(dst, r0);
  ShiftRightP(r0, dst, Operand(8));
  AddP(dst, r0);
  LoadlB(dst, dst);
}
#endif

void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
  if (src == dst) return;
  DCHECK(!AreAliased(src, dst, scratch));
  LoadRR(scratch, src);
  LoadRR(src, dst);
  LoadRR(dst, scratch);
}

void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
  if (dst.rx() != r0) DCHECK(!AreAliased(src, dst.rx(), scratch));
  if (dst.rb() != r0) DCHECK(!AreAliased(src, dst.rb(), scratch));
  DCHECK(!AreAliased(src, scratch));
  LoadRR(scratch, src);
  LoadP(src, dst);
  StoreP(scratch, dst);
}

void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
                           Register scratch_1) {
  if (src.rx() != r0) DCHECK(!AreAliased(src.rx(), scratch_0, scratch_1));
  if (src.rb() != r0) DCHECK(!AreAliased(src.rb(), scratch_0, scratch_1));
  if (dst.rx() != r0) DCHECK(!AreAliased(dst.rx(), scratch_0, scratch_1));
  if (dst.rb() != r0) DCHECK(!AreAliased(dst.rb(), scratch_0, scratch_1));
  DCHECK(!AreAliased(scratch_0, scratch_1));
  LoadP(scratch_0, src);
  LoadP(scratch_1, dst);
  StoreP(scratch_0, dst);
  StoreP(scratch_1, src);
}

void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
                                 DoubleRegister scratch) {
  if (src == dst) return;
  DCHECK(!AreAliased(src, dst, scratch));
  ldr(scratch, src);
  ldr(src, dst);
  ldr(dst, scratch);
}

void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
                                 DoubleRegister scratch) {
  DCHECK(!AreAliased(src, scratch));
  ldr(scratch, src);
  LoadFloat32(src, dst);
  StoreFloat32(scratch, dst);
}

void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
                                 DoubleRegister scratch_0,
                                 DoubleRegister scratch_1) {
  DCHECK(!AreAliased(scratch_0, scratch_1));
  LoadFloat32(scratch_0, src);
  LoadFloat32(scratch_1, dst);
  StoreFloat32(scratch_0, dst);
  StoreFloat32(scratch_1, src);
}

void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
                                DoubleRegister scratch) {
  if (src == dst) return;
  DCHECK(!AreAliased(src, dst, scratch));
  ldr(scratch, src);
  ldr(src, dst);
  ldr(dst, scratch);
}

void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
                                DoubleRegister scratch) {
  DCHECK(!AreAliased(src, scratch));
  ldr(scratch, src);
  LoadDouble(src, dst);
  StoreDouble(scratch, dst);
}

void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
                                DoubleRegister scratch_0,
                                DoubleRegister scratch_1) {
  DCHECK(!AreAliased(scratch_0, scratch_1));
  LoadDouble(scratch_0, src);
  LoadDouble(scratch_1, dst);
  StoreDouble(scratch_0, dst);
  StoreDouble(scratch_1, src);
}

void TurboAssembler::ResetSpeculationPoisonRegister() {
  mov(kSpeculationPoisonRegister, Operand(-1));
}

void TurboAssembler::ComputeCodeStartAddress(Register dst) {
  larl(dst, Operand(-pc_offset() / 2));
}
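
// LARL computes dst = PC + 2 * immediate, with the immediate counted in
// halfwords. Since pc_offset() is the byte offset of this instruction from
// the start of the buffer, -pc_offset() / 2 points dst back at the start
// of the generated code.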

void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
  Cmp32(x, Operand(y));
  beq(dest);
}

void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
  Cmp32(x, Operand(y));
  blt(dest);
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_S390